root committed
Commit 136be26 · 1 parent: 29fdfde

fixing python and stuff

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete list.
Files changed (50):
  1. Colab_demo.ipynb +127 -0
  2. LICENSE +661 -0
  3. Practical-RIFE +0 -1
  4. Practical-RIFE/Colab_demo.ipynb +127 -0
  5. Practical-RIFE/inference_img.py +118 -0
  6. Practical-RIFE/inference_img_SR.py +69 -0
  7. Practical-RIFE/inference_video.py +293 -0
  8. Practical-RIFE/inference_video_enhance.py +201 -0
  9. Practical-RIFE/model/__pycache__/loss.cpython-310.pyc +0 -0
  10. Practical-RIFE/model/__pycache__/warplayer.cpython-310.pyc +0 -0
  11. Practical-RIFE/model/loss.py +128 -0
  12. Practical-RIFE/model/pytorch_msssim/__init__.py +200 -0
  13. Practical-RIFE/model/pytorch_msssim/__pycache__/__init__.cpython-310.pyc +0 -0
  14. Practical-RIFE/model/warplayer.py +22 -0
  15. Practical-RIFE/train_log/.DS_Store +0 -0
  16. Practical-RIFE/train_log/IFNet_HDv3.py +156 -0
  17. Practical-RIFE/train_log/RIFE_HDv3.py +89 -0
  18. Practical-RIFE/train_log/__pycache__/IFNet_HDv3.cpython-310.pyc +0 -0
  19. Practical-RIFE/train_log/__pycache__/RIFE_HDv3.cpython-310.pyc +0 -0
  20. Practical-RIFE/train_log/flownet.pkl +3 -0
  21. Practical-RIFE/train_log/refine.py +90 -0
  22. README.md +154 -8
  23. __pycache__/handler.cpython-310.pyc +0 -0
  24. __pycache__/settings.cpython-310.pyc +0 -0
  25. clip/__init__.py +1 -0
  26. clip/bpe_simple_vocab_16e6.txt.gz +3 -0
  27. clip/clip.py +241 -0
  28. clip/clipseg.py +538 -0
  29. clip/model.py +436 -0
  30. clip/simple_tokenizer.py +132 -0
  31. clip/vitseg.py +286 -0
  32. config_colab.yaml +14 -0
  33. handler.py +33 -47
  34. inference_img.py +118 -0
  35. inference_img_SR.py +69 -0
  36. inference_video.py +293 -0
  37. inference_video_enhance.py +201 -0
  38. installer/installer.py +87 -0
  39. installer/windows_run.bat +99 -0
  40. model/__pycache__/loss.cpython-310.pyc +0 -0
  41. model/__pycache__/warplayer.cpython-310.pyc +0 -0
  42. model/loss.py +128 -0
  43. model/pytorch_msssim/__init__.py +200 -0
  44. model/pytorch_msssim/__pycache__/__init__.cpython-310.pyc +0 -0
  45. model/warplayer.py +22 -0
  46. models/CLIP/rd64-uni-refined.pth +3 -0
  47. models/CodeFormer/CodeFormerv0.1.onnx +3 -0
  48. models/DMDNet.pth +3 -0
  49. models/Frame/deoldify_artistic.onnx +3 -0
  50. models/Frame/deoldify_stable.onnx +3 -0
Colab_demo.ipynb ADDED
@@ -0,0 +1,127 @@
+ {
+   "nbformat": 4,
+   "nbformat_minor": 0,
+   "metadata": {
+     "colab": {
+       "name": "Untitled0.ipynb",
+       "provenance": [],
+       "include_colab_link": true
+     },
+     "kernelspec": {
+       "name": "python3",
+       "display_name": "Python 3"
+     },
+     "accelerator": "GPU"
+   },
+   "cells": [
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "view-in-github",
+         "colab_type": "text"
+       },
+       "source": [
+         "<a href=\"https://colab.research.google.com/github/hzwer/Practical-RIFE/blob/main/Colab_demo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "FypCcZkNNt2p"
+       },
+       "source": [
+         "%cd /content\n",
+         "!git clone https://github.com/hzwer/Practical-RIFE"
+       ],
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "1wysVHxoN54f"
+       },
+       "source": [
+         "!gdown --id 1O5KfS3KzZCY3imeCr2LCsntLhutKuAqj\n",
+         "!7z e Practical-RIFE/RIFE_trained_model_v3.8.zip"
+       ],
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "AhbHfRBJRAUt"
+       },
+       "source": [
+         "!mkdir /content/Practical-RIFE/train_log\n",
+         "!mv *.py /content/Practical-RIFE/train_log/\n",
+         "!mv *.pkl /content/Practical-RIFE/train_log/\n",
+         "%cd /content/Practical-RIFE/\n",
+         "!gdown --id 1i3xlKb7ax7Y70khcTcuePi6E7crO_dFc\n",
+         "!pip3 install -r requirements.txt"
+       ],
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "rirngW5uRMdg"
+       },
+       "source": [
+         "Please upload your video to /content/Practical-RIFE/video.mp4, or use our demo video."
+       ]
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "dnLn4aHHPzN3"
+       },
+       "source": [
+         "!nvidia-smi\n",
+         "!python3 inference_video.py --exp=1 --video=demo.mp4 --montage --skip"
+       ],
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "77KK6lxHgJhf"
+       },
+       "source": [
+         "Our demo.mp4 is 25 FPS. You can adjust the parameters to your own preference.\n",
+         "For example:\n",
+         "--fps=60 --exp=1 --video=mydemo.avi --png"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "0zIBbVE3UfUD",
+         "cellView": "code"
+       },
+       "source": [
+         "from IPython.display import display, Image\n",
+         "import moviepy.editor as mpy\n",
+         "display(mpy.ipython_display('demo_4X_100fps.mp4', height=256, max_duration=100.))"
+       ],
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "tWkJCNgP3zXA"
+       },
+       "source": [
+         "# Image interpolation\n",
+         "!python3 inference_img.py --img demo/I0_0.png demo/I0_1.png\n",
+         "!ffmpeg -r 10 -f image2 -i output/img%d.png -s 448x256 -vf \"split[s0][s1];[s0]palettegen=stats_mode=single[p];[s1][p]paletteuse=new=1\" output/slomo.gif"
+       ],
+       "execution_count": null,
+       "outputs": []
+     }
+   ]
+ }
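The --exp flag used in the notebook multiplies the frame rate by 2**exp: the 25 FPS demo with --exp=1 plays back at 50 FPS, and the demo_4X_100fps.mp4 shown in the display cell corresponds to --exp=2. A minimal sketch of that arithmetic (the helper name is illustrative, not part of the repository):

# Illustrative helper (not part of this commit): frame rate after
# interpolating with --exp, assuming each step doubles the frame count.
def output_fps(source_fps: float, exp: int) -> float:
    return source_fps * 2 ** exp

assert output_fps(25, 1) == 50    # the notebook's --exp=1 run
assert output_fps(25, 2) == 100   # the 4X, 100 FPS demo file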
LICENSE ADDED
@@ -0,0 +1,661 @@
+                     GNU AFFERO GENERAL PUBLIC LICENSE
+                        Version 3, 19 November 2007
+
+  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+  Everyone is permitted to copy and distribute verbatim copies
+  of this license document, but changing it is not allowed.
+
+                             Preamble
+
+   The GNU Affero General Public License is a free, copyleft license for
+ software and other kinds of works, specifically designed to ensure
+ cooperation with the community in the case of network server software.
+
+   The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ our General Public Licenses are intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users.
+
+   When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+   Developers that use our General Public Licenses protect your rights
+ with two steps: (1) assert copyright on the software, and (2) offer
+ you this License which gives you legal permission to copy, distribute
+ and/or modify the software.
+
+   A secondary benefit of defending all users' freedom is that
+ improvements made in alternate versions of the program, if they
+ receive widespread use, become available for other developers to
+ incorporate. Many developers of free software are heartened and
+ encouraged by the resulting cooperation. However, in the case of
+ software used on network servers, this result may fail to come about.
+ The GNU General Public License permits making a modified version and
+ letting the public access it on a server without ever releasing its
+ source code to the public.
+
+   The GNU Affero General Public License is designed specifically to
+ ensure that, in such cases, the modified source code becomes available
+ to the community. It requires the operator of a network server to
+ provide the source code of the modified version running there to the
+ users of that server. Therefore, public use of a modified version, on
+ a publicly accessible server, gives the public access to the source
+ code of the modified version.
+
+   An older license, called the Affero General Public License and
+ published by Affero, was designed to accomplish similar goals. This is
+ a different license, not a version of the Affero GPL, but Affero has
+ released a new version of the Affero GPL which permits relicensing under
+ this license.
+
+   The precise terms and conditions for copying, distribution and
+ modification follow.
+
+                        TERMS AND CONDITIONS
+
+   0. Definitions.
+
+   "This License" refers to version 3 of the GNU Affero General Public License.
+
+   "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+   "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+   To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+   A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+   To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+   To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+   An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+   1. Source Code.
+
+   The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+   A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+   The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+   The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+   The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+   The Corresponding Source for a work in source code form is that
+ same work.
+
+   2. Basic Permissions.
+
+   All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+   You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+   Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+   3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+   No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+   When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+   4. Conveying Verbatim Copies.
+
+   You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+   You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+   5. Conveying Modified Source Versions.
+
+   You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+     a) The work must carry prominent notices stating that you modified
+     it, and giving a relevant date.
+
+     b) The work must carry prominent notices stating that it is
+     released under this License and any conditions added under section
+     7. This requirement modifies the requirement in section 4 to
+     "keep intact all notices".
+
+     c) You must license the entire work, as a whole, under this
+     License to anyone who comes into possession of a copy. This
+     License will therefore apply, along with any applicable section 7
+     additional terms, to the whole of the work, and all its parts,
+     regardless of how they are packaged. This License gives no
+     permission to license the work in any other way, but it does not
+     invalidate such permission if you have separately received it.
+
+     d) If the work has interactive user interfaces, each must display
+     Appropriate Legal Notices; however, if the Program has interactive
+     interfaces that do not display Appropriate Legal Notices, your
+     work need not make them do so.
+
+   A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+   6. Conveying Non-Source Forms.
+
+   You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+     a) Convey the object code in, or embodied in, a physical product
+     (including a physical distribution medium), accompanied by the
+     Corresponding Source fixed on a durable physical medium
+     customarily used for software interchange.
+
+     b) Convey the object code in, or embodied in, a physical product
+     (including a physical distribution medium), accompanied by a
+     written offer, valid for at least three years and valid for as
+     long as you offer spare parts or customer support for that product
+     model, to give anyone who possesses the object code either (1) a
+     copy of the Corresponding Source for all the software in the
+     product that is covered by this License, on a durable physical
+     medium customarily used for software interchange, for a price no
+     more than your reasonable cost of physically performing this
+     conveying of source, or (2) access to copy the
+     Corresponding Source from a network server at no charge.
+
+     c) Convey individual copies of the object code with a copy of the
+     written offer to provide the Corresponding Source. This
+     alternative is allowed only occasionally and noncommercially, and
+     only if you received the object code with such an offer, in accord
+     with subsection 6b.
+
+     d) Convey the object code by offering access from a designated
+     place (gratis or for a charge), and offer equivalent access to the
+     Corresponding Source in the same way through the same place at no
+     further charge. You need not require recipients to copy the
+     Corresponding Source along with the object code. If the place to
+     copy the object code is a network server, the Corresponding Source
+     may be on a different server (operated by you or a third party)
+     that supports equivalent copying facilities, provided you maintain
+     clear directions next to the object code saying where to find the
+     Corresponding Source. Regardless of what server hosts the
+     Corresponding Source, you remain obligated to ensure that it is
+     available for as long as needed to satisfy these requirements.
+
+     e) Convey the object code using peer-to-peer transmission, provided
+     you inform other peers where the object code and Corresponding
+     Source of the work are being offered to the general public at no
+     charge under subsection 6d.
+
+   A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+   A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+   "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+   If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+   The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+   Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+   7. Additional Terms.
+
+   "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+   When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+   Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+     a) Disclaiming warranty or limiting liability differently from the
+     terms of sections 15 and 16 of this License; or
+
+     b) Requiring preservation of specified reasonable legal notices or
+     author attributions in that material or in the Appropriate Legal
+     Notices displayed by works containing it; or
+
+     c) Prohibiting misrepresentation of the origin of that material, or
+     requiring that modified versions of such material be marked in
+     reasonable ways as different from the original version; or
+
+     d) Limiting the use for publicity purposes of names of licensors or
+     authors of the material; or
+
+     e) Declining to grant rights under trademark law for use of some
+     trade names, trademarks, or service marks; or
+
+     f) Requiring indemnification of licensors and authors of that
+     material by anyone who conveys the material (or modified versions of
+     it) with contractual assumptions of liability to the recipient, for
+     any liability that these contractual assumptions directly impose on
+     those licensors and authors.
+
+   All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+   If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+   Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+   8. Termination.
+
+   You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+   However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+   Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+   Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+   9. Acceptance Not Required for Having Copies.
+
+   You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+   10. Automatic Licensing of Downstream Recipients.
+
+   Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+   An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+   You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
+
+   11. Patents.
+
+   A "contributor" is a copyright holder who authorizes use under this
+ License of the Program or a work on which the Program is based. The
+ work thus licensed is called the contributor's "contributor version".
+
+   A contributor's "essential patent claims" are all patent claims
+ owned or controlled by the contributor, whether already acquired or
+ hereafter acquired, that would be infringed by some manner, permitted
+ by this License, of making, using, or selling its contributor version,
+ but do not include claims that would be infringed only as a
+ consequence of further modification of the contributor version. For
+ purposes of this definition, "control" includes the right to grant
+ patent sublicenses in a manner consistent with the requirements of
+ this License.
+
+   Each contributor grants you a non-exclusive, worldwide, royalty-free
+ patent license under the contributor's essential patent claims, to
+ make, use, sell, offer for sale, import and otherwise run, modify and
+ propagate the contents of its contributor version.
+
+   In the following three paragraphs, a "patent license" is any express
+ agreement or commitment, however denominated, not to enforce a patent
+ (such as an express permission to practice a patent or covenant not to
+ sue for patent infringement). To "grant" such a patent license to a
+ party means to make such an agreement or commitment not to enforce a
+ patent against the party.
+
+   If you convey a covered work, knowingly relying on a patent license,
+ and the Corresponding Source of the work is not available for anyone
+ to copy, free of charge and under the terms of this License, through a
+ publicly available network server or other readily accessible means,
+ then you must either (1) cause the Corresponding Source to be so
+ available, or (2) arrange to deprive yourself of the benefit of the
+ patent license for this particular work, or (3) arrange, in a manner
+ consistent with the requirements of this License, to extend the patent
+ license to downstream recipients. "Knowingly relying" means you have
+ actual knowledge that, but for the patent license, your conveying the
+ covered work in a country, or your recipient's use of the covered work
+ in a country, would infringe one or more identifiable patents in that
+ country that you have reason to believe are valid.
+
+   If, pursuant to or in connection with a single transaction or
+ arrangement, you convey, or propagate by procuring conveyance of, a
+ covered work, and grant a patent license to some of the parties
+ receiving the covered work authorizing them to use, propagate, modify
+ or convey a specific copy of the covered work, then the patent license
+ you grant is automatically extended to all recipients of the covered
+ work and works based on it.
+
+   A patent license is "discriminatory" if it does not include within
+ the scope of its coverage, prohibits the exercise of, or is
+ conditioned on the non-exercise of one or more of the rights that are
+ specifically granted under this License. You may not convey a covered
+ work if you are a party to an arrangement with a third party that is
+ in the business of distributing software, under which you make payment
+ to the third party based on the extent of your activity of conveying
+ the work, and under which the third party grants, to any of the
+ parties who would receive the covered work from you, a discriminatory
+ patent license (a) in connection with copies of the covered work
+ conveyed by you (or copies made from those copies), or (b) primarily
+ for and in connection with specific products or compilations that
+ contain the covered work, unless you entered into that arrangement,
+ or that patent license was granted, prior to 28 March 2007.
+
+   Nothing in this License shall be construed as excluding or limiting
+ any implied license or other defenses to infringement that may
+ otherwise be available to you under applicable patent law.
+
+   12. No Surrender of Others' Freedom.
+
+   If conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot convey a
+ covered work so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you may
+ not convey it at all. For example, if you agree to terms that obligate you
+ to collect a royalty for further conveying from those to whom you convey
+ the Program, the only way you could satisfy both those terms and this
+ License would be to refrain entirely from conveying the Program.
+
+   13. Remote Network Interaction; Use with the GNU General Public License.
+
+   Notwithstanding any other provision of this License, if you modify the
+ Program, your modified version must prominently offer all users
+ interacting with it remotely through a computer network (if your version
+ supports such interaction) an opportunity to receive the Corresponding
+ Source of your version by providing access to the Corresponding Source
+ from a network server at no charge, through some standard or customary
+ means of facilitating copying of software. This Corresponding Source
+ shall include the Corresponding Source for any work covered by version 3
+ of the GNU General Public License that is incorporated pursuant to the
+ following paragraph.
+
+   Notwithstanding any other provision of this License, you have
+ permission to link or combine any covered work with a work licensed
+ under version 3 of the GNU General Public License into a single
+ combined work, and to convey the resulting work. The terms of this
+ License will continue to apply to the part which is the covered work,
+ but the work with which it is combined will remain governed by version
+ 3 of the GNU General Public License.
+
+   14. Revised Versions of this License.
+
+   The Free Software Foundation may publish revised and/or new versions of
+ the GNU Affero General Public License from time to time. Such new versions
+ will be similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+   Each version is given a distinguishing version number. If the
+ Program specifies that a certain numbered version of the GNU Affero General
+ Public License "or any later version" applies to it, you have the
+ option of following the terms and conditions either of that numbered
+ version or of any later version published by the Free Software
+ Foundation. If the Program does not specify a version number of the
+ GNU Affero General Public License, you may choose any version ever published
+ by the Free Software Foundation.
+
+   If the Program specifies that a proxy can decide which future
+ versions of the GNU Affero General Public License can be used, that proxy's
+ public statement of acceptance of a version permanently authorizes you
+ to choose that version for the Program.
+
+   Later license versions may give you additional or different
+ permissions. However, no additional obligations are imposed on any
+ author or copyright holder as a result of your choosing to follow a
+ later version.
+
+   15. Disclaimer of Warranty.
+
+   THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+   16. Limitation of Liability.
+
+   IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGES.
+
+   17. Interpretation of Sections 15 and 16.
+
+   If the disclaimer of warranty and limitation of liability provided
+ above cannot be given local legal effect according to their terms,
+ reviewing courts shall apply local law that most closely approximates
+ an absolute waiver of all civil liability in connection with the
+ Program, unless a warranty or assumption of liability accompanies a
+ copy of the Program in return for a fee.
+
+                      END OF TERMS AND CONDITIONS
+
+             How to Apply These Terms to Your New Programs
+
+   If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+   To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+     <one line to give the program's name and a brief idea of what it does.>
+     Copyright (C) <year> <name of author>
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU Affero General Public License as published
+     by the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+     GNU Affero General Public License for more details.
+
+     You should have received a copy of the GNU Affero General Public License
+     along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+   Also add information on how to contact you by electronic and paper mail.
+
+   If your software can interact with users remotely through a computer
+ network, you should also make sure that it provides a way for users to
+ get its source. For example, if your program is a web application, its
+ interface could display a "Source" link that leads users to an archive
+ of the code. There are many ways you could offer source, and different
+ solutions will be better for different programs; see section 13 for the
+ specific requirements.
+
+   You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU AGPL, see
+ <https://www.gnu.org/licenses/>.
Practical-RIFE DELETED
@@ -1 +0,0 @@
- Subproject commit f3e48ceb02e4c21bc8868b03994b98f3402ffb3d
Practical-RIFE/Colab_demo.ipynb ADDED
@@ -0,0 +1,127 @@
+ {
+   "nbformat": 4,
+   "nbformat_minor": 0,
+   "metadata": {
+     "colab": {
+       "name": "Untitled0.ipynb",
+       "provenance": [],
+       "include_colab_link": true
+     },
+     "kernelspec": {
+       "name": "python3",
+       "display_name": "Python 3"
+     },
+     "accelerator": "GPU"
+   },
+   "cells": [
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "view-in-github",
+         "colab_type": "text"
+       },
+       "source": [
+         "<a href=\"https://colab.research.google.com/github/hzwer/Practical-RIFE/blob/main/Colab_demo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "FypCcZkNNt2p"
+       },
+       "source": [
+         "%cd /content\n",
+         "!git clone https://github.com/hzwer/Practical-RIFE"
+       ],
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "1wysVHxoN54f"
+       },
+       "source": [
+         "!gdown --id 1O5KfS3KzZCY3imeCr2LCsntLhutKuAqj\n",
+         "!7z e Practical-RIFE/RIFE_trained_model_v3.8.zip"
+       ],
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "AhbHfRBJRAUt"
+       },
+       "source": [
+         "!mkdir /content/Practical-RIFE/train_log\n",
+         "!mv *.py /content/Practical-RIFE/train_log/\n",
+         "!mv *.pkl /content/Practical-RIFE/train_log/\n",
+         "%cd /content/Practical-RIFE/\n",
+         "!gdown --id 1i3xlKb7ax7Y70khcTcuePi6E7crO_dFc\n",
+         "!pip3 install -r requirements.txt"
+       ],
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "rirngW5uRMdg"
+       },
+       "source": [
+         "Please upload your video to /content/Practical-RIFE/video.mp4, or use our demo video."
+       ]
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "dnLn4aHHPzN3"
+       },
+       "source": [
+         "!nvidia-smi\n",
+         "!python3 inference_video.py --exp=1 --video=demo.mp4 --montage --skip"
+       ],
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "77KK6lxHgJhf"
+       },
+       "source": [
+         "Our demo.mp4 is 25 FPS. You can adjust the parameters to your own preference.\n",
+         "For example:\n",
+         "--fps=60 --exp=1 --video=mydemo.avi --png"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "0zIBbVE3UfUD",
+         "cellView": "code"
+       },
+       "source": [
+         "from IPython.display import display, Image\n",
+         "import moviepy.editor as mpy\n",
+         "display(mpy.ipython_display('demo_4X_100fps.mp4', height=256, max_duration=100.))"
+       ],
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "metadata": {
+         "id": "tWkJCNgP3zXA"
+       },
+       "source": [
+         "# Image interpolation\n",
+         "!python3 inference_img.py --img demo/I0_0.png demo/I0_1.png\n",
+         "!ffmpeg -r 10 -f image2 -i output/img%d.png -s 448x256 -vf \"split[s0][s1];[s0]palettegen=stats_mode=single[p];[s1][p]paletteuse=new=1\" output/slomo.gif"
+       ],
+       "execution_count": null,
+       "outputs": []
+     }
+   ]
+ }
Practical-RIFE/inference_img.py ADDED
@@ -0,0 +1,118 @@
+ import os
+ import cv2
+ import torch
+ import argparse
+ from torch.nn import functional as F
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.set_grad_enabled(False)
+ if torch.cuda.is_available():
+     torch.backends.cudnn.enabled = True
+     torch.backends.cudnn.benchmark = True
+
+ parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
+ parser.add_argument('--img', dest='img', nargs=2, required=True)
+ parser.add_argument('--exp', default=4, type=int)
+ parser.add_argument('--ratio', default=0, type=float, help='inference ratio between two images with 0 - 1 range')
+ parser.add_argument('--rthreshold', default=0.02, type=float, help='returns image when actual ratio falls in given range threshold')
+ parser.add_argument('--rmaxcycles', default=8, type=int, help='limit max number of bisectional cycles')
+ parser.add_argument('--model', dest='modelDir', type=str, default='train_log', help='directory with trained model files')
+
+ args = parser.parse_args()
+
+ # Try newer model formats first, falling back to older ones.
+ try:
+     try:
+         from model.RIFE_HDv2 import Model
+         model = Model()
+         model.load_model(args.modelDir, -1)
+         print("Loaded v2.x HD model.")
+     except:
+         from train_log.RIFE_HDv3 import Model
+         model = Model()
+         model.load_model(args.modelDir, -1)
+         print("Loaded v3.x HD model.")
+ except:
+     from model.RIFE_HD import Model
+     model = Model()
+     model.load_model(args.modelDir, -1)
+     print("Loaded v1.x HD model.")
+ if not hasattr(model, 'version'):
+     model.version = 0
+ model.eval()
+ model.device()
+
+ if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
+     img0 = cv2.imread(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
+     img1 = cv2.imread(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
+     img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device)).unsqueeze(0)
+     img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device)).unsqueeze(0)
+ else:
+     img0 = cv2.imread(args.img[0], cv2.IMREAD_UNCHANGED)
+     img1 = cv2.imread(args.img[1], cv2.IMREAD_UNCHANGED)
+     img0 = cv2.resize(img0, (448, 256))
+     img1 = cv2.resize(img1, (448, 256))
+     img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
+     img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
+
+ # Pad to a multiple of 64 so the network's downsampling stages line up.
+ n, c, h, w = img0.shape
+ ph = ((h - 1) // 64 + 1) * 64
+ pw = ((w - 1) // 64 + 1) * 64
+ padding = (0, pw - w, 0, ph - h)
+ img0 = F.pad(img0, padding)
+ img1 = F.pad(img1, padding)
+
+ if args.ratio:
+     if model.version >= 3.9:
+         img_list = [img0, model.inference(img0, img1, args.ratio), img1]
+     else:
+         # Older models only predict the midpoint, so approximate an
+         # arbitrary ratio by bisection; start the list with the first frame.
+         img_list = [img0]
+         img0_ratio = 0.0
+         img1_ratio = 1.0
+         if args.ratio <= img0_ratio + args.rthreshold / 2:
+             middle = img0
+         elif args.ratio >= img1_ratio - args.rthreshold / 2:
+             middle = img1
+         else:
+             tmp_img0 = img0
+             tmp_img1 = img1
+             for inference_cycle in range(args.rmaxcycles):
+                 middle = model.inference(tmp_img0, tmp_img1)
+                 middle_ratio = (img0_ratio + img1_ratio) / 2
+                 if args.ratio - (args.rthreshold / 2) <= middle_ratio <= args.ratio + (args.rthreshold / 2):
+                     break
+                 if args.ratio > middle_ratio:
+                     tmp_img0 = middle
+                     img0_ratio = middle_ratio
+                 else:
+                     tmp_img1 = middle
+                     img1_ratio = middle_ratio
+         img_list.append(middle)
+         img_list.append(img1)
+ else:
+     if model.version >= 3.9:
+         img_list = [img0]
+         n = 2 ** args.exp
+         for i in range(n - 1):
+             img_list.append(model.inference(img0, img1, (i + 1) * 1. / n))
+         img_list.append(img1)
+     else:
+         img_list = [img0, img1]
+         for i in range(args.exp):
+             tmp = []
+             for j in range(len(img_list) - 1):
+                 mid = model.inference(img_list[j], img_list[j + 1])
+                 tmp.append(img_list[j])
+                 tmp.append(mid)
+             tmp.append(img1)
+             img_list = tmp
+
+ if not os.path.exists('output'):
+     os.mkdir('output')
+ for i in range(len(img_list)):
+     if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
+         cv2.imwrite('output/img{}.exr'.format(i), (img_list[i][0]).cpu().numpy().transpose(1, 2, 0)[:h, :w], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
+     else:
+         cv2.imwrite('output/img{}.png'.format(i), (img_list[i][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
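When no explicit --ratio is given and the model predates v3.9, the script above doubles the frame list args.exp times, inserting a midpoint between every adjacent pair. A schematic restatement of that loop, with a toy interpolate standing in for model.inference and plain numbers standing in for image tensors:

# Toy restatement of the script's pre-v3.9 doubling loop: exp passes of
# midpoint insertion yield 2**exp - 1 new frames between the endpoints.
def midpoint_expand(frames, interpolate, exp):
    for _ in range(exp):
        expanded = []
        for a, b in zip(frames, frames[1:]):
            expanded += [a, interpolate(a, b)]
        expanded.append(frames[-1])
        frames = expanded
    return frames

print(midpoint_expand([0.0, 1.0], lambda a, b: (a + b) / 2, 2))
# -> [0.0, 0.25, 0.5, 0.75, 1.0]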
Practical-RIFE/inference_img_SR.py ADDED
@@ -0,0 +1,69 @@
+ import os
+ import cv2
+ import torch
+ import argparse
+ from torch.nn import functional as F
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.set_grad_enabled(False)
+ if torch.cuda.is_available():
+     torch.backends.cudnn.enabled = True
+     torch.backends.cudnn.benchmark = True
+
+ parser = argparse.ArgumentParser(description='STVSR for a pair of images')
+ parser.add_argument('--img', dest='img', nargs=2, required=True)
+ parser.add_argument('--exp', default=2, type=int)
+ parser.add_argument('--ratio', default=0, type=float, help='inference ratio between two images with 0 - 1 range')
+ parser.add_argument('--model', dest='modelDir', type=str, default='train_log', help='directory with trained model files')
+
+ args = parser.parse_args()
+
+ from train_log.model import Model
+ model = Model()
+ model.device()
+ model.load_model('train_log')
+ model.eval()
+
+ if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
+     img0 = cv2.imread(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
+     img1 = cv2.imread(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
+     img0 = cv2.resize(img0, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     img1 = cv2.resize(img1, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device)).unsqueeze(0)
+     img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device)).unsqueeze(0)
+ else:
+     img0 = cv2.imread(args.img[0], cv2.IMREAD_UNCHANGED)
+     img1 = cv2.imread(args.img[1], cv2.IMREAD_UNCHANGED)
+     img0 = cv2.resize(img0, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     img1 = cv2.resize(img1, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
+     img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
+
+ # Pad to a multiple of 32 so the network's downsampling stages line up.
+ n, c, h, w = img0.shape
+ ph = ((h - 1) // 32 + 1) * 32
+ pw = ((w - 1) // 32 + 1) * 32
+ padding = (0, pw - w, 0, ph - h)
+ img0 = F.pad(img0, padding)
+ img1 = F.pad(img1, padding)
+
+ if args.ratio:
+     print('ratio={}'.format(args.ratio))
+     img_list = model.inference(img0, img1, timestep=args.ratio)
+ else:
+     n = 2 ** args.exp - 1
+     time_list = [0]
+     for i in range(n):
+         time_list.append((i + 1) * 1. / (n + 1))
+     time_list.append(1)
+     print(time_list)
+     img_list = model.inference(img0, img1, timestep=time_list)
+
+ if not os.path.exists('output'):
+     os.mkdir('output')
+ for i in range(len(img_list)):
+     if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
+         cv2.imwrite('output/img{}.exr'.format(i), (img_list[i][0]).cpu().numpy().transpose(1, 2, 0)[:h, :w], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
+     else:
+         cv2.imwrite('output/img{}.png'.format(i), (img_list[i][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
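Without --ratio, this script asks the model for all 2**exp - 1 intermediate frames in a single call by building a list of evenly spaced timesteps. Traced by hand for the default --exp=2:

# The timestep list built above, evaluated for exp=2.
exp = 2
n = 2 ** exp - 1                                    # three intermediate frames
time_list = [0] + [(i + 1) / (n + 1) for i in range(n)] + [1]
print(time_list)                                    # [0, 0.25, 0.5, 0.75, 1]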
Practical-RIFE/inference_video.py ADDED
@@ -0,0 +1,293 @@
1
+ import os
2
+ import cv2
3
+ import torch
4
+ import argparse
5
+ import numpy as np
6
+ from tqdm import tqdm
7
+ from torch.nn import functional as F
8
+ import warnings
9
+ import _thread
10
+ import skvideo.io
11
+ from queue import Queue, Empty
12
+ from model.pytorch_msssim import ssim_matlab
13
+
14
+ warnings.filterwarnings("ignore")
15
+
16
+ def transferAudio(sourceVideo, targetVideo):
17
+ import shutil
18
+ import moviepy.editor
19
+ tempAudioFileName = "./temp/audio.mkv"
20
+
21
+ # split audio from original video file and store in "temp" directory
22
+ if True:
23
+
24
+ # clear old "temp" directory if it exits
25
+ if os.path.isdir("temp"):
26
+ # remove temp directory
27
+ shutil.rmtree("temp")
28
+ # create new "temp" directory
29
+ os.makedirs("temp")
30
+ # extract audio from video
31
+ os.system('ffmpeg -y -i "{}" -c:a copy -vn {}'.format(sourceVideo, tempAudioFileName))
32
+
33
+ targetNoAudio = os.path.splitext(targetVideo)[0] + "_noaudio" + os.path.splitext(targetVideo)[1]
34
+ os.rename(targetVideo, targetNoAudio)
35
+ # combine audio file and new video file
36
+ os.system('ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(targetNoAudio, tempAudioFileName, targetVideo))
37
+
38
+ if os.path.getsize(targetVideo) == 0: # if ffmpeg failed to merge the video and audio together try converting the audio to aac
39
+ tempAudioFileName = "./temp/audio.m4a"
40
+ os.system('ffmpeg -y -i "{}" -c:a aac -b:a 160k -vn {}'.format(sourceVideo, tempAudioFileName))
41
+ os.system('ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(targetNoAudio, tempAudioFileName, targetVideo))
42
+ if (os.path.getsize(targetVideo) == 0): # if aac is not supported by selected format
43
+ os.rename(targetNoAudio, targetVideo)
44
+ print("Audio transfer failed. Interpolated video will have no audio")
45
+ else:
46
+ print("Lossless audio transfer failed. Audio was transcoded to AAC (M4A) instead.")
47
+
48
+ # remove audio-less video
49
+ os.remove(targetNoAudio)
50
+ else:
51
+ os.remove(targetNoAudio)
52
+
53
+ # remove temp directory
54
+ shutil.rmtree("temp")
55
+
56
+ parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
57
+ parser.add_argument('--video', dest='video', type=str, default=None)
58
+ parser.add_argument('--output', dest='output', type=str, default=None)
59
+ parser.add_argument('--img', dest='img', type=str, default=None)
60
+ parser.add_argument('--montage', dest='montage', action='store_true', help='montage origin video')
61
+ parser.add_argument('--model', dest='modelDir', type=str, default='train_log', help='directory with trained model files')
62
+ parser.add_argument('--fp16', dest='fp16', action='store_true', help='fp16 mode for faster and more lightweight inference on cards with Tensor Cores')
63
+ parser.add_argument('--UHD', dest='UHD', action='store_true', help='support 4k video')
64
+ parser.add_argument('--scale', dest='scale', type=float, default=1.0, help='Try scale=0.5 for 4k video')
65
+ parser.add_argument('--skip', dest='skip', action='store_true', help='whether to remove static frames before processing')
66
+ parser.add_argument('--fps', dest='fps', type=int, default=None)
67
+ parser.add_argument('--png', dest='png', action='store_true', help='whether to vid_out png format vid_outs')
68
+ parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='vid_out video extension')
69
+ parser.add_argument('--exp', dest='exp', type=int, default=1)
70
+ parser.add_argument('--multi', dest='multi', type=int, default=2)
71
+
72
+ args = parser.parse_args()
73
+ if args.exp != 1:
74
+ args.multi = (2 ** args.exp)
75
+ assert (not args.video is None or not args.img is None)
76
+ if args.skip:
77
+ print("skip flag is abandoned, please refer to issue #207.")
78
+ if args.UHD and args.scale==1.0:
79
+ args.scale = 0.5
80
+ assert args.scale in [0.25, 0.5, 1.0, 2.0, 4.0]
81
+ if not args.img is None:
82
+ args.png = True
83
+
84
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
85
+ torch.set_grad_enabled(False)
86
+ if torch.cuda.is_available():
87
+ torch.backends.cudnn.enabled = True
88
+ torch.backends.cudnn.benchmark = True
89
+ if(args.fp16):
90
+ torch.set_default_tensor_type(torch.cuda.HalfTensor)
91
+
92
+ try:
93
+ from train_log.RIFE_HDv3 import Model
94
+ except:
95
+ print("Please download our model from model list")
96
+ model = Model()
97
+ if not hasattr(model, 'version'):
98
+ model.version = 0
99
+ model.load_model(args.modelDir, -1)
100
+ print("Loaded 3.x/4.x HD model.")
101
+ model.eval()
102
+ model.device()
103
+
104
+ if not args.video is None:
105
+ videoCapture = cv2.VideoCapture(args.video)
106
+ fps = videoCapture.get(cv2.CAP_PROP_FPS)
107
+ tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
108
+ videoCapture.release()
109
+ if args.fps is None:
110
+ fpsNotAssigned = True
111
+ args.fps = fps * args.multi
112
+ else:
113
+ fpsNotAssigned = False
114
+ videogen = skvideo.io.vreader(args.video)
115
+ lastframe = next(videogen)
116
+ fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
117
+ video_path_wo_ext, ext = os.path.splitext(args.video)
118
+ print('{}.{}, {} frames in total, {}FPS to {}FPS'.format(video_path_wo_ext, args.ext, tot_frame, fps, args.fps))
119
+ if args.png == False and fpsNotAssigned == True:
120
+ print("The audio will be merged after interpolation process")
121
+ else:
122
+ print("Will not merge audio because using png or fps flag!")
123
+ else:
124
+ videogen = []
125
+ for f in os.listdir(args.img):
126
+ if 'png' in f:
127
+ videogen.append(f)
128
+ tot_frame = len(videogen)
129
+ videogen.sort(key= lambda x:int(x[:-4]))
130
+ lastframe = cv2.imread(os.path.join(args.img, videogen[0]), cv2.IMREAD_UNCHANGED)[:, :, ::-1].copy()
131
+ videogen = videogen[1:]
132
+ h, w, _ = lastframe.shape
133
+ vid_out_name = None
134
+ vid_out = None
135
+ if args.png:
136
+ if not os.path.exists('vid_out'):
137
+ os.mkdir('vid_out')
138
+ else:
139
+ if args.output is not None:
140
+ print("Out")
141
+ vid_out_name = args.output
142
+ else:
143
+ vid_out_name = '{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.multi, int(np.round(args.fps)), args.ext)
144
+ print("Width is ", w," and height is ", h)
145
+ vid_out = cv2.VideoWriter(vid_out_name, fourcc, args.fps, (w, h))
146
+
147
+ def clear_write_buffer(user_args, write_buffer):
148
+ cnt = 0
149
+ while True:
150
+ item = write_buffer.get()
151
+ if item is None:
152
+ break
153
+ if user_args.png:
154
+ cv2.imwrite('vid_out/{:0>7d}.png'.format(cnt), item[:, :, ::-1])
155
+ cnt += 1
156
+ else:
157
+ vid_out.write(item[:, :, ::-1])
158
+
159
+ def build_read_buffer(user_args, read_buffer, videogen):
160
+ try:
161
+ for frame in videogen:
162
+ if not user_args.img is None:
163
+ frame = cv2.imread(os.path.join(user_args.img, frame), cv2.IMREAD_UNCHANGED)[:, :, ::-1].copy()
164
+ if user_args.montage:
165
+ frame = frame[:, left: left + w]
166
+ read_buffer.put(frame)
167
+ except:
168
+ pass
169
+ read_buffer.put(None)
170
+
171
+ def make_inference(I0, I1, n):
172
+ global model
173
+ if model.version >= 3.9:
174
+ res = []
175
+ for i in range(n):
176
+ res.append(model.inference(I0, I1, (i+1) * 1. / (n+1), args.scale))
177
+ return res
178
+ else:
179
+ middle = model.inference(I0, I1, args.scale)
180
+ if n == 1:
181
+ return [middle]
182
+ first_half = make_inference(I0, middle, n=n//2)
183
+ second_half = make_inference(middle, I1, n=n//2)
184
+ if n%2:
185
+ return [*first_half, middle, *second_half]
186
+ else:
187
+ return [*first_half, *second_half]
188
+
189
+ def pad_image(img):
190
+ if(args.fp16):
191
+ return F.pad(img, padding).half()
192
+ else:
193
+ return F.pad(img, padding)
194
+
195
+ if args.montage:
196
+ left = w // 4
197
+ w = w // 2
198
+ tmp = max(128, int(128 / args.scale))
199
+ ph = ((h - 1) // tmp + 1) * tmp
200
+ pw = ((w - 1) // tmp + 1) * tmp
201
+ padding = (0, pw - w, 0, ph - h)
202
+ pbar = tqdm(total=tot_frame)
203
+ if args.montage:
204
+ lastframe = lastframe[:, left: left + w]
205
+ write_buffer = Queue(maxsize=500)
206
+ read_buffer = Queue(maxsize=500)
207
+ _thread.start_new_thread(build_read_buffer, (args, read_buffer, videogen))
208
+ _thread.start_new_thread(clear_write_buffer, (args, write_buffer))
209
+
210
+ I1 = torch.from_numpy(np.transpose(lastframe, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
211
+ I1 = pad_image(I1)
212
+ temp = None # save lastframe when processing static frame
213
+
214
+ while True:
215
+ if temp is not None:
216
+ frame = temp
217
+ temp = None
218
+ else:
219
+ frame = read_buffer.get()
220
+ if frame is None:
221
+ break
222
+ I0 = I1
223
+ I1 = torch.from_numpy(np.transpose(frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
224
+ I1 = pad_image(I1)
225
+ I0_small = F.interpolate(I0, (32, 32), mode='bilinear', align_corners=False)
226
+ I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
227
+ ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3])
228
+
229
+ break_flag = False
230
+ if ssim > 0.996:
231
+ frame = read_buffer.get() # read a new frame
232
+ if frame is None:
233
+ break_flag = True
234
+ frame = lastframe
235
+ else:
236
+ temp = frame
237
+ I1 = torch.from_numpy(np.transpose(frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
238
+ I1 = pad_image(I1)
239
+ I1 = model.inference(I0, I1, args.scale)
240
+ I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
241
+ ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3])
242
+ frame = (I1[0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w]
243
+
244
+ if ssim < 0.2:
245
+ output = []
246
+ for i in range(args.multi - 1):
247
+ output.append(I0)
248
+ '''
249
+ output = []
250
+ step = 1 / args.multi
251
+ alpha = 0
252
+ for i in range(args.multi - 1):
253
+ alpha += step
254
+ beta = 1-alpha
255
+ output.append(torch.from_numpy(np.transpose((cv2.addWeighted(frame[:, :, ::-1], alpha, lastframe[:, :, ::-1], beta, 0)[:, :, ::-1].copy()), (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.)
256
+ '''
257
+ else:
258
+ output = make_inference(I0, I1, args.multi-1)
259
+
260
+ if args.montage:
261
+ write_buffer.put(np.concatenate((lastframe, lastframe), 1))
262
+ for mid in output:
263
+ mid = (((mid[0] * 255.).byte().cpu().numpy().transpose(1, 2, 0)))
264
+ write_buffer.put(np.concatenate((lastframe, mid[:h, :w]), 1))
265
+ else:
266
+ write_buffer.put(lastframe)
267
+ for mid in output:
268
+ mid = (((mid[0] * 255.).byte().cpu().numpy().transpose(1, 2, 0)))
269
+ write_buffer.put(mid[:h, :w])
270
+ pbar.update(1)
271
+ lastframe = frame
272
+ if break_flag:
273
+ break
274
+
275
+ if args.montage:
276
+ write_buffer.put(np.concatenate((lastframe, lastframe), 1))
277
+ else:
278
+ write_buffer.put(lastframe)
279
+ import time
280
+ while(not write_buffer.empty()):
281
+ time.sleep(0.1)
282
+ pbar.close()
283
+ if not vid_out is None:
284
+ vid_out.release()
285
+
286
+ # move audio to new video file if appropriate
287
+ # if args.png == False and fpsNotAssigned == True and not args.video is None:
288
+ # try:
289
+ # transferAudio(args.video, vid_out_name)
290
+ # except:
291
+ # print("Audio transfer failed. Interpolated video will have no audio")
292
+ # targetNoAudio = os.path.splitext(vid_out_name)[0] + "_noaudio" + os.path.splitext(vid_out_name)[1]
293
+ # os.rename(targetNoAudio, vid_out_name)
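The recursive branch of `make_inference` is worth a closer look: each call synthesizes the midpoint, then fills both halves, so the frames come back in temporal order. A minimal sketch of the same bisection pattern, with the network call replaced by a hypothetical `midpoint` average so it runs standalone:

```python
def bisect(a, b, n, midpoint=lambda x, y: (x + y) / 2):
    # Returns n values strictly between a and b, in temporal order.
    mid = midpoint(a, b)
    if n == 1:
        return [mid]
    first = bisect(a, mid, n // 2, midpoint)
    second = bisect(mid, b, n // 2, midpoint)
    return [*first, mid, *second] if n % 2 else [*first, *second]

print(bisect(0.0, 1.0, 3))  # [0.25, 0.5, 0.75]
print(bisect(0.0, 1.0, 2))  # [0.25, 0.75]
```

Note that for even n the midpoint itself is discarded, so the resulting timesteps are not uniform (1/4 and 3/4 for n=2 rather than 1/3 and 2/3); the v3.9+ direct-timestep path avoids this quirk.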
Practical-RIFE/inference_video_enhance.py ADDED
@@ -0,0 +1,201 @@
+ import os
+ import cv2
+ import torch
+ import argparse
+ import numpy as np
+ from tqdm import tqdm
+ from torch.nn import functional as F
+ import warnings
+ import _thread
+ import skvideo.io
+ from queue import Queue, Empty
+ from model.pytorch_msssim import ssim_matlab
+
+ warnings.filterwarnings("ignore")
+
+ def transferAudio(sourceVideo, targetVideo):
+     import shutil
+     import moviepy.editor
+     tempAudioFileName = "./temp/audio.mkv"
+
+     # split audio from the original video file and store it in the "temp" directory
+     # clear the old "temp" directory if it exists
+     if os.path.isdir("temp"):
+         shutil.rmtree("temp")
+     # create a new "temp" directory
+     os.makedirs("temp")
+     # extract audio from video
+     os.system('ffmpeg -y -i "{}" -c:a copy -vn {}'.format(sourceVideo, tempAudioFileName))
+
+     targetNoAudio = os.path.splitext(targetVideo)[0] + "_noaudio" + os.path.splitext(targetVideo)[1]
+     os.rename(targetVideo, targetNoAudio)
+     # combine the audio file and the new video file
+     os.system('ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(targetNoAudio, tempAudioFileName, targetVideo))
+
+     if os.path.getsize(targetVideo) == 0:  # if ffmpeg failed to merge video and audio, try transcoding the audio to AAC
+         tempAudioFileName = "./temp/audio.m4a"
+         os.system('ffmpeg -y -i "{}" -c:a aac -b:a 160k -vn {}'.format(sourceVideo, tempAudioFileName))
+         os.system('ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(targetNoAudio, tempAudioFileName, targetVideo))
+         if os.path.getsize(targetVideo) == 0:  # if AAC is not supported by the selected container
+             os.rename(targetNoAudio, targetVideo)
+             print("Audio transfer failed. Interpolated video will have no audio.")
+         else:
+             print("Lossless audio transfer failed. Audio was transcoded to AAC (M4A) instead.")
+             # remove the audio-less video
+             os.remove(targetNoAudio)
+     else:
+         os.remove(targetNoAudio)
+
+     # remove the temp directory
+     shutil.rmtree("temp")
+
+ parser = argparse.ArgumentParser(description='Video SR')
+ parser.add_argument('--video', dest='video', type=str, default=None)
+ parser.add_argument('--output', dest='output', type=str, default=None)
+ parser.add_argument('--img', dest='img', type=str, default=None)
+ parser.add_argument('--model', dest='modelDir', type=str, default='train_log_SAFA', help='directory with trained model files')
+ parser.add_argument('--fp16', dest='fp16', action='store_true', help='fp16 mode for faster and more lightweight inference on cards with Tensor Cores')
+ parser.add_argument('--png', dest='png', action='store_true', help='whether to output the result as png frames')
+ parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension')
+
+ args = parser.parse_args()
+ assert args.video is not None or args.img is not None
+ if args.img is not None:
+     args.png = True
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.set_grad_enabled(False)
+ if torch.cuda.is_available():
+     torch.backends.cudnn.enabled = True
+     torch.backends.cudnn.benchmark = True
+     if args.fp16:
+         print('set fp16')
+         torch.set_default_tensor_type(torch.cuda.HalfTensor)
+
+ try:
+     from train_log_SAFA.model import Model
+ except ImportError:
+     print("Please download our model from the model list")
+ model = Model()
+ model.device()
+ model.load_model(args.modelDir)
+ print("Loaded SAFA model.")
+ model.eval()
+
+ if args.video is not None:
+     videoCapture = cv2.VideoCapture(args.video)
+     fps = videoCapture.get(cv2.CAP_PROP_FPS)
+     tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
+     videoCapture.release()
+     fpsNotAssigned = True
+     videogen = skvideo.io.vreader(args.video)
+     lastframe = next(videogen)
+     fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
+     video_path_wo_ext, ext = os.path.splitext(args.video)
+     if not args.png and fpsNotAssigned:
+         print("The audio will be merged after the enhancement process.")
+     else:
+         print("Will not merge audio because the png flag is used!")
+ else:
+     videogen = []
+     for f in os.listdir(args.img):
+         if 'png' in f:
+             videogen.append(f)
+     tot_frame = len(videogen)
+     videogen.sort(key=lambda x: int(x[:-4]))
+     lastframe = cv2.imread(os.path.join(args.img, videogen[0]), cv2.IMREAD_UNCHANGED)[:, :, ::-1].copy()
+     videogen = videogen[1:]
+
+ h, w, _ = lastframe.shape
+
+ vid_out_name = None
+ vid_out = None
+ if args.png:
+     if not os.path.exists('vid_out'):
+         os.mkdir('vid_out')
+ else:
+     if args.output is not None:
+         vid_out_name = args.output
+     else:
+         vid_out_name = '{}_2X{}'.format(video_path_wo_ext, ext)
+     vid_out = cv2.VideoWriter(vid_out_name, fourcc, fps, (w, h))
+
+ def clear_write_buffer(user_args, write_buffer):
+     cnt = 0
+     while True:
+         item = write_buffer.get()
+         if item is None:
+             break
+         if user_args.png:
+             cv2.imwrite('vid_out/{:0>7d}.png'.format(cnt), item[:, :, ::-1])
+             cnt += 1
+         else:
+             vid_out.write(item[:, :, ::-1])
+
+ def build_read_buffer(user_args, read_buffer, videogen):
+     for frame in videogen:
+         if user_args.img is not None:
+             frame = cv2.imread(os.path.join(user_args.img, frame), cv2.IMREAD_UNCHANGED)[:, :, ::-1].copy()
+         # if user_args.montage:
+         #     frame = frame[:, left: left + w]
+         read_buffer.put(frame)
+     read_buffer.put(None)
+
+ def pad_image(img):
+     if args.fp16:
+         return F.pad(img, padding, mode='reflect').half()
+     else:
+         return F.pad(img, padding, mode='reflect')
+
+ tmp = 64
+ ph = ((h - 1) // tmp + 1) * tmp
+ pw = ((w - 1) // tmp + 1) * tmp
+ padding = (0, pw - w, 0, ph - h)
+ pbar = tqdm(total=tot_frame)
+ write_buffer = Queue(maxsize=500)
+ read_buffer = Queue(maxsize=500)
+ _thread.start_new_thread(build_read_buffer, (args, read_buffer, videogen))
+ _thread.start_new_thread(clear_write_buffer, (args, write_buffer))
+
+ while True:
+     frame = read_buffer.get()
+     if frame is None:
+         break
+     # lastframe_2x = cv2.resize(lastframe, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     # frame_2x = cv2.resize(frame, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     I0 = pad_image(torch.from_numpy(np.transpose(lastframe, (2, 0, 1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.)
+     I1 = pad_image(torch.from_numpy(np.transpose(frame, (2, 0, 1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.)
+     I0_small = F.interpolate(I0, (32, 32), mode='bilinear', align_corners=False)
+     I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
+     ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3])
+     if ssim < 0.2:
+         # likely a scene cut: enhance each frame against itself instead of across the cut
+         out = [model.inference(I0, I0, [0])[0], model.inference(I1, I1, [0])[0]]
+     else:
+         out = model.inference(I0, I1, [0, 1])
+     assert len(out) == 2
+     write_buffer.put((out[0][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
+     write_buffer.put((out[1][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
+     lastframe = read_buffer.get()
+     if lastframe is None:
+         break
+     pbar.update(2)
+
+ import time
+ while not write_buffer.empty():
+     time.sleep(0.1)
+ pbar.close()
+ if vid_out is not None:
+     vid_out.release()
+
+ # move audio to the new video file if appropriate
+ if not args.png and fpsNotAssigned and args.video is not None:
+     try:
+         transferAudio(args.video, vid_out_name)
+     except Exception:
+         print("Audio transfer failed. Interpolated video will have no audio.")
+         targetNoAudio = os.path.splitext(vid_out_name)[0] + "_noaudio" + os.path.splitext(vid_out_name)[1]
+         os.rename(targetNoAudio, vid_out_name)
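Both video scripts gate their behavior on SSIM computed over 32×32 thumbnails, which keeps the scene-cut check cheap at any input resolution. A standalone sketch of the same idea, assuming it is run from the repo root so the `model` package is importable (thresholds match the scripts above; the noise frames are illustrative):

```python
import torch
import torch.nn.functional as F
from model.pytorch_msssim import ssim_matlab

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def is_scene_cut(I0, I1, threshold=0.2):
    # Downsample to 32x32 thumbnails so the check costs the same at any resolution.
    a = F.interpolate(I0, (32, 32), mode='bilinear', align_corners=False)
    b = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
    return ssim_matlab(a[:, :3], b[:, :3]) < threshold

I0 = torch.rand(1, 3, 720, 1280, device=device)  # two unrelated noise frames
I1 = torch.rand(1, 3, 720, 1280, device=device)
print(bool(is_scene_cut(I0, I1)))  # very likely True for independent noise
```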
Practical-RIFE/model/__pycache__/loss.cpython-310.pyc ADDED
Binary file (5.62 kB).
 
Practical-RIFE/model/__pycache__/warplayer.cpython-310.pyc ADDED
Binary file (1.04 kB).
 
Practical-RIFE/model/loss.py ADDED
@@ -0,0 +1,128 @@
+ import torch
+ import numpy as np
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torchvision.models as models
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+ class EPE(nn.Module):
+     def __init__(self):
+         super(EPE, self).__init__()
+
+     def forward(self, flow, gt, loss_mask):
+         loss_map = (flow - gt.detach()) ** 2
+         loss_map = (loss_map.sum(1, True) + 1e-6) ** 0.5
+         return (loss_map * loss_mask)
+
+
+ class Ternary(nn.Module):
+     def __init__(self):
+         super(Ternary, self).__init__()
+         patch_size = 7
+         out_channels = patch_size * patch_size
+         self.w = np.eye(out_channels).reshape(
+             (patch_size, patch_size, 1, out_channels))
+         self.w = np.transpose(self.w, (3, 2, 0, 1))
+         self.w = torch.tensor(self.w).float().to(device)
+
+     def transform(self, img):
+         # gather each pixel's 7x7 neighborhood, then compare it to the center pixel
+         patches = F.conv2d(img, self.w, padding=3, bias=None)
+         transf = patches - img
+         transf_norm = transf / torch.sqrt(0.81 + transf**2)
+         return transf_norm
+
+     def rgb2gray(self, rgb):
+         r, g, b = rgb[:, 0:1, :, :], rgb[:, 1:2, :, :], rgb[:, 2:3, :, :]
+         gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
+         return gray
+
+     def hamming(self, t1, t2):
+         dist = (t1 - t2) ** 2
+         dist_norm = torch.mean(dist / (0.1 + dist), 1, True)
+         return dist_norm
+
+     def valid_mask(self, t, padding):
+         n, _, h, w = t.size()
+         inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
+         mask = F.pad(inner, [padding] * 4)
+         return mask
+
+     def forward(self, img0, img1):
+         img0 = self.transform(self.rgb2gray(img0))
+         img1 = self.transform(self.rgb2gray(img1))
+         return self.hamming(img0, img1) * self.valid_mask(img0, 1)
+
+
+ class SOBEL(nn.Module):
+     def __init__(self):
+         super(SOBEL, self).__init__()
+         self.kernelX = torch.tensor([
+             [1, 0, -1],
+             [2, 0, -2],
+             [1, 0, -1],
+         ]).float()
+         self.kernelY = self.kernelX.clone().T
+         self.kernelX = self.kernelX.unsqueeze(0).unsqueeze(0).to(device)
+         self.kernelY = self.kernelY.unsqueeze(0).unsqueeze(0).to(device)
+
+     def forward(self, pred, gt):
+         N, C, H, W = pred.shape[0], pred.shape[1], pred.shape[2], pred.shape[3]
+         img_stack = torch.cat(
+             [pred.reshape(N * C, 1, H, W), gt.reshape(N * C, 1, H, W)], 0)
+         sobel_stack_x = F.conv2d(img_stack, self.kernelX, padding=1)
+         sobel_stack_y = F.conv2d(img_stack, self.kernelY, padding=1)
+         pred_X, gt_X = sobel_stack_x[:N * C], sobel_stack_x[N * C:]
+         pred_Y, gt_Y = sobel_stack_y[:N * C], sobel_stack_y[N * C:]
+
+         L1X, L1Y = torch.abs(pred_X - gt_X), torch.abs(pred_Y - gt_Y)
+         loss = (L1X + L1Y)
+         return loss
+
+
+ class MeanShift(nn.Conv2d):
+     def __init__(self, data_mean, data_std, data_range=1, norm=True):
+         c = len(data_mean)
+         super(MeanShift, self).__init__(c, c, kernel_size=1)
+         std = torch.Tensor(data_std)
+         self.weight.data = torch.eye(c).view(c, c, 1, 1)
+         if norm:
+             self.weight.data.div_(std.view(c, 1, 1, 1))
+             self.bias.data = -1 * data_range * torch.Tensor(data_mean)
+             self.bias.data.div_(std)
+         else:
+             self.weight.data.mul_(std.view(c, 1, 1, 1))
+             self.bias.data = data_range * torch.Tensor(data_mean)
+         # freeze the fixed affine parameters
+         for p in self.parameters():
+             p.requires_grad = False
+
+
+ class VGGPerceptualLoss(torch.nn.Module):
+     def __init__(self, rank=0):
+         super(VGGPerceptualLoss, self).__init__()
+         self.vgg_pretrained_features = models.vgg19(pretrained=True).features
+         self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).to(device)
+         for param in self.parameters():
+             param.requires_grad = False
+
+     def forward(self, X, Y, indices=None):
+         X = self.normalize(X)
+         Y = self.normalize(Y)
+         indices = [2, 7, 12, 21, 30]
+         weights = [1.0 / 2.6, 1.0 / 4.8, 1.0 / 3.7, 1.0 / 5.6, 10 / 1.5]
+         k = 0
+         loss = 0
+         for i in range(indices[-1]):
+             X = self.vgg_pretrained_features[i](X)
+             Y = self.vgg_pretrained_features[i](Y)
+             if (i + 1) in indices:
+                 loss += weights[k] * (X - Y.detach()).abs().mean() * 0.1
+                 k += 1
+         return loss
+
+
+ if __name__ == '__main__':
+     img0 = torch.zeros(3, 3, 256, 256).float().to(device)
+     img1 = torch.tensor(np.random.normal(
+         0, 1, (3, 3, 256, 256))).float().to(device)
+     ternary_loss = Ternary()
+     print(ternary_loss(img0, img1).shape)
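The `Ternary` loss is a soft census transform: each pixel is compared against its 7×7 neighborhood, which makes the distance largely insensitive to global brightness shifts. A quick illustrative sketch of that property, again assuming the repo root is on the import path (values are approximate; clamping saturates some highlights, so the first score is small rather than exactly zero):

```python
import torch
from model.loss import Ternary

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
census = Ternary()

img = torch.rand(1, 3, 64, 64, device=device)
shifted = (img + 0.2).clamp(0, 1)                 # global brightness change
noise = torch.rand(1, 3, 64, 64, device=device)   # unrelated content

print(census(img, shifted).mean())  # small: the census comparison cancels uniform offsets
print(census(img, noise).mean())    # larger: the local structure actually differs
```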
Practical-RIFE/model/pytorch_msssim/__init__.py ADDED
@@ -0,0 +1,200 @@
+ import torch
+ import torch.nn.functional as F
+ from math import exp
+ import numpy as np
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ def gaussian(window_size, sigma):
+     gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
+     return gauss/gauss.sum()
+
+
+ def create_window(window_size, channel=1):
+     _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+     _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0).to(device)
+     window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
+     return window
+
+ def create_window_3d(window_size, channel=1):
+     _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+     _2D_window = _1D_window.mm(_1D_window.t())
+     _3D_window = _2D_window.unsqueeze(2) @ (_1D_window.t())
+     window = _3D_window.expand(1, channel, window_size, window_size, window_size).contiguous().to(device)
+     return window
+
+
+ def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
+     # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
+     if val_range is None:
+         if torch.max(img1) > 128:
+             max_val = 255
+         else:
+             max_val = 1
+
+         if torch.min(img1) < -0.5:
+             min_val = -1
+         else:
+             min_val = 0
+         L = max_val - min_val
+     else:
+         L = val_range
+
+     padd = 0
+     (_, channel, height, width) = img1.size()
+     if window is None:
+         real_size = min(window_size, height, width)
+         window = create_window(real_size, channel=channel).to(img1.device)
+
+     # mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
+     # mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
+     # replicate-pad by half the default 11-tap window so the valid convolution preserves size
+     mu1 = F.conv2d(F.pad(img1, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel)
+     mu2 = F.conv2d(F.pad(img2, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel)
+
+     mu1_sq = mu1.pow(2)
+     mu2_sq = mu2.pow(2)
+     mu1_mu2 = mu1 * mu2
+
+     sigma1_sq = F.conv2d(F.pad(img1 * img1, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_sq
+     sigma2_sq = F.conv2d(F.pad(img2 * img2, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu2_sq
+     sigma12 = F.conv2d(F.pad(img1 * img2, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_mu2
+
+     C1 = (0.01 * L) ** 2
+     C2 = (0.03 * L) ** 2
+
+     v1 = 2.0 * sigma12 + C2
+     v2 = sigma1_sq + sigma2_sq + C2
+     cs = torch.mean(v1 / v2)  # contrast sensitivity
+
+     ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
+
+     if size_average:
+         ret = ssim_map.mean()
+     else:
+         ret = ssim_map.mean(1).mean(1).mean(1)
+
+     if full:
+         return ret, cs
+     return ret
+
+
+ def ssim_matlab(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
+     # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
+     if val_range is None:
+         if torch.max(img1) > 128:
+             max_val = 255
+         else:
+             max_val = 1
+
+         if torch.min(img1) < -0.5:
+             min_val = -1
+         else:
+             min_val = 0
+         L = max_val - min_val
+     else:
+         L = val_range
+
+     padd = 0
+     (_, _, height, width) = img1.size()
+     if window is None:
+         real_size = min(window_size, height, width)
+         window = create_window_3d(real_size, channel=1).to(img1.device)
+         # Channel is set to 1 since we treat color images as volumetric images
+
+     img1 = img1.unsqueeze(1)
+     img2 = img2.unsqueeze(1)
+
+     mu1 = F.conv3d(F.pad(img1, (5, 5, 5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=1)
+     mu2 = F.conv3d(F.pad(img2, (5, 5, 5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=1)
+
+     mu1_sq = mu1.pow(2)
+     mu2_sq = mu2.pow(2)
+     mu1_mu2 = mu1 * mu2
+
+     sigma1_sq = F.conv3d(F.pad(img1 * img1, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu1_sq
+     sigma2_sq = F.conv3d(F.pad(img2 * img2, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu2_sq
+     sigma12 = F.conv3d(F.pad(img1 * img2, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu1_mu2
+
+     C1 = (0.01 * L) ** 2
+     C2 = (0.03 * L) ** 2
+
+     v1 = 2.0 * sigma12 + C2
+     v2 = sigma1_sq + sigma2_sq + C2
+     cs = torch.mean(v1 / v2)  # contrast sensitivity
+
+     ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
+
+     if size_average:
+         ret = ssim_map.mean()
+     else:
+         ret = ssim_map.mean(1).mean(1).mean(1)
+
+     if full:
+         return ret, cs
+     return ret
+
+
+ def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
+     device = img1.device
+     weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
+     levels = weights.size()[0]
+     mssim = []
+     mcs = []
+     for _ in range(levels):
+         sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
+         mssim.append(sim)
+         mcs.append(cs)
+
+         img1 = F.avg_pool2d(img1, (2, 2))
+         img2 = F.avg_pool2d(img2, (2, 2))
+
+     mssim = torch.stack(mssim)
+     mcs = torch.stack(mcs)
+
+     # Normalize (to avoid NaNs when training unstable models; not compliant with the original definition)
+     if normalize:
+         mssim = (mssim + 1) / 2
+         mcs = (mcs + 1) / 2
+
+     pow1 = mcs ** weights
+     pow2 = mssim ** weights
+     # From the Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
+     output = torch.prod(pow1[:-1] * pow2[-1])
+     return output
+
+
+ # Classes to re-use the window
+ class SSIM(torch.nn.Module):
+     def __init__(self, window_size=11, size_average=True, val_range=None):
+         super(SSIM, self).__init__()
+         self.window_size = window_size
+         self.size_average = size_average
+         self.val_range = val_range
+
+         # Assume 3 channels for SSIM
+         self.channel = 3
+         self.window = create_window(window_size, channel=self.channel)
+
+     def forward(self, img1, img2):
+         (_, channel, _, _) = img1.size()
+
+         if channel == self.channel and self.window.dtype == img1.dtype:
+             window = self.window
+         else:
+             window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
+             self.window = window
+             self.channel = channel
+
+         _ssim = ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
+         dssim = (1 - _ssim) / 2
+         return dssim
+
+ class MSSSIM(torch.nn.Module):
+     def __init__(self, window_size=11, size_average=True, channel=3):
+         super(MSSSIM, self).__init__()
+         self.window_size = window_size
+         self.size_average = size_average
+         self.channel = channel
+
+     def forward(self, img1, img2):
+         return msssim(img1, img2, window_size=self.window_size, size_average=self.size_average)
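A minimal sanity check of `ssim_matlab` on toy tensors (shapes and noise level are illustrative): identical inputs score about 1.0, and the score falls as noise is added.

```python
import torch
from model.pytorch_msssim import ssim_matlab

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
a = torch.rand(1, 3, 64, 64, device=device)
b = (a + 0.1 * torch.randn_like(a)).clamp(0, 1)

print(float(ssim_matlab(a, a)))  # ~1.0 for identical inputs
print(float(ssim_matlab(a, b)))  # < 1.0, decreasing with the noise level
```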
Practical-RIFE/model/pytorch_msssim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.32 kB).
 
Practical-RIFE/model/warplayer.py ADDED
@@ -0,0 +1,22 @@
+ import torch
+ import torch.nn as nn
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ backwarp_tenGrid = {}
+
+
+ def warp(tenInput, tenFlow):
+     # cache one normalized sampling grid per (device, flow size) pair
+     k = (str(tenFlow.device), str(tenFlow.size()))
+     if k not in backwarp_tenGrid:
+         tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3], device=device).view(
+             1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1)
+         tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2], device=device).view(
+             1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3])
+         backwarp_tenGrid[k] = torch.cat(
+             [tenHorizontal, tenVertical], 1).to(device)
+
+     # convert the pixel-space flow into the [-1, 1] range expected by grid_sample
+     tenFlow = torch.cat([tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0),
+                          tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0)], 1)
+
+     g = (backwarp_tenGrid[k] + tenFlow).permute(0, 2, 3, 1)
+     return torch.nn.functional.grid_sample(input=tenInput, grid=g, mode='bilinear', padding_mode='border', align_corners=True)
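A quick sanity check of `warp`: zero flow is the identity mapping, so the warped tensor should equal the input (with `align_corners=True`, bilinear sampling is exact at the grid points). A minimal sketch, run from the repo root:

```python
import torch
from model.warplayer import warp

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.rand(1, 3, 32, 48, device=device)
zero_flow = torch.zeros(1, 2, 32, 48, device=device)

out = warp(x, zero_flow)
print(torch.allclose(out, x, atol=1e-5))  # True: zero flow leaves the image unchanged
```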
Practical-RIFE/train_log/.DS_Store ADDED
Binary file (6.15 kB).
 
Practical-RIFE/train_log/IFNet_HDv3.py ADDED
@@ -0,0 +1,156 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from model.warplayer import warp
+ # from train_log.refine import *
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
+     return nn.Sequential(
+         nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
+                   padding=padding, dilation=dilation, bias=True),
+         nn.LeakyReLU(0.2, True)
+     )
+
+ def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
+     return nn.Sequential(
+         nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
+                   padding=padding, dilation=dilation, bias=False),
+         nn.BatchNorm2d(out_planes),
+         nn.LeakyReLU(0.2, True)
+     )
+
+ class Head(nn.Module):
+     def __init__(self):
+         super(Head, self).__init__()
+         self.cnn0 = nn.Conv2d(3, 32, 3, 2, 1)
+         self.cnn1 = nn.Conv2d(32, 32, 3, 1, 1)
+         self.cnn2 = nn.Conv2d(32, 32, 3, 1, 1)
+         self.cnn3 = nn.ConvTranspose2d(32, 8, 4, 2, 1)
+         self.relu = nn.LeakyReLU(0.2, True)
+
+     def forward(self, x, feat=False):
+         x0 = self.cnn0(x)
+         x = self.relu(x0)
+         x1 = self.cnn1(x)
+         x = self.relu(x1)
+         x2 = self.cnn2(x)
+         x = self.relu(x2)
+         x3 = self.cnn3(x)
+         if feat:
+             return [x0, x1, x2, x3]
+         return x3
+
+ class ResConv(nn.Module):
+     def __init__(self, c, dilation=1):
+         super(ResConv, self).__init__()
+         self.conv = nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1)
+         self.beta = nn.Parameter(torch.ones((1, c, 1, 1)), requires_grad=True)
+         self.relu = nn.LeakyReLU(0.2, True)
+
+     def forward(self, x):
+         return self.relu(self.conv(x) * self.beta + x)
+
+ class IFBlock(nn.Module):
+     def __init__(self, in_planes, c=64):
+         super(IFBlock, self).__init__()
+         self.conv0 = nn.Sequential(
+             conv(in_planes, c//2, 3, 2, 1),
+             conv(c//2, c, 3, 2, 1),
+         )
+         self.convblock = nn.Sequential(
+             ResConv(c),
+             ResConv(c),
+             ResConv(c),
+             ResConv(c),
+             ResConv(c),
+             ResConv(c),
+             ResConv(c),
+             ResConv(c),
+         )
+         self.lastconv = nn.Sequential(
+             nn.ConvTranspose2d(c, 4*6, 4, 2, 1),
+             nn.PixelShuffle(2)
+         )
+
+     def forward(self, x, flow=None, scale=1):
+         x = F.interpolate(x, scale_factor=1. / scale, mode="bilinear", align_corners=False)
+         if flow is not None:
+             flow = F.interpolate(flow, scale_factor=1. / scale, mode="bilinear", align_corners=False) * 1. / scale
+             x = torch.cat((x, flow), 1)
+         feat = self.conv0(x)
+         feat = self.convblock(feat)
+         tmp = self.lastconv(feat)
+         tmp = F.interpolate(tmp, scale_factor=scale, mode="bilinear", align_corners=False)
+         flow = tmp[:, :4] * scale
+         mask = tmp[:, 4:5]
+         return flow, mask
+
+ class IFNet(nn.Module):
+     def __init__(self):
+         super(IFNet, self).__init__()
+         self.block0 = IFBlock(7+16, c=192)
+         self.block1 = IFBlock(8+4+16, c=128)
+         self.block2 = IFBlock(8+4+16, c=96)
+         self.block3 = IFBlock(8+4+16, c=64)
+         self.encode = Head()
+         # self.contextnet = Contextnet()
+         # self.unet = Unet()
+
+     def forward(self, x, timestep=0.5, scale_list=[8, 4, 2, 1], training=False, fastmode=True, ensemble=False):
+         if not training:
+             channel = x.shape[1] // 2
+             img0 = x[:, :channel]
+             img1 = x[:, channel:]
+         if not torch.is_tensor(timestep):
+             timestep = (x[:, :1].clone() * 0 + 1) * timestep
+         else:
+             timestep = timestep.repeat(1, 1, img0.shape[2], img0.shape[3])
+         f0 = self.encode(img0[:, :3])
+         f1 = self.encode(img1[:, :3])
+         flow_list = []
+         merged = []
+         mask_list = []
+         warped_img0 = img0
+         warped_img1 = img1
+         flow = None
+         mask = None
+         loss_cons = 0
+         block = [self.block0, self.block1, self.block2, self.block3]
+         for i in range(4):
+             if flow is None:
+                 flow, mask = block[i](torch.cat((img0[:, :3], img1[:, :3], f0, f1, timestep), 1), None, scale=scale_list[i])
+                 if ensemble:
+                     f_, m_ = block[i](torch.cat((img1[:, :3], img0[:, :3], f1, f0, 1-timestep), 1), None, scale=scale_list[i])
+                     flow = (flow + torch.cat((f_[:, 2:4], f_[:, :2]), 1)) / 2
+                     mask = (mask + (-m_)) / 2
+             else:
+                 wf0 = warp(f0, flow[:, :2])
+                 wf1 = warp(f1, flow[:, 2:4])
+                 fd, m0 = block[i](torch.cat((warped_img0[:, :3], warped_img1[:, :3], wf0, wf1, timestep, mask), 1), flow, scale=scale_list[i])
+                 if ensemble:
+                     f_, m_ = block[i](torch.cat((warped_img1[:, :3], warped_img0[:, :3], wf1, wf0, 1-timestep, -mask), 1), torch.cat((flow[:, 2:4], flow[:, :2]), 1), scale=scale_list[i])
+                     fd = (fd + torch.cat((f_[:, 2:4], f_[:, :2]), 1)) / 2
+                     mask = (m0 + (-m_)) / 2
+                 else:
+                     mask = m0
+                 flow = flow + fd
+             mask_list.append(mask)
+             flow_list.append(flow)
+             warped_img0 = warp(img0, flow[:, :2])
+             warped_img1 = warp(img1, flow[:, 2:4])
+             merged.append((warped_img0, warped_img1))
+         mask = torch.sigmoid(mask)
+         merged[3] = (warped_img0 * mask + warped_img1 * (1 - mask))
+         if not fastmode:
+             print('contextnet is removed')
+             '''
+             c0 = self.contextnet(img0, flow[:, :2])
+             c1 = self.contextnet(img1, flow[:, 2:4])
+             tmp = self.unet(img0, img1, warped_img0, warped_img1, mask, flow, c0, c1)
+             res = tmp[:, :3] * 2 - 1
+             merged[3] = torch.clamp(merged[3] + res, 0, 1)
+             '''
+         return flow_list, mask_list[3], merged
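In `IFNet.forward`, a scalar `timestep` is broadcast into a full spatial plane so it can be concatenated with the image and feature channels. The `(x[:, :1].clone() * 0 + 1) * timestep` idiom is just a device- and dtype-preserving way to build that constant plane; a standalone equivalent (shapes illustrative):

```python
import torch

x = torch.rand(2, 6, 8, 8)  # concatenated img0|img1, as inside IFNet
timestep = 0.5

# Original idiom: zero out one channel of x, add 1, scale by timestep.
t_map = (x[:, :1].clone() * 0 + 1) * timestep
# Equivalent and clearer: a constant plane with matching shape, dtype, and device.
t_map2 = torch.full_like(x[:, :1], timestep)

print(t_map.shape)                 # torch.Size([2, 1, 8, 8])
print(torch.equal(t_map, t_map2))  # True
```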
Practical-RIFE/train_log/RIFE_HDv3.py ADDED
@@ -0,0 +1,89 @@
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ from torch.optim import AdamW
+ import torch.optim as optim
+ import itertools
+ from model.warplayer import warp
+ from torch.nn.parallel import DistributedDataParallel as DDP
+ from train_log.IFNet_HDv3 import *
+ import torch.nn.functional as F
+ from model.loss import *
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ class Model:
+     def __init__(self, local_rank=-1):
+         self.flownet = IFNet()
+         self.device()
+         self.optimG = AdamW(self.flownet.parameters(), lr=1e-6, weight_decay=1e-4)
+         self.epe = EPE()
+         self.version = 4.8
+         # self.vgg = VGGPerceptualLoss().to(device)
+         self.sobel = SOBEL()
+         if local_rank != -1:
+             self.flownet = DDP(self.flownet, device_ids=[local_rank], output_device=local_rank)
+
+     def train(self):
+         self.flownet.train()
+
+     def eval(self):
+         self.flownet.eval()
+
+     def device(self):
+         self.flownet.to(device)
+
+     def load_model(self, path, rank=0):
+         def convert(param):
+             if rank == -1:
+                 # checkpoints saved from a DDP wrapper prefix every key with "module."
+                 return {
+                     k.replace("module.", ""): v
+                     for k, v in param.items()
+                     if "module." in k
+                 }
+             else:
+                 return param
+         if rank <= 0:
+             if torch.cuda.is_available():
+                 self.flownet.load_state_dict(convert(torch.load('{}/flownet.pkl'.format(path))), False)
+             else:
+                 self.flownet.load_state_dict(convert(torch.load('{}/flownet.pkl'.format(path), map_location='cpu')), False)
+
+     def save_model(self, path, rank=0):
+         if rank == 0:
+             torch.save(self.flownet.state_dict(), '{}/flownet.pkl'.format(path))
+
+     def inference(self, img0, img1, timestep=0.5, scale=1.0):
+         imgs = torch.cat((img0, img1), 1)
+         scale_list = [8/scale, 4/scale, 2/scale, 1/scale]
+         flow, mask, merged = self.flownet(imgs, timestep, scale_list)
+         return merged[3]
+
+     def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):
+         for param_group in self.optimG.param_groups:
+             param_group['lr'] = learning_rate
+         img0 = imgs[:, :3]
+         img1 = imgs[:, 3:]
+         if training:
+             self.train()
+         else:
+             self.eval()
+         scale = [8, 4, 2, 1]
+         flow, mask, merged = self.flownet(torch.cat((imgs, gt), 1), scale_list=scale, training=training)
+         loss_l1 = (merged[3] - gt).abs().mean()
+         loss_smooth = self.sobel(flow[3], flow[3] * 0).mean()
+         loss_cons = 0  # the distillation/consistency term is disabled in this release
+         # loss_vgg = self.vgg(merged[2], gt)
+         if training:
+             self.optimG.zero_grad()
+             loss_G = loss_l1 + loss_cons + loss_smooth * 0.1
+             loss_G.backward()
+             self.optimG.step()
+         else:
+             flow_teacher = flow[2]
+         return merged[3], {
+             'mask': mask,
+             'flow': flow[3][:, :2],
+             'loss_l1': loss_l1,
+             'loss_cons': loss_cons,
+             'loss_smooth': loss_smooth,
+         }
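The `convert` helper in `load_model` handles checkpoints saved from a `DistributedDataParallel` wrapper, whose parameter keys are all prefixed with `module.`. A standalone sketch of the same normalization, with hypothetical keys for illustration:

```python
def strip_ddp_prefix(state_dict):
    # DDP checkpoints store parameters as "module.<name>";
    # strip the prefix so they load into the bare model.
    return {k.replace("module.", "", 1): v for k, v in state_dict.items()}

ckpt = {"module.block0.conv0.weight": 1, "module.encode.cnn0.bias": 2}
print(strip_ddp_prefix(ckpt))
# {'block0.conv0.weight': 1, 'encode.cnn0.bias': 2}
```

Note that the repo's version also filters out keys that lack the prefix, which silently discards any non-parameter entries in the checkpoint; it additionally passes `strict=False` to `load_state_dict`, so missing keys do not raise.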
Practical-RIFE/train_log/__pycache__/IFNet_HDv3.cpython-310.pyc ADDED
Binary file (5.32 kB).
 
Practical-RIFE/train_log/__pycache__/RIFE_HDv3.cpython-310.pyc ADDED
Binary file (3.57 kB).
 
Practical-RIFE/train_log/flownet.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1ee3186270312a38316e4d53c77b31a60062cfa5636e13d6f0a1dd89bb7b128
+ size 21508207
Practical-RIFE/train_log/refine.py ADDED
@@ -0,0 +1,90 @@
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ from torch.optim import AdamW
+ import torch.optim as optim
+ import itertools
+ from model.warplayer import warp
+ from torch.nn.parallel import DistributedDataParallel as DDP
+ import torch.nn.functional as F
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
+     return nn.Sequential(
+         nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
+                   padding=padding, dilation=dilation, bias=True),
+         nn.LeakyReLU(0.2, True)
+     )
+
+ def conv_woact(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
+     return nn.Sequential(
+         nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
+                   padding=padding, dilation=dilation, bias=True),
+     )
+
+ def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
+     return nn.Sequential(
+         torch.nn.ConvTranspose2d(in_channels=in_planes, out_channels=out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=True),
+         nn.LeakyReLU(0.2, True)
+     )
+
+ class Conv2(nn.Module):
+     def __init__(self, in_planes, out_planes, stride=2):
+         super(Conv2, self).__init__()
+         self.conv1 = conv(in_planes, out_planes, 3, stride, 1)
+         self.conv2 = conv(out_planes, out_planes, 3, 1, 1)
+
+     def forward(self, x):
+         x = self.conv1(x)
+         x = self.conv2(x)
+         return x
+
+ c = 16
+ class Contextnet(nn.Module):
+     def __init__(self):
+         super(Contextnet, self).__init__()
+         self.conv1 = Conv2(3, c)
+         self.conv2 = Conv2(c, 2*c)
+         self.conv3 = Conv2(2*c, 4*c)
+         self.conv4 = Conv2(4*c, 8*c)
+
+     def forward(self, x, flow):
+         x = self.conv1(x)
+         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5
+         f1 = warp(x, flow)
+         x = self.conv2(x)
+         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5
+         f2 = warp(x, flow)
+         x = self.conv3(x)
+         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5
+         f3 = warp(x, flow)
+         x = self.conv4(x)
+         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5
+         f4 = warp(x, flow)
+         return [f1, f2, f3, f4]
+
+ class Unet(nn.Module):
+     def __init__(self):
+         super(Unet, self).__init__()
+         self.down0 = Conv2(17, 2*c)
+         self.down1 = Conv2(4*c, 4*c)
+         self.down2 = Conv2(8*c, 8*c)
+         self.down3 = Conv2(16*c, 16*c)
+         self.up0 = deconv(32*c, 8*c)
+         self.up1 = deconv(16*c, 4*c)
+         self.up2 = deconv(8*c, 2*c)
+         self.up3 = deconv(4*c, c)
+         self.conv = nn.Conv2d(c, 3, 3, 1, 1)
+
+     def forward(self, img0, img1, warped_img0, warped_img1, mask, flow, c0, c1):
+         s0 = self.down0(torch.cat((img0, img1, warped_img0, warped_img1, mask, flow), 1))
+         s1 = self.down1(torch.cat((s0, c0[0], c1[0]), 1))
+         s2 = self.down2(torch.cat((s1, c0[1], c1[1]), 1))
+         s3 = self.down3(torch.cat((s2, c0[2], c1[2]), 1))
+         x = self.up0(torch.cat((s3, c0[3], c1[3]), 1))
+         x = self.up1(torch.cat((x, s2), 1))
+         x = self.up2(torch.cat((x, s1), 1))
+         x = self.up3(torch.cat((x, s0), 1))
+         x = self.conv(x)
+         return torch.sigmoid(x)
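`Contextnet` halves the flow's resolution at every level and also scales its magnitude by 0.5, because flow vectors are measured in pixels: when the image is half as large, the same motion spans half as many pixels. A standalone check of that invariant (values illustrative):

```python
import torch
import torch.nn.functional as F

flow = torch.full((1, 2, 8, 8), 4.0)  # uniform 4-pixel displacement
half = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5

print(half.shape)          # torch.Size([1, 2, 4, 4])
print(half.mean().item())  # 2.0: half the resolution, half the pixel displacement
```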
README.md CHANGED
@@ -1,10 +1,156 @@
- ---
- license: apache-2.0
- ---
-
- Download Moore-AnimateAnyone weights by:
-
- ```bash
- git lfs install
- git clone https://huggingface.co/patrolli/AnimateAnyone
- ```
+ # roop-unleashed
+
+ [Changelog](#changelog) • [Usage](#usage) • [Wiki](https://github.com/C0untFloyd/roop-unleashed/wiki)
+
+ Uncensored deepfakes for images and videos without training and with an easy-to-use GUI.
+
+ ![Screen](https://github.com/C0untFloyd/roop-unleashed/assets/131583554/6ee6860d-efbe-4337-8c62-a67598863637)
+
+ ### Features
+
+ - Platform-independent browser GUI
+ - Selection of multiple input/output faces in one go
+ - Many different swapping modes: first detected, face selections, by gender
+ - Batch processing of images/videos
+ - Masking of face occluders, using text prompts or automatically
+ - Optional face upscaling/restoration using different enhancers
+ - Preview swapping from different video frames
+ - Live fake cam using your webcam
+ - Extras tab for cutting videos etc.
+ - Settings - storing configuration for the next session
+ - Theme support
+
+ and lots more...
+
+ ## Disclaimer
+
+ This project is for technical and academic use only.
+ Users of this software are expected to use it responsibly while abiding by local law. If the face of a real person is being used, users are advised to obtain consent from the person concerned and to clearly state that the result is a deepfake when posting content online. The developers of this software will not be responsible for the actions of end users.
+ **Please do not apply it to illegal and unethical scenarios.**
+
+ In the event of a violation of the legal and ethical requirements of the user's country or region, this code repository is exempt from liability.
+
+ ### Installation
+
+ Please refer to the [wiki](https://github.com/C0untFloyd/roop-unleashed/wiki).
+
+ ### Usage
+
+ - Windows: run `windows_run.bat` from the installer.
+ - Linux: `python run.py`
+
+ <a target="_blank" href="https://colab.research.google.com/github/C0untFloyd/roop-unleashed/blob/main/roop-unleashed.ipynb">
+ <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
+ </a>
+
+ Additional command-line arguments are currently unsupported; settings should be changed via the UI.
+
+ > Note: When you run this program for the first time, it will download models roughly 2 GB in size.
+
+ ### Changelog
+
+ **22.04.2024** v3.9.0
+
+ - Bugfix: face detection bounding box produced corrupt values at odd angles
+ - Rewrote mask previewing to work with every model
+ - Switching mask engines toggles text interactivity
+ - Clearing target files resets the face selection dropdown
+ - Massive rewrite of the swapping architecture, needed for the XSeg implementation
+ - Added DFL XSeg support for partial face occlusion
+ - Face masking only runs when a face is detected
+ - Removed unnecessary toggle checkbox for text masking
+
+ **22.03.2024** v3.6.5
+
+ - Bugfix: installer pulling the latest update on first installation
+ - Bugfix: regression issue, blurring/erosion missing from face swap
+ - Exposed erosion and blur amounts in the UI
+ - Using the same values for manual masking too
+
+ **20.03.2024** v3.6.3
+
+ - Bugfix: workaround for the Gradio slider change bug
+ - Bugfix: CSS styling to fix the Gradio image height bug
+ - Made face-swapping mask offsets resolution-independent
+ - Show offset mask as overlay
+ - Changed layout for masking
+
+ **18.03.2024** v3.6.0
+
+ - Updated to Gradio 4.21.0 - requiring many changes under the hood
+ - New manual masking (draw the mask yourself)
+ - Extras tab, streamlined cutting/joining of videos
+ - Re-added face selection by gender (on-demand loading, off by default)
+ - Removed unnecessary activate live-cam option
+ - Added time info to the preview frame and changed the frame slider event to allow faster changes
+
+ **10.03.2024** v3.5.5
+
+ - Bugfix: installer path env
+ - Bugfix: file attributes
+ - Video processing checks for the presence of ffmpeg and displays a warning if it is not found
+ - Removed gender + age detection to speed up processing; option removed from the UI
+ - Replaced RestoreFormer with RestoreFormer++
+ - Live cam recoded to run separately from the virtual cam and without blocking controls
+ - Swapping with only one target face allows selecting from several input faces
+
+ **08.01.2024** v3.5.0
+
+ - Bugfix: wrong access options when creating folders
+ - New auto-rotation of horizontal faces, fixing bad landmark positions (expanded on [PR 364](https://github.com/C0untFloyd/roop-unleashed/pull/364))
+ - Simple VR option for stereo images/movies, best used in selected-face mode
+ - Added RestoreFormer enhancer - https://github.com/wzhouxiff/RestoreFormer
+ - Bumped up package versions for onnx/Torch etc.
+
+ **16.10.2023** v3.3.4
+
+ **11.8.2023** v2.7.0
+
+ Initial Gradio version - the old TkInter version is now deprecated
+
+ - Re-added unified padding to face enhancers
+ - Fixed DMDNet for all resolutions
+ - Selecting a target face now automatically switches the swapping mode to "selected"
+ - GPU providers are correctly set using the GUI (currently needs a restart)
+ - Local output folder can be opened from the page
+ - Unfinished extras functions disabled for now
+ - Installer checks out a specific commit, allowing rollback to the first install
+ - Updated readme for the new Gradio version
+ - Updated Colab
+
+ # Acknowledgements
+
+ Lots of ideas, code, and pre-trained models borrowed from the following projects:
+
+ https://github.com/deepinsight/insightface<br />
+ https://github.com/s0md3v/roop<br />
+ https://github.com/AUTOMATIC1111/stable-diffusion-webui<br />
+ https://github.com/Hillobar/Rope<br />
+ https://github.com/TencentARC/GFPGAN<br />
+ https://github.com/kadirnar/codeformer-pip<br />
+ https://github.com/csxmli2016/DMDNet<br />
+ https://github.com/glucauze/sd-webui-faceswaplab<br />
+ https://github.com/ykk648/face_power<br />
+
+ <br />
+ <br />
+ Thanks to all developers!
__pycache__/handler.cpython-310.pyc CHANGED
Binary files a/__pycache__/handler.cpython-310.pyc and b/__pycache__/handler.cpython-310.pyc differ
 
__pycache__/settings.cpython-310.pyc ADDED
Binary file (2.17 kB).
 
clip/__init__.py ADDED
@@ -0,0 +1 @@
+ from .clip import *
clip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
+ size 1356917
clip/clip.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import hashlib
+ import os
+ import urllib.request
+ import warnings
+ from typing import Any, Union, List
+
+ import torch
+ from PIL import Image
+ from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
+ from tqdm import tqdm
+
+ from .model import build_model
+ from .simple_tokenizer import SimpleTokenizer as _Tokenizer
+
+ try:
+     from torchvision.transforms import InterpolationMode
+     BICUBIC = InterpolationMode.BICUBIC
+ except ImportError:
+     BICUBIC = Image.BICUBIC
+
+
+ __all__ = ["available_models", "load", "tokenize"]
+ _tokenizer = _Tokenizer()
+
+ _MODELS = {
+     "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
+     "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
+     "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
+     "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
+     "RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
+     "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
+     "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
+     "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
+     "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
+ }
+
+
+ def _download(url: str, root: str):
+     os.makedirs(root, exist_ok=True)
+     filename = os.path.basename(url)
+
+     # the expected SHA256 digest is embedded as the second-to-last URL path segment
+     expected_sha256 = url.split("/")[-2]
+     download_target = os.path.join(root, filename)
+
+     if os.path.exists(download_target) and not os.path.isfile(download_target):
+         raise RuntimeError(f"{download_target} exists and is not a regular file")
+
+     if os.path.isfile(download_target):
+         if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
+             return download_target
+         else:
+             warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
+
+     with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
+         with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
+             while True:
+                 buffer = source.read(8192)
+                 if not buffer:
+                     break
+
+                 output.write(buffer)
+                 loop.update(len(buffer))
+
+     if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
+         raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")
+
+     return download_target
+
+
+ def _convert_image_to_rgb(image):
+     return image.convert("RGB")
+
+
+ def _transform(n_px):
+     return Compose([
+         Resize(n_px, interpolation=BICUBIC),
+         CenterCrop(n_px),
+         _convert_image_to_rgb,
+         ToTensor(),
+         Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
+     ])
+
+
+ def available_models() -> List[str]:
+     """Returns the names of available CLIP models"""
+     return list(_MODELS.keys())
+
+
+ def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
+     """Load a CLIP model
+
+     Parameters
+     ----------
+     name : str
+         A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
+
+     device : Union[str, torch.device]
+         The device to put the loaded model
+
+     jit : bool
+         Whether to load the optimized JIT model or the more hackable non-JIT model (default).
+
+     download_root: str
+         path to download the model files; by default, it uses "~/.cache/clip"
+
+     Returns
+     -------
+     model : torch.nn.Module
+         The CLIP model
+
+     preprocess : Callable[[PIL.Image], torch.Tensor]
+         A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
+     """
+     if name in _MODELS:
+         model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
+     elif os.path.isfile(name):
+         model_path = name
+     else:
+         raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
+
+     with open(model_path, 'rb') as opened_file:
+         try:
+             # loading JIT archive
+             model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
+             state_dict = None
+         except RuntimeError:
+             # loading saved state dict
+             if jit:
+                 warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
+                 jit = False
+             state_dict = torch.load(opened_file, map_location="cpu")
+
+     if not jit:
+         model = build_model(state_dict or model.state_dict()).to(device)
+         if str(device) == "cpu":
+             model.float()
+         return model, _transform(model.visual.input_resolution)
+
+     # patch the device names
+     device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
+     device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
+
+     def _node_get(node: torch._C.Node, key: str):
+         """Gets attributes of a node which is polymorphic over return type.
+
+         From https://github.com/pytorch/pytorch/pull/82628
+         """
+         sel = node.kindOf(key)
+         return getattr(node, sel)(key)
+
+     def patch_device(module):
+         try:
+             graphs = [module.graph] if hasattr(module, "graph") else []
+         except RuntimeError:
+             graphs = []
+
+         if hasattr(module, "forward1"):
+             graphs.append(module.forward1.graph)
+
+         for graph in graphs:
+             for node in graph.findAllNodes("prim::Constant"):
+                 if "value" in node.attributeNames() and str(_node_get(node, "value")).startswith("cuda"):
+                     node.copyAttributes(device_node)
+
+     model.apply(patch_device)
+     patch_device(model.encode_image)
+     patch_device(model.encode_text)
+
+     # patch dtype to float32 on CPU
+     if str(device) == "cpu":
+         float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
+         float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
+         float_node = float_input.node()
+
+         def patch_float(module):
+             try:
+                 graphs = [module.graph] if hasattr(module, "graph") else []
+             except RuntimeError:
+                 graphs = []
+
+             if hasattr(module, "forward1"):
+                 graphs.append(module.forward1.graph)
+
+             for graph in graphs:
+                 for node in graph.findAllNodes("aten::to"):
+                     inputs = list(node.inputs())
+                     for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
+                         if _node_get(inputs[i].node(), "value") == 5:
+                             inputs[i].node().copyAttributes(float_node)
+
+         model.apply(patch_float)
+         patch_float(model.encode_image)
+         patch_float(model.encode_text)
+
+         model.float()
+
+     return model, _transform(model.input_resolution.item())
+
+
+ def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> Union[torch.IntTensor, torch.LongTensor]:
+     """
+     Returns the tokenized representation of given input string(s)
+
+     Parameters
+     ----------
+     texts : Union[str, List[str]]
+         An input string or a list of input strings to tokenize
+
+     context_length : int
+         The context length to use; all CLIP models use 77 as the context length
+
+     truncate: bool
+         Whether to truncate the text in case its encoding is longer than the context length
+
+     Returns
+     -------
+     A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
+     We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
+     """
+     if isinstance(texts, str):
+         texts = [texts]
+
+     sot_token = _tokenizer.encoder["<|startoftext|>"]
+     eot_token = _tokenizer.encoder["<|endoftext|>"]
+     all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
+     # if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
+     #     result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
+     # else:
+     result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
+
+     for i, tokens in enumerate(all_tokens):
+         if len(tokens) > context_length:
+             if truncate:
+                 tokens = tokens[:context_length]
+                 tokens[-1] = eot_token
+             else:
+                 raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
+         result[i, :len(tokens)] = torch.tensor(tokens)
+
+     return result
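
A minimal sanity-check for the vendored CLIP wrapper above (the image path and prompt strings are illustrative placeholders, not files from this repo):

```python
import torch
from PIL import Image
import clip

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/16", device=device)  # downloads to ~/.cache/clip on first use

image = preprocess(Image.open("example.jpg")).unsqueeze(0).to(device)  # hypothetical input image
text = clip.tokenize(["a photo of a face", "a photo of a dog"]).to(device)

with torch.no_grad():
    logits_per_image, _ = model(image, text)
    probs = logits_per_image.softmax(dim=-1)  # similarity of the image to each prompt
print(probs)
```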
clip/clipseg.py ADDED
@@ -0,0 +1,538 @@
+ import math
+ from os.path import basename, dirname, join, isfile, expanduser
+ import torch
+ from torch import nn
+ from torch.nn import functional as nnf
+ from torch.nn.modules.activation import ReLU
+
+
+ def get_prompt_list(prompt):
+     if prompt == 'plain':
+         return ['{}']
+     elif prompt == 'fixed':
+         return ['a photo of a {}.']
+     elif prompt == 'shuffle':
+         return ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.']
+     elif prompt == 'shuffle+':
+         return ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.',
+                 'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.',
+                 'a bad photo of a {}.', 'a photo of the {}.']
+     else:
+         raise ValueError('Invalid value for prompt')
+
+
+ def forward_multihead_attention(x, b, with_aff=False, attn_mask=None):
+     """
+     Simplified version of multihead attention (taken from torch source code but without tons of if clauses).
+     The mlp and layer norm come from CLIP.
+     x: input.
+     b: multihead attention module.
+     """
+
+     x_ = b.ln_1(x)
+     q, k, v = nnf.linear(x_, b.attn.in_proj_weight, b.attn.in_proj_bias).chunk(3, dim=-1)
+     tgt_len, bsz, embed_dim = q.size()
+
+     head_dim = embed_dim // b.attn.num_heads
+     scaling = float(head_dim) ** -0.5
+
+     q = q.contiguous().view(tgt_len, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1)
+     k = k.contiguous().view(-1, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1)
+     v = v.contiguous().view(-1, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1)
+
+     q = q * scaling
+
+     attn_output_weights = torch.bmm(q, k.transpose(1, 2))  # n_heads * batch_size, tokens^2, tokens^2
+     if attn_mask is not None:
+
+         attn_mask_type, attn_mask = attn_mask
+         n_heads = attn_output_weights.size(0) // attn_mask.size(0)
+         attn_mask = attn_mask.repeat(n_heads, 1)
+
+         if attn_mask_type == 'cls_token':
+             # the mask only affects similarities compared to the readout-token.
+             attn_output_weights[:, 0, 1:] = attn_output_weights[:, 0, 1:] * attn_mask[None, ...]
+             # attn_output_weights[:, 0, 0] = 0*attn_output_weights[:, 0, 0]
+
+         if attn_mask_type == 'all':
+             # print(attn_output_weights.shape, attn_mask[:, None].shape)
+             attn_output_weights[:, 1:, 1:] = attn_output_weights[:, 1:, 1:] * attn_mask[:, None]
+
+     attn_output_weights = torch.softmax(attn_output_weights, dim=-1)
+
+     attn_output = torch.bmm(attn_output_weights, v)
+     attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
+     attn_output = b.attn.out_proj(attn_output)
+
+     x = x + attn_output
+     x = x + b.mlp(b.ln_2(x))
+
+     if with_aff:
+         return x, attn_output_weights
+     else:
+         return x
+
+
+ class CLIPDenseBase(nn.Module):
+
+     def __init__(self, version, reduce_cond, reduce_dim, prompt, n_tokens):
+         super().__init__()
+
+         import clip
+
+         # prec = torch.FloatTensor
+         self.clip_model, _ = clip.load(version, device='cpu', jit=False)
+         self.model = self.clip_model.visual
+
+         # if not None, scale conv weights such that we obtain n_tokens.
+         self.n_tokens = n_tokens
+
+         for p in self.clip_model.parameters():
+             p.requires_grad_(False)
+
+         # conditional
+         if reduce_cond is not None:
+             self.reduce_cond = nn.Linear(512, reduce_cond)
+             for p in self.reduce_cond.parameters():
+                 p.requires_grad_(False)
+         else:
+             self.reduce_cond = None
+
+         self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)
+         self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)
+
+         self.reduce = nn.Linear(768, reduce_dim)
+
+         self.prompt_list = get_prompt_list(prompt)
+
+         # precomputed prompts
+         import pickle
+         if isfile('precomputed_prompt_vectors.pickle'):
+             precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb'))
+             self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()}
+         else:
+             self.precomputed_prompts = dict()
+
+     def rescaled_pos_emb(self, new_size):
+         assert len(new_size) == 2
+
+         a = self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape)
+         b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T
+         return torch.cat([self.model.positional_embedding[:1], b])
+
+     def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None):
+
+         with torch.no_grad():
+
+             inp_size = x_inp.shape[2:]
+
+             if self.n_tokens is not None:
+                 stride2 = x_inp.shape[2] // self.n_tokens
+                 conv_weight2 = nnf.interpolate(self.model.conv1.weight, (stride2, stride2), mode='bilinear', align_corners=True)
+                 x = nnf.conv2d(x_inp, conv_weight2, bias=self.model.conv1.bias, stride=stride2, dilation=self.model.conv1.dilation)
+             else:
+                 x = self.model.conv1(x_inp)  # shape = [*, width, grid, grid]
+
+             x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
+             x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
+
+             x = torch.cat([self.model.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
+
+             standard_n_tokens = 50 if self.model.conv1.kernel_size[0] == 32 else 197
+
+             if x.shape[1] != standard_n_tokens:
+                 new_shape = int(math.sqrt(x.shape[1]-1))
+                 x = x + self.rescaled_pos_emb((new_shape, new_shape)).to(x.dtype)[None, :, :]
+             else:
+                 x = x + self.model.positional_embedding.to(x.dtype)
+
+             x = self.model.ln_pre(x)
+
+             x = x.permute(1, 0, 2)  # NLD -> LND
+
+             activations, affinities = [], []
+             for i, res_block in enumerate(self.model.transformer.resblocks):
+
+                 if mask is not None:
+                     mask_layer, mask_type, mask_tensor = mask
+                     if mask_layer == i or mask_layer == 'all':
+                         # import ipdb; ipdb.set_trace()
+                         size = int(math.sqrt(x.shape[0] - 1))
+
+                         attn_mask = (mask_type, nnf.interpolate(mask_tensor.unsqueeze(1).float(), (size, size)).view(mask_tensor.shape[0], size * size))
+
+                     else:
+                         attn_mask = None
+                 else:
+                     attn_mask = None
+
+                 x, aff_per_head = forward_multihead_attention(x, res_block, with_aff=True, attn_mask=attn_mask)
+
+                 if i in extract_layers:
+                     affinities += [aff_per_head]
+
+                     # if self.n_tokens is not None:
+                     #     activations += [nnf.interpolate(x, inp_size, mode='bilinear', align_corners=True)]
+                     # else:
+                     activations += [x]
+
+                 if len(extract_layers) > 0 and i == max(extract_layers) and skip:
+                     print('early skip')
+                     break
+
+             x = x.permute(1, 0, 2)  # LND -> NLD
+             x = self.model.ln_post(x[:, 0, :])
+
+             if self.model.proj is not None:
+                 x = x @ self.model.proj
+
+             return x, activations, affinities
+
+     def sample_prompts(self, words, prompt_list=None):
+
+         prompt_list = prompt_list if prompt_list is not None else self.prompt_list
+
+         prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True)
+         prompts = [prompt_list[i] for i in prompt_indices]
+         return [prompt.format(w) for prompt, w in zip(prompts, words)]
+
+     def get_cond_vec(self, conditional, batch_size):
+         # compute conditional from a single string
+         if conditional is not None and type(conditional) == str:
+             cond = self.compute_conditional(conditional)
+             cond = cond.repeat(batch_size, 1)
+
+         # compute conditional from string list/tuple
+         elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str:
+             assert len(conditional) == batch_size
+             cond = self.compute_conditional(conditional)
+
+         # use conditional directly
+         elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2:
+             cond = conditional
+
+         # compute conditional from image
+         elif conditional is not None and type(conditional) == torch.Tensor:
+             with torch.no_grad():
+                 cond, _, _ = self.visual_forward(conditional)
+         else:
+             raise ValueError('invalid conditional')
+         return cond
+
+     def compute_conditional(self, conditional):
+         import clip
+
+         dev = next(self.parameters()).device
+
+         if type(conditional) in {list, tuple}:
+             text_tokens = clip.tokenize(conditional).to(dev)
+             cond = self.clip_model.encode_text(text_tokens)
+         else:
+             if conditional in self.precomputed_prompts:
+                 cond = self.precomputed_prompts[conditional].float().to(dev)
+             else:
+                 text_tokens = clip.tokenize([conditional]).to(dev)
+                 cond = self.clip_model.encode_text(text_tokens)[0]
+
+         if self.shift_vector is not None:
+             return cond + self.shift_vector
+         else:
+             return cond
+
+
+ def clip_load_untrained(version):
+     assert version == 'ViT-B/16'
+     from clip.model import CLIP
+     from clip.clip import _MODELS, _download
+     # _download requires an explicit cache directory; use the same default as clip.load
+     model = torch.jit.load(_download(_MODELS['ViT-B/16'], expanduser('~/.cache/clip'))).eval()
+     state_dict = model.state_dict()
+
+     vision_width = state_dict["visual.conv1.weight"].shape[0]
+     vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
+     vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
+     grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
+     image_resolution = vision_patch_size * grid_size
+     embed_dim = state_dict["text_projection"].shape[1]
+     context_length = state_dict["positional_embedding"].shape[0]
+     vocab_size = state_dict["token_embedding.weight"].shape[0]
+     transformer_width = state_dict["ln_final.weight"].shape[0]
+     transformer_heads = transformer_width // 64
+     transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
+
+     return CLIP(embed_dim, image_resolution, vision_layers, vision_width, vision_patch_size,
+                 context_length, vocab_size, transformer_width, transformer_heads, transformer_layers)
+
+
+ class CLIPDensePredT(CLIPDenseBase):
+
+     def __init__(self, version='ViT-B/32', extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed',
+                  extra_blocks=0, reduce_cond=None, fix_shift=False,
+                  learn_trans_conv_only=False, limit_to_clip_only=False, upsample=False,
+                  add_calibration=False, rev_activations=False, trans_conv=None, n_tokens=None, complex_trans_conv=False):
+
+         super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens)
+         # device = 'cpu'
+
+         self.extract_layers = extract_layers
+         self.cond_layer = cond_layer
+         self.limit_to_clip_only = limit_to_clip_only
+         self.process_cond = None
+         self.rev_activations = rev_activations
+
+         depth = len(extract_layers)
+
+         if add_calibration:
+             self.calibration_conds = 1
+
+         self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None
+
+         self.add_activation1 = True
+
+         self.version = version
+
+         self.token_shape = {'ViT-B/32': (7, 7), 'ViT-B/16': (14, 14)}[version]
+
+         if fix_shift:
+             # self.shift_vector = nn.Parameter(torch.load(join(dirname(basename(__file__)), 'clip_text_shift_vector.pth')), requires_grad=False)
+             self.shift_vector = nn.Parameter(torch.load(join(dirname(basename(__file__)), 'shift_text_to_vis.pth')), requires_grad=False)
+             # self.shift_vector = nn.Parameter(-1*torch.load(join(dirname(basename(__file__)), 'shift2.pth')), requires_grad=False)
+         else:
+             self.shift_vector = None
+
+         if trans_conv is None:
+             trans_conv_ks = {'ViT-B/32': (32, 32), 'ViT-B/16': (16, 16)}[version]
+         else:
+             # explicitly define transposed conv kernel size
+             trans_conv_ks = (trans_conv, trans_conv)
+
+         if not complex_trans_conv:
+             self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)
+         else:
+             assert trans_conv_ks[0] == trans_conv_ks[1]
+
+             tp_kernels = (trans_conv_ks[0] // 4, trans_conv_ks[0] // 4)
+
+             self.trans_conv = nn.Sequential(
+                 nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1),
+                 nn.ReLU(),
+                 nn.ConvTranspose2d(reduce_dim, reduce_dim // 2, kernel_size=tp_kernels[0], stride=tp_kernels[0]),
+                 nn.ReLU(),
+                 nn.ConvTranspose2d(reduce_dim // 2, 1, kernel_size=tp_kernels[1], stride=tp_kernels[1]),
+             )
+
+         # self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)
+
+         assert len(self.extract_layers) == depth
+
+         self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)])
+         self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))])
+         self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)])
+
+         # refinement and trans conv
+
+         if learn_trans_conv_only:
+             for p in self.parameters():
+                 p.requires_grad_(False)
+
+             for p in self.trans_conv.parameters():
+                 p.requires_grad_(True)
+
+         self.prompt_list = get_prompt_list(prompt)
+
+     def forward(self, inp_image, conditional=None, return_features=False, mask=None):
+
+         assert type(return_features) == bool
+
+         inp_image = inp_image.to(self.model.positional_embedding.device)
+
+         if mask is not None:
+             raise ValueError('mask not supported')
+
+         # x_inp = normalize(inp_image)
+         x_inp = inp_image
+
+         bs, dev = inp_image.shape[0], x_inp.device
+
+         cond = self.get_cond_vec(conditional, bs)
+
+         visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers))
+
+         activation1 = activations[0]
+         activations = activations[1:]
+
+         _activations = activations[::-1] if not self.rev_activations else activations
+
+         a = None
+         for i, (activation, block, reduce) in enumerate(zip(_activations, self.blocks, self.reduces)):
+
+             if a is not None:
+                 a = reduce(activation) + a
+             else:
+                 a = reduce(activation)
+
+             if i == self.cond_layer:
+                 if self.reduce_cond is not None:
+                     cond = self.reduce_cond(cond)
+
+                 a = self.film_mul(cond) * a + self.film_add(cond)
+
+             a = block(a)
+
+         for block in self.extra_blocks:
+             a = a + block(a)
+
+         a = a[1:].permute(1, 2, 0)  # rm cls token and -> BS, Feats, Tokens
+
+         size = int(math.sqrt(a.shape[2]))
+
+         a = a.view(bs, a.shape[1], size, size)
+
+         a = self.trans_conv(a)
+
+         if self.n_tokens is not None:
+             a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear', align_corners=True)
+
+         if self.upsample_proj is not None:
+             a = self.upsample_proj(a)
+             a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear')
+
+         if return_features:
+             return a, visual_q, cond, [activation1] + activations
+         else:
+             return a,
+
+
+ class CLIPDensePredTMasked(CLIPDensePredT):
+
+     def __init__(self, version='ViT-B/32', extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4,
+                  prompt='fixed', extra_blocks=0, reduce_cond=None, fix_shift=False, learn_trans_conv_only=False,
+                  refine=None, limit_to_clip_only=False, upsample=False, add_calibration=False, n_tokens=None):
+
+         super().__init__(version=version, extract_layers=extract_layers, cond_layer=cond_layer, reduce_dim=reduce_dim,
+                          n_heads=n_heads, prompt=prompt, extra_blocks=extra_blocks, reduce_cond=reduce_cond,
+                          fix_shift=fix_shift, learn_trans_conv_only=learn_trans_conv_only,
+                          limit_to_clip_only=limit_to_clip_only, upsample=upsample, add_calibration=add_calibration,
+                          n_tokens=n_tokens)
+
+     def visual_forward_masked(self, img_s, seg_s):
+         return super().visual_forward(img_s, mask=('all', 'cls_token', seg_s))
+
+     def forward(self, img_q, cond_or_img_s, seg_s=None, return_features=False):
+
+         if seg_s is None:
+             cond = cond_or_img_s
+         else:
+             img_s = cond_or_img_s
+
+             with torch.no_grad():
+                 cond, _, _ = self.visual_forward_masked(img_s, seg_s)
+
+         return super().forward(img_q, cond, return_features=return_features)
+
+
+ class CLIPDenseBaseline(CLIPDenseBase):
+
+     def __init__(self, version='ViT-B/32', cond_layer=0,
+                  extract_layer=9, reduce_dim=128, reduce2_dim=None, prompt='fixed',
+                  reduce_cond=None, limit_to_clip_only=False, n_tokens=None):
+
+         super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens)
+         device = 'cpu'
+
+         # self.cond_layer = cond_layer
+         self.extract_layer = extract_layer
+         self.limit_to_clip_only = limit_to_clip_only
+         self.shift_vector = None
+
+         self.token_shape = {'ViT-B/32': (7, 7), 'ViT-B/16': (14, 14)}[version]
+
+         assert reduce2_dim is not None
+
+         self.reduce2 = nn.Sequential(
+             nn.Linear(reduce_dim, reduce2_dim),
+             nn.ReLU(),
+             nn.Linear(reduce2_dim, reduce_dim)
+         )
+
+         trans_conv_ks = {'ViT-B/32': (32, 32), 'ViT-B/16': (16, 16)}[version]
+         self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)
+
+     def forward(self, inp_image, conditional=None, return_features=False):
+
+         inp_image = inp_image.to(self.model.positional_embedding.device)
+
+         # x_inp = normalize(inp_image)
+         x_inp = inp_image
+
+         bs, dev = inp_image.shape[0], x_inp.device
+
+         cond = self.get_cond_vec(conditional, bs)
+
+         visual_q, activations, affinities = self.visual_forward(x_inp, extract_layers=[self.extract_layer])
+
+         a = activations[0]
+         a = self.reduce(a)
+         a = self.film_mul(cond) * a + self.film_add(cond)
+
+         if self.reduce2 is not None:
+             a = self.reduce2(a)
+
+         # the original model would execute a transformer block here
+
+         a = a[1:].permute(1, 2, 0)  # rm cls token and -> BS, Feats, Tokens
+
+         size = int(math.sqrt(a.shape[2]))
+
+         a = a.view(bs, a.shape[1], size, size)
+         a = self.trans_conv(a)
+
+         if return_features:
+             return a, visual_q, cond, activations
+         else:
+             return a,
+
+
+ class CLIPSegMultiLabel(nn.Module):
+
+     def __init__(self, model) -> None:
+         super().__init__()
+
+         from third_party.JoEm.data_loader import get_seen_idx, get_unseen_idx, VOC
+
+         self.pascal_classes = VOC
+
+         from clip.clipseg import CLIPDensePredT
+         from general_utils import load_model
+         # self.clipseg = load_model('rd64-vit16-neg0.2-phrasecut', strict=False)
+         self.clipseg = load_model(model, strict=False)
+
+         self.clipseg.eval()
+
+     def forward(self, x):
+
+         bs = x.shape[0]
+         # construct output tensor
+         out = torch.ones(21, bs, 352, 352).to(x.device) * -10
+
+         for class_id, class_name in enumerate(self.pascal_classes):
+
+             fac = 3 if class_name == 'background' else 1
+
+             with torch.no_grad():
+                 pred = torch.sigmoid(self.clipseg(x, class_name)[0][:, 0]) * fac
+
+             out[class_id] += pred
+
+         out = out.permute(1, 0, 2, 3)
+
+         return out
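
A minimal sketch of querying the CLIPSeg decoder defined above with a text prompt. The constructor arguments follow the published rd64 configuration, and the checkpoint path is a placeholder; adjust both to whatever weights are actually used:

```python
import torch
from clip.clipseg import CLIPDensePredT

# rd64 configuration (reduce_dim=64, refined transposed conv); the weights path is hypothetical
model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, complex_trans_conv=True)
model.load_state_dict(torch.load('rd64-uni-refined.pth', map_location='cpu'), strict=False)
model.eval()

img = torch.randn(1, 3, 352, 352)  # stand-in for a normalized RGB batch
with torch.no_grad():
    logits = model(img, conditional='a face')[0]  # forward returns a 1-tuple of logits
mask = torch.sigmoid(logits)[0, 0]  # soft segmentation mask at the logits' spatial size
```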
clip/model.py ADDED
@@ -0,0 +1,436 @@
+ from collections import OrderedDict
+ from typing import Tuple, Union
+
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ from torch import nn
+
+
+ class Bottleneck(nn.Module):
+     expansion = 4
+
+     def __init__(self, inplanes, planes, stride=1):
+         super().__init__()
+
+         # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
+         self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
+         self.bn1 = nn.BatchNorm2d(planes)
+         self.relu1 = nn.ReLU(inplace=True)
+
+         self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
+         self.bn2 = nn.BatchNorm2d(planes)
+         self.relu2 = nn.ReLU(inplace=True)
+
+         self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
+
+         self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
+         self.bn3 = nn.BatchNorm2d(planes * self.expansion)
+         self.relu3 = nn.ReLU(inplace=True)
+
+         self.downsample = None
+         self.stride = stride
+
+         if stride > 1 or inplanes != planes * Bottleneck.expansion:
+             # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
+             self.downsample = nn.Sequential(OrderedDict([
+                 ("-1", nn.AvgPool2d(stride)),
+                 ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
+                 ("1", nn.BatchNorm2d(planes * self.expansion))
+             ]))
+
+     def forward(self, x: torch.Tensor):
+         identity = x
+
+         out = self.relu1(self.bn1(self.conv1(x)))
+         out = self.relu2(self.bn2(self.conv2(out)))
+         out = self.avgpool(out)
+         out = self.bn3(self.conv3(out))
+
+         if self.downsample is not None:
+             identity = self.downsample(x)
+
+         out += identity
+         out = self.relu3(out)
+         return out
+
+
+ class AttentionPool2d(nn.Module):
+     def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
+         super().__init__()
+         self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
+         self.k_proj = nn.Linear(embed_dim, embed_dim)
+         self.q_proj = nn.Linear(embed_dim, embed_dim)
+         self.v_proj = nn.Linear(embed_dim, embed_dim)
+         self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
+         self.num_heads = num_heads
+
+     def forward(self, x):
+         x = x.flatten(start_dim=2).permute(2, 0, 1)  # NCHW -> (HW)NC
+         x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
+         x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
+         x, _ = F.multi_head_attention_forward(
+             query=x[:1], key=x, value=x,
+             embed_dim_to_check=x.shape[-1],
+             num_heads=self.num_heads,
+             q_proj_weight=self.q_proj.weight,
+             k_proj_weight=self.k_proj.weight,
+             v_proj_weight=self.v_proj.weight,
+             in_proj_weight=None,
+             in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
+             bias_k=None,
+             bias_v=None,
+             add_zero_attn=False,
+             dropout_p=0,
+             out_proj_weight=self.c_proj.weight,
+             out_proj_bias=self.c_proj.bias,
+             use_separate_proj_weight=True,
+             training=self.training,
+             need_weights=False
+         )
+         return x.squeeze(0)
+
+
+ class ModifiedResNet(nn.Module):
+     """
+     A ResNet class that is similar to torchvision's but contains the following changes:
+     - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
+     - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
+     - The final pooling layer is a QKV attention instead of an average pool
+     """
+
+     def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
+         super().__init__()
+         self.output_dim = output_dim
+         self.input_resolution = input_resolution
+
+         # the 3-layer stem
+         self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
+         self.bn1 = nn.BatchNorm2d(width // 2)
+         self.relu1 = nn.ReLU(inplace=True)
+         self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
+         self.bn2 = nn.BatchNorm2d(width // 2)
+         self.relu2 = nn.ReLU(inplace=True)
+         self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
+         self.bn3 = nn.BatchNorm2d(width)
+         self.relu3 = nn.ReLU(inplace=True)
+         self.avgpool = nn.AvgPool2d(2)
+
+         # residual layers
+         self._inplanes = width  # this is a *mutable* variable used during construction
+         self.layer1 = self._make_layer(width, layers[0])
+         self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
+         self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
+         self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
+
+         embed_dim = width * 32  # the ResNet feature dimension
+         self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
+
+     def _make_layer(self, planes, blocks, stride=1):
+         layers = [Bottleneck(self._inplanes, planes, stride)]
+
+         self._inplanes = planes * Bottleneck.expansion
+         for _ in range(1, blocks):
+             layers.append(Bottleneck(self._inplanes, planes))
+
+         return nn.Sequential(*layers)
+
+     def forward(self, x):
+         def stem(x):
+             x = self.relu1(self.bn1(self.conv1(x)))
+             x = self.relu2(self.bn2(self.conv2(x)))
+             x = self.relu3(self.bn3(self.conv3(x)))
+             x = self.avgpool(x)
+             return x
+
+         x = x.type(self.conv1.weight.dtype)
+         x = stem(x)
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         x = self.layer4(x)
+         x = self.attnpool(x)
+
+         return x
+
+
+ class LayerNorm(nn.LayerNorm):
+     """Subclass torch's LayerNorm to handle fp16."""
+
+     def forward(self, x: torch.Tensor):
+         orig_type = x.dtype
+         ret = super().forward(x.type(torch.float32))
+         return ret.type(orig_type)
+
+
+ class QuickGELU(nn.Module):
+     def forward(self, x: torch.Tensor):
+         return x * torch.sigmoid(1.702 * x)
+
+
+ class ResidualAttentionBlock(nn.Module):
+     def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
+         super().__init__()
+
+         self.attn = nn.MultiheadAttention(d_model, n_head)
+         self.ln_1 = LayerNorm(d_model)
+         self.mlp = nn.Sequential(OrderedDict([
+             ("c_fc", nn.Linear(d_model, d_model * 4)),
+             ("gelu", QuickGELU()),
+             ("c_proj", nn.Linear(d_model * 4, d_model))
+         ]))
+         self.ln_2 = LayerNorm(d_model)
+         self.attn_mask = attn_mask
+
+     def attention(self, x: torch.Tensor):
+         self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
+         return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
+
+     def forward(self, x: torch.Tensor):
+         x = x + self.attention(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+
+ class Transformer(nn.Module):
+     def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
+         super().__init__()
+         self.width = width
+         self.layers = layers
+         self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
+
+     def forward(self, x: torch.Tensor):
+         return self.resblocks(x)
+
+
+ class VisionTransformer(nn.Module):
+     def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
+         super().__init__()
+         self.input_resolution = input_resolution
+         self.output_dim = output_dim
+         self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
+
+         scale = width ** -0.5
+         self.class_embedding = nn.Parameter(scale * torch.randn(width))
+         self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
+         self.ln_pre = LayerNorm(width)
+
+         self.transformer = Transformer(width, layers, heads)
+
+         self.ln_post = LayerNorm(width)
+         self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
+
+     def forward(self, x: torch.Tensor):
+         x = self.conv1(x)  # shape = [*, width, grid, grid]
+         x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
+         x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
+         x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
+         x = x + self.positional_embedding.to(x.dtype)
+         x = self.ln_pre(x)
+
+         x = x.permute(1, 0, 2)  # NLD -> LND
+         x = self.transformer(x)
+         x = x.permute(1, 0, 2)  # LND -> NLD
+
+         x = self.ln_post(x[:, 0, :])
+
+         if self.proj is not None:
+             x = x @ self.proj
+
+         return x
+
+
+ class CLIP(nn.Module):
+     def __init__(self,
+                  embed_dim: int,
+                  # vision
+                  image_resolution: int,
+                  vision_layers: Union[Tuple[int, int, int, int], int],
+                  vision_width: int,
+                  vision_patch_size: int,
+                  # text
+                  context_length: int,
+                  vocab_size: int,
+                  transformer_width: int,
+                  transformer_heads: int,
+                  transformer_layers: int
+                  ):
+         super().__init__()
+
+         self.context_length = context_length
+
+         if isinstance(vision_layers, (tuple, list)):
+             vision_heads = vision_width * 32 // 64
+             self.visual = ModifiedResNet(
+                 layers=vision_layers,
+                 output_dim=embed_dim,
+                 heads=vision_heads,
+                 input_resolution=image_resolution,
+                 width=vision_width
+             )
+         else:
+             vision_heads = vision_width // 64
+             self.visual = VisionTransformer(
+                 input_resolution=image_resolution,
+                 patch_size=vision_patch_size,
+                 width=vision_width,
+                 layers=vision_layers,
+                 heads=vision_heads,
+                 output_dim=embed_dim
+             )
+
+         self.transformer = Transformer(
+             width=transformer_width,
+             layers=transformer_layers,
+             heads=transformer_heads,
+             attn_mask=self.build_attention_mask()
+         )
+
+         self.vocab_size = vocab_size
+         self.token_embedding = nn.Embedding(vocab_size, transformer_width)
+         self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
+         self.ln_final = LayerNorm(transformer_width)
+
+         self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
+         self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
+
+         self.initialize_parameters()
+
+     def initialize_parameters(self):
+         nn.init.normal_(self.token_embedding.weight, std=0.02)
+         nn.init.normal_(self.positional_embedding, std=0.01)
+
+         if isinstance(self.visual, ModifiedResNet):
+             if self.visual.attnpool is not None:
+                 std = self.visual.attnpool.c_proj.in_features ** -0.5
+                 nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
+                 nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
+                 nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
+                 nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
+
+             for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
+                 for name, param in resnet_block.named_parameters():
+                     if name.endswith("bn3.weight"):
+                         nn.init.zeros_(param)
+
+         proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
+         attn_std = self.transformer.width ** -0.5
+         fc_std = (2 * self.transformer.width) ** -0.5
+         for block in self.transformer.resblocks:
+             nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
+             nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
+             nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
+             nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
+
+         if self.text_projection is not None:
+             nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
+
+     def build_attention_mask(self):
+         # lazily create causal attention mask, with full attention between the vision tokens
+         # pytorch uses additive attention mask; fill with -inf
+         mask = torch.empty(self.context_length, self.context_length)
+         mask.fill_(float("-inf"))
+         mask.triu_(1)  # zero out the lower diagonal
+         return mask
+
+     @property
+     def dtype(self):
+         return self.visual.conv1.weight.dtype
+
+     def encode_image(self, image):
+         return self.visual(image.type(self.dtype))
+
+     def encode_text(self, text):
+         x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]
+
+         x = x + self.positional_embedding.type(self.dtype)
+         x = x.permute(1, 0, 2)  # NLD -> LND
+         x = self.transformer(x)
+         x = x.permute(1, 0, 2)  # LND -> NLD
+         x = self.ln_final(x).type(self.dtype)
+
+         # x.shape = [batch_size, n_ctx, transformer.width]
+         # take features from the eot embedding (eot_token is the highest number in each sequence)
+         x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
+
+         return x
+
+     def forward(self, image, text):
+         image_features = self.encode_image(image)
+         text_features = self.encode_text(text)
+
+         # normalized features
+         image_features = image_features / image_features.norm(dim=1, keepdim=True)
+         text_features = text_features / text_features.norm(dim=1, keepdim=True)
+
+         # cosine similarity as logits
+         logit_scale = self.logit_scale.exp()
+         logits_per_image = logit_scale * image_features @ text_features.t()
+         logits_per_text = logits_per_image.t()
+
+         # shape = [global_batch_size, global_batch_size]
+         return logits_per_image, logits_per_text
+
+
+ def convert_weights(model: nn.Module):
+     """Convert applicable model parameters to fp16"""
+
+     def _convert_weights_to_fp16(l):
+         if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
+             l.weight.data = l.weight.data.half()
+             if l.bias is not None:
+                 l.bias.data = l.bias.data.half()
+
+         if isinstance(l, nn.MultiheadAttention):
+             for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
+                 tensor = getattr(l, attr)
+                 if tensor is not None:
+                     tensor.data = tensor.data.half()
+
+         for name in ["text_projection", "proj"]:
+             if hasattr(l, name):
+                 attr = getattr(l, name)
+                 if attr is not None:
+                     attr.data = attr.data.half()
+
+     model.apply(_convert_weights_to_fp16)
+
+
+ def build_model(state_dict: dict):
+     vit = "visual.proj" in state_dict
+
+     if vit:
+         vision_width = state_dict["visual.conv1.weight"].shape[0]
+         vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
+         vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
+         grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
+         image_resolution = vision_patch_size * grid_size
+     else:
+         counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
+         vision_layers = tuple(counts)
+         vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
+         output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
+         vision_patch_size = None
+         assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
+         image_resolution = output_width * 32
+
+     embed_dim = state_dict["text_projection"].shape[1]
+     context_length = state_dict["positional_embedding"].shape[0]
+     vocab_size = state_dict["token_embedding.weight"].shape[0]
+     transformer_width = state_dict["ln_final.weight"].shape[0]
+     transformer_heads = transformer_width // 64
+     transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
+
+     model = CLIP(
+         embed_dim,
+         image_resolution, vision_layers, vision_width, vision_patch_size,
+         context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
+     )
+
+     for key in ["input_resolution", "context_length", "vocab_size"]:
+         if key in state_dict:
+             del state_dict[key]
+
+     convert_weights(model)
+     model.load_state_dict(state_dict)
+     return model.eval()
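
`build_model` above infers every architecture hyperparameter from tensor shapes in the checkpoint, so a state dict alone is enough to rebuild the eager model. A sketch (the checkpoint filename is a placeholder for any locally downloaded CLIP JIT archive):

```python
import torch
from clip.model import build_model

jit_model = torch.jit.load("ViT-B-16.pt", map_location="cpu").eval()  # hypothetical local JIT archive
model = build_model(jit_model.state_dict())  # widths, depths and resolution are read off the weights
print(model.visual.input_resolution, model.context_length, model.vocab_size)
```

Note that `build_model` calls `convert_weights`, so the returned module is fp16; `clip.load` casts it back to fp32 when the target device is the CPU.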
clip/simple_tokenizer.py ADDED
@@ -0,0 +1,132 @@
+ import gzip
+ import html
+ import os
+ from functools import lru_cache
+
+ import ftfy
+ import regex as re
+
+
+ @lru_cache()
+ def default_bpe():
+     return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
+
+
+ @lru_cache()
+ def bytes_to_unicode():
+     """
+     Returns a list of utf-8 bytes and a corresponding list of unicode strings.
+     The reversible bpe codes work on unicode strings.
+     This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+     When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+     This is a significant percentage of your normal, say, 32K bpe vocab.
+     To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+     This also avoids mapping to whitespace/control characters the bpe code barfs on.
+     """
+     bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
+     cs = bs[:]
+     n = 0
+     for b in range(2**8):
+         if b not in bs:
+             bs.append(b)
+             cs.append(2**8+n)
+             n += 1
+     cs = [chr(n) for n in cs]
+     return dict(zip(bs, cs))
+
+
+ def get_pairs(word):
+     """Return set of symbol pairs in a word.
+     Word is represented as tuple of symbols (symbols being variable-length strings).
+     """
+     pairs = set()
+     prev_char = word[0]
+     for char in word[1:]:
+         pairs.add((prev_char, char))
+         prev_char = char
+     return pairs
+
+
+ def basic_clean(text):
+     text = ftfy.fix_text(text)
+     text = html.unescape(html.unescape(text))
+     return text.strip()
+
+
+ def whitespace_clean(text):
+     text = re.sub(r'\s+', ' ', text)
+     text = text.strip()
+     return text
+
+
+ class SimpleTokenizer(object):
+     def __init__(self, bpe_path: str = default_bpe()):
+         self.byte_encoder = bytes_to_unicode()
+         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+         merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
+         merges = merges[1:49152-256-2+1]
+         merges = [tuple(merge.split()) for merge in merges]
+         vocab = list(bytes_to_unicode().values())
+         vocab = vocab + [v+'</w>' for v in vocab]
+         for merge in merges:
+             vocab.append(''.join(merge))
+         vocab.extend(['<|startoftext|>', '<|endoftext|>'])
+         self.encoder = dict(zip(vocab, range(len(vocab))))
+         self.decoder = {v: k for k, v in self.encoder.items()}
+         self.bpe_ranks = dict(zip(merges, range(len(merges))))
+         self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
+         self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
+
+     def bpe(self, token):
+         if token in self.cache:
+             return self.cache[token]
+         word = tuple(token[:-1]) + (token[-1] + '</w>',)
+         pairs = get_pairs(word)
+
+         if not pairs:
+             return token+'</w>'
+
+         while True:
+             bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
+             if bigram not in self.bpe_ranks:
+                 break
+             first, second = bigram
+             new_word = []
+             i = 0
+             while i < len(word):
+                 try:
+                     j = word.index(first, i)
+                     new_word.extend(word[i:j])
+                     i = j
+                 except ValueError:
+                     new_word.extend(word[i:])
+                     break
+
+                 if word[i] == first and i < len(word)-1 and word[i+1] == second:
+                     new_word.append(first+second)
+                     i += 2
+                 else:
+                     new_word.append(word[i])
+                     i += 1
+             new_word = tuple(new_word)
+             word = new_word
+             if len(word) == 1:
+                 break
+             else:
+                 pairs = get_pairs(word)
+         word = ' '.join(word)
+         self.cache[token] = word
+         return word
+
+     def encode(self, text):
+         bpe_tokens = []
+         text = whitespace_clean(basic_clean(text)).lower()
+         for token in re.findall(self.pat, text):
+             token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
+             bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
+         return bpe_tokens
+
+     def decode(self, tokens):
+         text = ''.join([self.decoder[token] for token in tokens])
+         text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
+         return text
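
The tokenizer round-trips through the byte-level BPE vocabulary shipped next to it (`bpe_simple_vocab_16e6.txt.gz`); a quick sketch:

```python
from clip.simple_tokenizer import SimpleTokenizer

tok = SimpleTokenizer()  # loads the gzipped BPE merges from the package directory
ids = tok.encode("a photo of a face")
print(ids)              # raw BPE ids; clip.tokenize() adds the <|startoftext|>/<|endoftext|> markers
print(tok.decode(ids))  # "a photo of a face " — each </w> marker decodes to a trailing space
```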
clip/vitseg.py ADDED
@@ -0,0 +1,286 @@
+ import math
+ from posixpath import basename, dirname, join
+ from os.path import isfile
+ # import clip
+ from clip.model import convert_weights
+ import torch
+ import json
+ from torch import nn
+ from torch.nn import functional as nnf
+ from torch.nn.modules import activation
+ from torch.nn.modules.activation import ReLU
+ from torchvision import transforms
+
+ normalize = transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
+
+ from torchvision.models import ResNet
+
+
+ def process_prompts(conditional, prompt_list, conditional_map):
+     # DEPRECATED
+
+     # randomly sample a synonym
+     words = [conditional_map[int(i)] for i in conditional]
+     words = [syns[torch.multinomial(torch.ones(len(syns)), 1, replacement=True).item()] for syns in words]
+     words = [w.replace('_', ' ') for w in words]
+
+     if prompt_list is not None:
+         prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True)
+         prompts = [prompt_list[i] for i in prompt_indices]
+     else:
+         prompts = ['a photo of {}'] * (len(words))
+
+     return [prompt.format(w) for prompt, w in zip(prompts, words)]
+
+
+ class VITDenseBase(nn.Module):
+
+     def rescaled_pos_emb(self, new_size):
+         assert len(new_size) == 2
+
+         a = self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape)
+         b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T
+         return torch.cat([self.model.positional_embedding[:1], b])
+
+     def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None):
+
+         with torch.no_grad():
+
+             x_inp = nnf.interpolate(x_inp, (384, 384))
+
+             x = self.model.patch_embed(x_inp)
+             cls_token = self.model.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
+             if self.model.dist_token is None:
+                 x = torch.cat((cls_token, x), dim=1)
+             else:
+                 x = torch.cat((cls_token, self.model.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
+             x = self.model.pos_drop(x + self.model.pos_embed)
+
+             activations = []
+             for i, block in enumerate(self.model.blocks):
+                 x = block(x)
+
+                 if i in extract_layers:
+                     # permute to be compatible with CLIP
+                     activations += [x.permute(1, 0, 2)]
+
+             x = self.model.norm(x)
+             x = self.model.head(self.model.pre_logits(x[:, 0]))
+
+             # again for CLIP compatibility
+             # x = x.permute(1, 0, 2)
+
+             return x, activations, None
+
+     def sample_prompts(self, words, prompt_list=None):
+
+         prompt_list = prompt_list if prompt_list is not None else self.prompt_list
+
+         prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True)
+         prompts = [prompt_list[i] for i in prompt_indices]
+         return [prompt.format(w) for prompt, w in zip(prompts, words)]
+
+     def get_cond_vec(self, conditional, batch_size):
+         # compute conditional from a single string
+         if conditional is not None and type(conditional) == str:
+             cond = self.compute_conditional(conditional)
+             cond = cond.repeat(batch_size, 1)
+
+         # compute conditional from string list/tuple
+         elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str:
+             assert len(conditional) == batch_size
+             cond = self.compute_conditional(conditional)
+
+         # use conditional directly
+         elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2:
+             cond = conditional
+
+         # compute conditional from image
+         elif conditional is not None and type(conditional) == torch.Tensor:
+             with torch.no_grad():
+                 cond, _, _ = self.visual_forward(conditional)
+         else:
+             raise ValueError('invalid conditional')
+         return cond
+
+     def compute_conditional(self, conditional):
+         import clip
+
+         dev = next(self.parameters()).device
+
+         if type(conditional) in {list, tuple}:
+             text_tokens = clip.tokenize(conditional).to(dev)
+             cond = self.clip_model.encode_text(text_tokens)
+         else:
+             if conditional in self.precomputed_prompts:
+                 cond = self.precomputed_prompts[conditional].float().to(dev)
+             else:
+                 text_tokens = clip.tokenize([conditional]).to(dev)
+                 cond = self.clip_model.encode_text(text_tokens)[0]
+
+         return cond
+
+
+ class VITDensePredT(VITDenseBase):
+
+     def __init__(self, extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed',
+                  depth=3, extra_blocks=0, reduce_cond=None, fix_shift=False,
+                  learn_trans_conv_only=False, refine=None, limit_to_clip_only=False, upsample=False,
+                  add_calibration=False, process_cond=None, not_pretrained=False):
+         super().__init__()
+         # device = 'cpu'
+
+         self.extract_layers = extract_layers
+         self.cond_layer = cond_layer
+         self.limit_to_clip_only = limit_to_clip_only
+         self.process_cond = None
+
+         if add_calibration:
+             self.calibration_conds = 1
+
+         self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None
+
+         self.add_activation1 = True
+
+         import timm
+         self.model = timm.create_model('vit_base_patch16_384', pretrained=True)
+         self.model.head = nn.Linear(768, 512 if reduce_cond is None else reduce_cond)
+
+         for p in self.model.parameters():
+             p.requires_grad_(False)
+
+         import clip
+         self.clip_model, _ = clip.load('ViT-B/16', device='cpu', jit=False)
+         # del self.clip_model.visual
+
+         self.token_shape = (14, 14)
+
+         # conditional
+         if reduce_cond is not None:
+             self.reduce_cond = nn.Linear(512, reduce_cond)
+             for p in self.reduce_cond.parameters():
+                 p.requires_grad_(False)
+         else:
+             self.reduce_cond = None
+
+         # self.film = AVAILABLE_BLOCKS['film'](512, 128)
+         self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)
+         self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)
+
+         # DEPRECATED
+         # self.conditional_map = {c['id']: c['synonyms'] for c in json.load(open(cond_map))}
+
+         assert len(self.extract_layers) == depth
+
+         self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)])
+         self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))])
+         self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)])
+
+         trans_conv_ks = (16, 16)
+         self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)
+
+         # refinement and trans conv
+
+         if learn_trans_conv_only:
+             for p in self.parameters():
+                 p.requires_grad_(False)
+
+             for p in self.trans_conv.parameters():
+                 p.requires_grad_(True)
+
+         if prompt == 'fixed':
+             self.prompt_list = ['a photo of a {}.']
+         elif prompt == 'shuffle':
+             self.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.']
+         elif prompt == 'shuffle+':
+             self.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.',
+                                 'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.',
+                                 'a bad photo of a {}.', 'a photo of the {}.']
+         elif prompt == 'shuffle_clip':
+             from models.clip_prompts import imagenet_templates
+             self.prompt_list = imagenet_templates
+
+         if process_cond is not None:
+             if process_cond == 'clamp' or process_cond[0] == 'clamp':
+
+                 val = process_cond[1] if type(process_cond) in {list, tuple} else 0.2
+
+                 def clamp_vec(x):
+                     return torch.clamp(x, -val, val)
+
+                 self.process_cond = clamp_vec
+
+             elif process_cond.endswith('.pth'):
+
+                 shift = torch.load(process_cond)
+
+                 def add_shift(x):
+                     return x + shift.to(x.device)
+
+                 self.process_cond = add_shift
+
+         import pickle
+         # guard the optional prompt cache (mirroring CLIPDenseBase) so a missing file is not fatal
+         if isfile('precomputed_prompt_vectors.pickle'):
+             precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb'))
+             self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()}
+         else:
+             self.precomputed_prompts = dict()
+
+     def forward(self, inp_image, conditional=None, return_features=False, mask=None):
+
+         assert type(return_features) == bool
+
+         # inp_image = inp_image.to(self.model.positional_embedding.device)
+
+         if mask is not None:
+             raise ValueError('mask not supported')
+
+         # x_inp = normalize(inp_image)
+         x_inp = inp_image
+
+         bs, dev = inp_image.shape[0], x_inp.device
239
+
240
+ inp_image_size = inp_image.shape[2:]
241
+
242
+ cond = self.get_cond_vec(conditional, bs)
243
+
244
+ visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers))
245
+
246
+ activation1 = activations[0]
247
+ activations = activations[1:]
248
+
249
+ a = None
250
+ for i, (activation, block, reduce) in enumerate(zip(activations[::-1], self.blocks, self.reduces)):
251
+
252
+ if a is not None:
253
+ a = reduce(activation) + a
254
+ else:
255
+ a = reduce(activation)
256
+
257
+ if i == self.cond_layer:
258
+ if self.reduce_cond is not None:
259
+ cond = self.reduce_cond(cond)
260
+
261
+ a = self.film_mul(cond) * a + self.film_add(cond)
262
+
263
+ a = block(a)
264
+
265
+ for block in self.extra_blocks:
266
+ a = a + block(a)
267
+
268
+ a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens
269
+
270
+ size = int(math.sqrt(a.shape[2]))
271
+
272
+ a = a.view(bs, a.shape[1], size, size)
273
+
274
+ if self.trans_conv is not None:
275
+ a = self.trans_conv(a)
276
+
277
+ if self.upsample_proj is not None:
278
+ a = self.upsample_proj(a)
279
+ a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear')
280
+
281
+ a = nnf.interpolate(a, inp_image_size)
282
+
283
+ if return_features:
284
+ return a, visual_q, cond, [activation1] + activations
285
+ else:
286
+ return a,
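The conditional pathway above is FiLM-style modulation: `get_cond_vec` turns a prompt string, token tensor, or support image into one embedding per batch item, and `forward` applies it to the reduced token features as a per-channel scale and shift (`a = self.film_mul(cond) * a + self.film_add(cond)`). A minimal self-contained sketch of that mechanism, with illustrative dimensions only (577 tokens for a 384x384 patch-16 ViT, reduce_dim=128):

import torch
import torch.nn as nn

cond_dim, reduce_dim, tokens, batch = 512, 128, 577, 2
film_mul = nn.Linear(cond_dim, reduce_dim)  # projects the condition to per-channel scales
film_add = nn.Linear(cond_dim, reduce_dim)  # projects the condition to per-channel shifts

a = torch.randn(tokens, batch, reduce_dim)  # (tokens, batch, feats), the layout used in forward()
cond = torch.randn(batch, cond_dim)         # one condition vector per batch item

a = film_mul(cond) * a + film_add(cond)     # (batch, 128) broadcasts across the token axis
print(a.shape)                              # torch.Size([577, 2, 128])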
config_colab.yaml ADDED
@@ -0,0 +1,14 @@
+ clear_output: true
+ force_cpu: false
+ max_threads: 3
+ memory_limit: 0
+ output_image_format: png
+ output_template: '{file}_{time}'
+ output_video_codec: libx264
+ output_video_format: mp4
+ provider: cuda
+ selected_theme: Default
+ server_name: ''
+ server_port: 0
+ server_share: true
+ video_quality: 14
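These settings form a flat YAML map consumed by the app at startup; a minimal sketch of reading them (assuming PyYAML is available):

import yaml

with open("config_colab.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["output_video_codec"])  # libx264
print(cfg["video_quality"])       # 14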
handler.py CHANGED
@@ -31,7 +31,7 @@ import tempfile
 
  from rembg import remove
  import onnxruntime as ort
-
+ import shutil
 
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
@@ -187,54 +187,44 @@ class EndpointHandler():
          f.write("="*30 + "\n")
 
      def convert_to_playable_format(self, input_path, output_path):
-         command = [
-             "ffmpeg",
-             "-i", input_path,
-             "-c:v", "libx264",
-             "-preset", "fast",
-             "-crf", "18",
-             "-y",  # Overwrite output file if it exists
-             output_path
-         ]
-         result = subprocess.run(command, capture_output=True, text=True)
+         with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_file:
+             temp_output_path = tmp_file.name
+
+         command = f"ffmpeg -i {input_path} -c:v libx264 -preset fast -crf 18 -y {temp_output_path}"
+
+         # Run the command with shell=True
+         result = subprocess.run(command, shell=True, capture_output=True, text=True)
          print("Conversion STDOUT:", result.stdout)
          print("Conversion STDERR:", result.stderr)
 
          if result.returncode != 0:
              raise RuntimeError(f"FFmpeg conversion failed with exit code {result.returncode}")
 
+         shutil.move(temp_output_path, output_path)
+
      def run_rife_interpolation(self, video_path, output_path, multi=2, scale=1.0):
          base_dir = os.path.dirname(os.path.abspath(__file__))
          directory = os.path.join(base_dir, "Practical-RIFE", "inference_video.py")
          model_directory = os.path.join(base_dir, "Practical-RIFE", "train_log")
-         command = [
-             "python",
-             directory,
-             f"--video={video_path}",
-             f"--output={output_path}",
-             f"--multi={multi}",
-             f"--scale={scale}",
-             f"--model={model_directory}",
-         ]
-
-         result = subprocess.run(command, capture_output=True, text=True)
+         command = f"python {directory} --video={video_path} --output={output_path} --multi={multi} --scale={scale} --model={model_directory}"
+
+         # Run the command with shell=True
+         result = subprocess.run(command, shell=True, capture_output=True, text=True)
          print(result)
          print(result.stdout)
          print(result.stderr)
 
          if result.returncode != 0:
              raise RuntimeError(f"RIFE interpolation failed with exit code {result.returncode}")
-         self.convert_to_playable_format(output_path, "completed_playable.mp4")
+
+         # Overwrite the RIFE output with the converted playable format
+         self.convert_to_playable_format(output_path, output_path)
 
      def speed_up_video(self, input_path, output_path, factor=4):
-         command = [
-             "ffmpeg",
-             "-i", input_path,
-             "-filter:v", f"setpts=PTS/{factor}",
-             "-an",  # Remove audio
-             output_path
-         ]
-         result = subprocess.run(command, capture_output=True, text=True)
+         command = f"ffmpeg -i {input_path} -filter:v setpts=PTS/{factor} -an {output_path}"
+
+         # Run the command with shell=True
+         result = subprocess.run(command, shell=True, capture_output=True, text=True)
          print("Speed Up Video STDOUT:", result.stdout)
          print("Speed Up Video STDERR:", result.stderr)
 
@@ -242,14 +232,10 @@ class EndpointHandler():
              raise RuntimeError(f"FFmpeg speed up failed with exit code {result.returncode}")
 
      def slow_down_video(self, input_path, output_path, factor=4):
-         command = [
-             "ffmpeg",
-             "-i", input_path,
-             "-filter:v", f"setpts={factor}*PTS",
-             "-an",  # Remove audio
-             output_path
-         ]
-         result = subprocess.run(command, capture_output=True, text=True)
+         command = f"ffmpeg -i {input_path} -filter:v setpts={factor}*PTS -an {output_path}"
+
+         # Run the command with shell=True
+         result = subprocess.run(command, shell=True, capture_output=True, text=True)
          print("Slow Down Video STDOUT:", result.stdout)
          print("Slow Down Video STDERR:", result.stderr)
 
@@ -319,11 +305,10 @@ class EndpointHandler():
          pose_output_path = os.path.join(temp_dir, "pose_videos")
 
          # Run the extract_dwpose_from_vid.py script
-         command = [
-             "python", "extract_dwpose_from_vid.py",
-             "--video_root", video_root
-         ]
-         result = subprocess.run(command, capture_output=True, text=True)
+         command = f'python extract_dwpose_from_vid.py --video_root {video_root}'
+
+         # Run the command with shell=True
+         result = subprocess.run(command, shell=True, capture_output=True, text=True)
          if result.returncode != 0:
              raise RuntimeError(f"Error running extract_dwpose_from_vid.py: {result.stderr}")
 
@@ -377,18 +362,19 @@ class EndpointHandler():
 
          # Perform face swapping
          # self.print_directory_contents(temp_dir)
-         # swapped_face_video_path = os.path.join(save_dir, "swapped_face_output.mp4")
-         # self._swap_face(cropped_face_path, animation_path, swapped_face_video_path)
+         swapped_face_video_path = os.path.join(save_dir, "swapped_face_output.mp4")
+         self._swap_face('./good_face.jpeg', animation_path, swapped_face_video_path)
 
          # Slow down the produced video by 4x
          self.print_directory_contents(temp_dir)
          slowed_down_animation_path = os.path.join(save_dir, "slowed_down_animation_output.mp4")
-         self.slow_down_video(animation_path, slowed_down_animation_path, factor=4)
+         self.slow_down_video(swapped_face_video_path, slowed_down_animation_path, factor=4)
 
          # Clear CUDA cache before RIFE interpolation
          torch.cuda.empty_cache()
 
          # Perform RIFE interpolation
+         # self.print_directory_contents(temp_dir)
          rife_output_path = os.path.join(save_dir, "completed_result.mp4")
          self.run_rife_interpolation(slowed_down_animation_path, rife_output_path, multi=2, scale=0.5)
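Note that moving these subprocess calls from argument lists to shell=True strings means any path containing spaces or shell metacharacters is now interpreted by the shell. A hedged sketch, not part of this commit, of keeping the string form while quoting the interpolated values:

import shlex
import subprocess

def convert_to_playable_format(input_path, output_path):
    # shlex.quote() protects paths with spaces or metacharacters under shell=True.
    command = (
        f"ffmpeg -i {shlex.quote(input_path)} "
        f"-c:v libx264 -preset fast -crf 18 -y {shlex.quote(output_path)}"
    )
    result = subprocess.run(command, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError(f"FFmpeg conversion failed: {result.stderr}")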
inference_img.py ADDED
@@ -0,0 +1,118 @@
+ import os
+ import cv2
+ import torch
+ import argparse
+ from torch.nn import functional as F
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.set_grad_enabled(False)
+ if torch.cuda.is_available():
+     torch.backends.cudnn.enabled = True
+     torch.backends.cudnn.benchmark = True
+
+ parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
+ parser.add_argument('--img', dest='img', nargs=2, required=True)
+ parser.add_argument('--exp', default=4, type=int)
+ parser.add_argument('--ratio', default=0, type=float, help='inference ratio between two images with 0 - 1 range')
+ parser.add_argument('--rthreshold', default=0.02, type=float, help='returns image when actual ratio falls in given range threshold')
+ parser.add_argument('--rmaxcycles', default=8, type=int, help='limit max number of bisectional cycles')
+ parser.add_argument('--model', dest='modelDir', type=str, default='train_log', help='directory with trained model files')
+
+ args = parser.parse_args()
+
+ try:
+     try:
+         from model.RIFE_HDv2 import Model
+         model = Model()
+         model.load_model(args.modelDir, -1)
+         print("Loaded v2.x HD model.")
+     except:
+         from train_log.RIFE_HDv3 import Model
+         model = Model()
+         model.load_model(args.modelDir, -1)
+         print("Loaded v3.x HD model.")
+ except:
+     from model.RIFE_HD import Model
+     model = Model()
+     model.load_model(args.modelDir, -1)
+     print("Loaded v1.x HD model")
+ if not hasattr(model, 'version'):
+     model.version = 0
+ model.eval()
+ model.device()
+
+ if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
+     img0 = cv2.imread(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
+     img1 = cv2.imread(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
+     img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device)).unsqueeze(0)
+     img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device)).unsqueeze(0)
+
+ else:
+     img0 = cv2.imread(args.img[0], cv2.IMREAD_UNCHANGED)
+     img1 = cv2.imread(args.img[1], cv2.IMREAD_UNCHANGED)
+     img0 = cv2.resize(img0, (448, 256))
+     img1 = cv2.resize(img1, (448, 256))
+     img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
+     img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
+
+ n, c, h, w = img0.shape
+ ph = ((h - 1) // 64 + 1) * 64
+ pw = ((w - 1) // 64 + 1) * 64
+ padding = (0, pw - w, 0, ph - h)
+ img0 = F.pad(img0, padding)
+ img1 = F.pad(img1, padding)
+
+
+ if args.ratio:
+     if model.version >= 3.9:
+         img_list = [img0, model.inference(img0, img1, args.ratio), img1]
+     else:
+         img_list = [img0]  # fix: initialize the list before the bisection result is appended
+         img0_ratio = 0.0
+         img1_ratio = 1.0
+         if args.ratio <= img0_ratio + args.rthreshold / 2:
+             middle = img0
+         elif args.ratio >= img1_ratio - args.rthreshold / 2:
+             middle = img1
+         else:
+             tmp_img0 = img0
+             tmp_img1 = img1
+             for inference_cycle in range(args.rmaxcycles):
+                 middle = model.inference(tmp_img0, tmp_img1)
+                 middle_ratio = (img0_ratio + img1_ratio) / 2
+                 if args.ratio - (args.rthreshold / 2) <= middle_ratio <= args.ratio + (args.rthreshold / 2):
+                     break
+                 if args.ratio > middle_ratio:
+                     tmp_img0 = middle
+                     img0_ratio = middle_ratio
+                 else:
+                     tmp_img1 = middle
+                     img1_ratio = middle_ratio
+         img_list.append(middle)
+         img_list.append(img1)
+ else:
+     if model.version >= 3.9:
+         img_list = [img0]
+         n = 2 ** args.exp
+         for i in range(n - 1):
+             img_list.append(model.inference(img0, img1, (i + 1) * 1. / n))
+         img_list.append(img1)
+     else:
+         img_list = [img0, img1]
+         for i in range(args.exp):
+             tmp = []
+             for j in range(len(img_list) - 1):
+                 mid = model.inference(img_list[j], img_list[j + 1])
+                 tmp.append(img_list[j])
+                 tmp.append(mid)
+             tmp.append(img1)
+             img_list = tmp
+
+ if not os.path.exists('output'):
+     os.mkdir('output')
+ for i in range(len(img_list)):
+     if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
+         cv2.imwrite('output/img{}.exr'.format(i), (img_list[i][0]).cpu().numpy().transpose(1, 2, 0)[:h, :w], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
+     else:
+         cv2.imwrite('output/img{}.png'.format(i), (img_list[i][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
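The script pads both inputs up to the next multiple of 64 (the model's total downsampling stride) and crops outputs back with [:h, :w]. The arithmetic, worked for a 1080p frame:

h, w = 1080, 1920
ph = ((h - 1) // 64 + 1) * 64     # 1088: next multiple of 64 at or above 1080
pw = ((w - 1) // 64 + 1) * 64     # 1920: already a multiple of 64
padding = (0, pw - w, 0, ph - h)  # F.pad order: (left, right, top, bottom)
print(ph, pw, padding)            # 1088 1920 (0, 0, 0, 8)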
inference_img_SR.py ADDED
@@ -0,0 +1,69 @@
+ import os
+ import cv2
+ import torch
+ import argparse
+ from torch.nn import functional as F
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.set_grad_enabled(False)
+ if torch.cuda.is_available():
+     torch.backends.cudnn.enabled = True
+     torch.backends.cudnn.benchmark = True
+
+ parser = argparse.ArgumentParser(description='STVSR for a pair of images')
+ parser.add_argument('--img', dest='img', nargs=2, required=True)
+ parser.add_argument('--exp', default=2, type=int)
+ parser.add_argument('--ratio', default=0, type=float, help='inference ratio between two images with 0 - 1 range')
+ parser.add_argument('--model', dest='modelDir', type=str, default='train_log', help='directory with trained model files')
+
+ args = parser.parse_args()
+
+ from train_log.model import Model
+ model = Model()
+ model.device()
+ model.load_model('train_log')
+ model.eval()
+
+ if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
+     img0 = cv2.imread(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
+     img1 = cv2.imread(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
+     img0 = cv2.resize(img0, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     img1 = cv2.resize(img1, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device)).unsqueeze(0)
+     img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device)).unsqueeze(0)
+ else:
+     img0 = cv2.imread(args.img[0], cv2.IMREAD_UNCHANGED)
+     img1 = cv2.imread(args.img[1], cv2.IMREAD_UNCHANGED)
+     img0 = cv2.resize(img0, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     img1 = cv2.resize(img1, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
+     img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
+
+ n, c, h, w = img0.shape
+ ph = ((h - 1) // 32 + 1) * 32
+ pw = ((w - 1) // 32 + 1) * 32
+ padding = (0, pw - w, 0, ph - h)
+ img0 = F.pad(img0, padding)
+ img1 = F.pad(img1, padding)
+
+ if args.ratio:
+     print('ratio={}'.format(args.ratio))
+     img_list = model.inference(img0, img1, timestep=args.ratio)
+ else:
+     n = 2 ** args.exp - 1
+     time_list = [0]
+     for i in range(n):
+         time_list.append((i + 1) * 1. / (n + 1))
+     time_list.append(1)
+     print(time_list)
+     img_list = model.inference(img0, img1, timestep=time_list)
+
+ if not os.path.exists('output'):
+     os.mkdir('output')
+ for i in range(len(img_list)):
+     if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
+         cv2.imwrite('output/img{}.exr'.format(i), (img_list[i][0]).cpu().numpy().transpose(1, 2, 0)[:h, :w], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
+     else:
+         cv2.imwrite('output/img{}.png'.format(i), (img_list[i][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
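With the default --exp 2, the script requests 2**2 - 1 = 3 intermediate timesteps plus both endpoints in a single inference call. The loop above produces:

exp = 2
n = 2 ** exp - 1
time_list = [0]
for i in range(n):
    time_list.append((i + 1) * 1. / (n + 1))
time_list.append(1)
print(time_list)  # [0, 0.25, 0.5, 0.75, 1]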
inference_video.py ADDED
@@ -0,0 +1,293 @@
+ import os
+ import cv2
+ import torch
+ import argparse
+ import numpy as np
+ from tqdm import tqdm
+ from torch.nn import functional as F
+ import warnings
+ import _thread
+ import skvideo.io
+ from queue import Queue, Empty
+ from model.pytorch_msssim import ssim_matlab
+
+ warnings.filterwarnings("ignore")
+
+ def transferAudio(sourceVideo, targetVideo):
+     import shutil
+     import moviepy.editor
+     tempAudioFileName = "./temp/audio.mkv"
+
+     # split audio from original video file and store in "temp" directory
+     if True:
+
+         # clear old "temp" directory if it exists
+         if os.path.isdir("temp"):
+             # remove temp directory
+             shutil.rmtree("temp")
+         # create new "temp" directory
+         os.makedirs("temp")
+         # extract audio from video
+         os.system('ffmpeg -y -i "{}" -c:a copy -vn {}'.format(sourceVideo, tempAudioFileName))
+
+     targetNoAudio = os.path.splitext(targetVideo)[0] + "_noaudio" + os.path.splitext(targetVideo)[1]
+     os.rename(targetVideo, targetNoAudio)
+     # combine audio file and new video file
+     os.system('ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(targetNoAudio, tempAudioFileName, targetVideo))
+
+     if os.path.getsize(targetVideo) == 0:  # if ffmpeg failed to merge the video and audio together, try converting the audio to aac
+         tempAudioFileName = "./temp/audio.m4a"
+         os.system('ffmpeg -y -i "{}" -c:a aac -b:a 160k -vn {}'.format(sourceVideo, tempAudioFileName))
+         os.system('ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(targetNoAudio, tempAudioFileName, targetVideo))
+         if (os.path.getsize(targetVideo) == 0):  # if aac is not supported by selected format
+             os.rename(targetNoAudio, targetVideo)
+             print("Audio transfer failed. Interpolated video will have no audio")
+         else:
+             print("Lossless audio transfer failed. Audio was transcoded to AAC (M4A) instead.")
+
+             # remove audio-less video
+             os.remove(targetNoAudio)
+     else:
+         os.remove(targetNoAudio)
+
+     # remove temp directory
+     shutil.rmtree("temp")
+
+ parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
+ parser.add_argument('--video', dest='video', type=str, default=None)
+ parser.add_argument('--output', dest='output', type=str, default=None)
+ parser.add_argument('--img', dest='img', type=str, default=None)
+ parser.add_argument('--montage', dest='montage', action='store_true', help='montage origin video')
+ parser.add_argument('--model', dest='modelDir', type=str, default='train_log', help='directory with trained model files')
+ parser.add_argument('--fp16', dest='fp16', action='store_true', help='fp16 mode for faster and more lightweight inference on cards with Tensor Cores')
+ parser.add_argument('--UHD', dest='UHD', action='store_true', help='support 4k video')
+ parser.add_argument('--scale', dest='scale', type=float, default=1.0, help='Try scale=0.5 for 4k video')
+ parser.add_argument('--skip', dest='skip', action='store_true', help='whether to remove static frames before processing')
+ parser.add_argument('--fps', dest='fps', type=int, default=None)
+ parser.add_argument('--png', dest='png', action='store_true', help='whether to output png frames instead of a video')
+ parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension')
+ parser.add_argument('--exp', dest='exp', type=int, default=1)
+ parser.add_argument('--multi', dest='multi', type=int, default=2)
+
+ args = parser.parse_args()
+ if args.exp != 1:
+     args.multi = (2 ** args.exp)
+ assert (not args.video is None or not args.img is None)
+ if args.skip:
+     print("skip flag is abandoned, please refer to issue #207.")
+ if args.UHD and args.scale == 1.0:
+     args.scale = 0.5
+ assert args.scale in [0.25, 0.5, 1.0, 2.0, 4.0]
+ if not args.img is None:
+     args.png = True
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.set_grad_enabled(False)
+ if torch.cuda.is_available():
+     torch.backends.cudnn.enabled = True
+     torch.backends.cudnn.benchmark = True
+     if(args.fp16):
+         torch.set_default_tensor_type(torch.cuda.HalfTensor)
+
+ try:
+     from train_log.RIFE_HDv3 import Model
+ except:
+     print("Please download our model from model list")
+ model = Model()
+ if not hasattr(model, 'version'):
+     model.version = 0
+ model.load_model(args.modelDir, -1)
+ print("Loaded 3.x/4.x HD model.")
+ model.eval()
+ model.device()
+
+ if not args.video is None:
+     videoCapture = cv2.VideoCapture(args.video)
+     fps = videoCapture.get(cv2.CAP_PROP_FPS)
+     tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
+     videoCapture.release()
+     if args.fps is None:
+         fpsNotAssigned = True
+         args.fps = fps * args.multi
+     else:
+         fpsNotAssigned = False
+     videogen = skvideo.io.vreader(args.video)
+     lastframe = next(videogen)
+     fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
+     video_path_wo_ext, ext = os.path.splitext(args.video)
+     print('{}.{}, {} frames in total, {}FPS to {}FPS'.format(video_path_wo_ext, args.ext, tot_frame, fps, args.fps))
+     if args.png == False and fpsNotAssigned == True:
+         print("The audio will be merged after interpolation process")
+     else:
+         print("Will not merge audio because using png or fps flag!")
+ else:
+     videogen = []
+     for f in os.listdir(args.img):
+         if 'png' in f:
+             videogen.append(f)
+     tot_frame = len(videogen)
+     videogen.sort(key=lambda x: int(x[:-4]))
+     lastframe = cv2.imread(os.path.join(args.img, videogen[0]), cv2.IMREAD_UNCHANGED)[:, :, ::-1].copy()
+     videogen = videogen[1:]
+ h, w, _ = lastframe.shape
+ vid_out_name = None
+ vid_out = None
+ if args.png:
+     if not os.path.exists('vid_out'):
+         os.mkdir('vid_out')
+ else:
+     if args.output is not None:
+         print("Out")
+         vid_out_name = args.output
+     else:
+         vid_out_name = '{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.multi, int(np.round(args.fps)), args.ext)
+     print("Width is ", w, " and height is ", h)
+     vid_out = cv2.VideoWriter(vid_out_name, fourcc, args.fps, (w, h))
+
+ def clear_write_buffer(user_args, write_buffer):
+     cnt = 0
+     while True:
+         item = write_buffer.get()
+         if item is None:
+             break
+         if user_args.png:
+             cv2.imwrite('vid_out/{:0>7d}.png'.format(cnt), item[:, :, ::-1])
+             cnt += 1
+         else:
+             vid_out.write(item[:, :, ::-1])
+
+ def build_read_buffer(user_args, read_buffer, videogen):
+     try:
+         for frame in videogen:
+             if not user_args.img is None:
+                 frame = cv2.imread(os.path.join(user_args.img, frame), cv2.IMREAD_UNCHANGED)[:, :, ::-1].copy()
+             if user_args.montage:
+                 frame = frame[:, left: left + w]
+             read_buffer.put(frame)
+     except:
+         pass
+     read_buffer.put(None)
+
+ def make_inference(I0, I1, n):
+     global model
+     if model.version >= 3.9:
+         res = []
+         for i in range(n):
+             res.append(model.inference(I0, I1, (i+1) * 1. / (n+1), args.scale))
+         return res
+     else:
+         middle = model.inference(I0, I1, args.scale)
+         if n == 1:
+             return [middle]
+         first_half = make_inference(I0, middle, n=n//2)
+         second_half = make_inference(middle, I1, n=n//2)
+         if n%2:
+             return [*first_half, middle, *second_half]
+         else:
+             return [*first_half, *second_half]
+
+ def pad_image(img):
+     if(args.fp16):
+         return F.pad(img, padding).half()
+     else:
+         return F.pad(img, padding)
+
+ if args.montage:
+     left = w // 4
+     w = w // 2
+ tmp = max(128, int(128 / args.scale))
+ ph = ((h - 1) // tmp + 1) * tmp
+ pw = ((w - 1) // tmp + 1) * tmp
+ padding = (0, pw - w, 0, ph - h)
+ pbar = tqdm(total=tot_frame)
+ if args.montage:
+     lastframe = lastframe[:, left: left + w]
+ write_buffer = Queue(maxsize=500)
+ read_buffer = Queue(maxsize=500)
+ _thread.start_new_thread(build_read_buffer, (args, read_buffer, videogen))
+ _thread.start_new_thread(clear_write_buffer, (args, write_buffer))
+
+ I1 = torch.from_numpy(np.transpose(lastframe, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
+ I1 = pad_image(I1)
+ temp = None  # save lastframe when processing static frame
+
+ while True:
+     if temp is not None:
+         frame = temp
+         temp = None
+     else:
+         frame = read_buffer.get()
+     if frame is None:
+         break
+     I0 = I1
+     I1 = torch.from_numpy(np.transpose(frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
+     I1 = pad_image(I1)
+     I0_small = F.interpolate(I0, (32, 32), mode='bilinear', align_corners=False)
+     I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
+     ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3])
+
+     break_flag = False
+     if ssim > 0.996:
+         frame = read_buffer.get()  # read a new frame
+         if frame is None:
+             break_flag = True
+             frame = lastframe
+         else:
+             temp = frame
+         I1 = torch.from_numpy(np.transpose(frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
+         I1 = pad_image(I1)
+         I1 = model.inference(I0, I1, args.scale)
+         I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
+         ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3])
+         frame = (I1[0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w]
+
+     if ssim < 0.2:
+         output = []
+         for i in range(args.multi - 1):
+             output.append(I0)
+         '''
+         output = []
+         step = 1 / args.multi
+         alpha = 0
+         for i in range(args.multi - 1):
+             alpha += step
+             beta = 1-alpha
+             output.append(torch.from_numpy(np.transpose((cv2.addWeighted(frame[:, :, ::-1], alpha, lastframe[:, :, ::-1], beta, 0)[:, :, ::-1].copy()), (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.)
+         '''
+     else:
+         output = make_inference(I0, I1, args.multi - 1)
+
+     if args.montage:
+         write_buffer.put(np.concatenate((lastframe, lastframe), 1))
+         for mid in output:
+             mid = (((mid[0] * 255.).byte().cpu().numpy().transpose(1, 2, 0)))
+             write_buffer.put(np.concatenate((lastframe, mid[:h, :w]), 1))
+     else:
+         write_buffer.put(lastframe)
+         for mid in output:
+             mid = (((mid[0] * 255.).byte().cpu().numpy().transpose(1, 2, 0)))
+             write_buffer.put(mid[:h, :w])
+     pbar.update(1)
+     lastframe = frame
+     if break_flag:
+         break
+
+ if args.montage:
+     write_buffer.put(np.concatenate((lastframe, lastframe), 1))
+ else:
+     write_buffer.put(lastframe)
+ import time
+ while(not write_buffer.empty()):
+     time.sleep(0.1)
+ pbar.close()
+ if not vid_out is None:
+     vid_out.release()
+
+ # move audio to new video file if appropriate
+ # if args.png == False and fpsNotAssigned == True and not args.video is None:
+ #     try:
+ #         transferAudio(args.video, vid_out_name)
+ #     except:
+ #         print("Audio transfer failed. Interpolated video will have no audio")
+ #         targetNoAudio = os.path.splitext(vid_out_name)[0] + "_noaudio" + os.path.splitext(vid_out_name)[1]
+ #         os.rename(targetNoAudio, vid_out_name)
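For model versions below 3.9, make_inference fills the gap by recursive bisection: it synthesizes the midpoint, then recurses into each half, keeping the midpoint itself only when n is odd. A standalone sketch of the same recursion, with simple averaging standing in for model.inference(), to show the resulting frame ordering:

def make_inference_stub(a, b, n):
    # Mirrors the recursion in make_inference() above; averaging replaces the network.
    middle = (a + b) / 2
    if n == 1:
        return [middle]
    first_half = make_inference_stub(a, middle, n=n // 2)
    second_half = make_inference_stub(middle, b, n=n // 2)
    if n % 2:
        return [*first_half, middle, *second_half]
    return [*first_half, *second_half]

print(make_inference_stub(0.0, 1.0, 3))  # [0.25, 0.5, 0.75]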
inference_video_enhance.py ADDED
@@ -0,0 +1,201 @@
+ import os
+ import cv2
+ import torch
+ import argparse
+ import numpy as np
+ from tqdm import tqdm
+ from torch.nn import functional as F
+ import warnings
+ import _thread
+ import skvideo.io
+ from queue import Queue, Empty
+ from model.pytorch_msssim import ssim_matlab
+
+ warnings.filterwarnings("ignore")
+
+ def transferAudio(sourceVideo, targetVideo):
+     import shutil
+     import moviepy.editor
+     tempAudioFileName = "./temp/audio.mkv"
+
+     # split audio from original video file and store in "temp" directory
+     if True:
+
+         # clear old "temp" directory if it exists
+         if os.path.isdir("temp"):
+             # remove temp directory
+             shutil.rmtree("temp")
+         # create new "temp" directory
+         os.makedirs("temp")
+         # extract audio from video
+         os.system('ffmpeg -y -i "{}" -c:a copy -vn {}'.format(sourceVideo, tempAudioFileName))
+
+     targetNoAudio = os.path.splitext(targetVideo)[0] + "_noaudio" + os.path.splitext(targetVideo)[1]
+     os.rename(targetVideo, targetNoAudio)
+     # combine audio file and new video file
+     os.system('ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(targetNoAudio, tempAudioFileName, targetVideo))
+
+     if os.path.getsize(targetVideo) == 0:  # if ffmpeg failed to merge the video and audio together, try converting the audio to aac
+         tempAudioFileName = "./temp/audio.m4a"
+         os.system('ffmpeg -y -i "{}" -c:a aac -b:a 160k -vn {}'.format(sourceVideo, tempAudioFileName))
+         os.system('ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(targetNoAudio, tempAudioFileName, targetVideo))
+         if (os.path.getsize(targetVideo) == 0):  # if aac is not supported by selected format
+             os.rename(targetNoAudio, targetVideo)
+             print("Audio transfer failed. Interpolated video will have no audio")
+         else:
+             print("Lossless audio transfer failed. Audio was transcoded to AAC (M4A) instead.")
+
+             # remove audio-less video
+             os.remove(targetNoAudio)
+     else:
+         os.remove(targetNoAudio)
+
+     # remove temp directory
+     shutil.rmtree("temp")
+
+ parser = argparse.ArgumentParser(description='Video SR')
+ parser.add_argument('--video', dest='video', type=str, default=None)
+ parser.add_argument('--output', dest='output', type=str, default=None)
+ parser.add_argument('--img', dest='img', type=str, default=None)
+ parser.add_argument('--model', dest='modelDir', type=str, default='train_log_SAFA', help='directory with trained model files')
+ parser.add_argument('--fp16', dest='fp16', action='store_true', help='fp16 mode for faster and more lightweight inference on cards with Tensor Cores')
+ parser.add_argument('--png', dest='png', action='store_true', help='whether to output png frames instead of a video')
+ parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension')
+
+ args = parser.parse_args()
+ assert (not args.video is None or not args.img is None)
+ if not args.img is None:
+     args.png = True
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ torch.set_grad_enabled(False)
+ if torch.cuda.is_available():
+     torch.backends.cudnn.enabled = True
+     torch.backends.cudnn.benchmark = True
+     if(args.fp16):
+         print('set fp16')
+         torch.set_default_tensor_type(torch.cuda.HalfTensor)
+
+ try:
+     from train_log_SAFA.model import Model
+ except:
+     print("Please download our model from model list")
+ model = Model()
+ model.device()
+ model.load_model(args.modelDir)
+ print("Loaded SAFA model.")
+ model.eval()
+
+ if not args.video is None:
+     videoCapture = cv2.VideoCapture(args.video)
+     fps = videoCapture.get(cv2.CAP_PROP_FPS)
+     tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
+     videoCapture.release()
+     fpsNotAssigned = True
+     videogen = skvideo.io.vreader(args.video)
+     lastframe = next(videogen)
+     fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
+     video_path_wo_ext, ext = os.path.splitext(args.video)
+     if args.png == False and fpsNotAssigned == True:
+         print("The audio will be merged after interpolation process")
+     else:
+         print("Will not merge audio because using png or fps flag!")
+ else:
+     videogen = []
+     for f in os.listdir(args.img):
+         if 'png' in f:
+             videogen.append(f)
+     tot_frame = len(videogen)
+     videogen.sort(key=lambda x: int(x[:-4]))
+     lastframe = cv2.imread(os.path.join(args.img, videogen[0]), cv2.IMREAD_UNCHANGED)[:, :, ::-1].copy()
+     videogen = videogen[1:]
+
+ h, w, _ = lastframe.shape
+
+ vid_out_name = None
+ vid_out = None
+ if args.png:
+     if not os.path.exists('vid_out'):
+         os.mkdir('vid_out')
+ else:
+     if args.output is not None:
+         vid_out_name = args.output
+     else:
+         vid_out_name = '{}_2X{}'.format(video_path_wo_ext, ext)
+     vid_out = cv2.VideoWriter(vid_out_name, fourcc, fps, (w, h))
+
+ def clear_write_buffer(user_args, write_buffer):
+     cnt = 0
+     while True:
+         item = write_buffer.get()
+         if item is None:
+             break
+         if user_args.png:
+             cv2.imwrite('vid_out/{:0>7d}.png'.format(cnt), item[:, :, ::-1])
+             cnt += 1
+         else:
+             vid_out.write(item[:, :, ::-1])
+
+ def build_read_buffer(user_args, read_buffer, videogen):
+     for frame in videogen:
+         if not user_args.img is None:
+             frame = cv2.imread(os.path.join(user_args.img, frame), cv2.IMREAD_UNCHANGED)[:, :, ::-1].copy()
+         # if user_args.montage:
+         #     frame = frame[:, left: left + w]
+         read_buffer.put(frame)
+     read_buffer.put(None)
+
+ def pad_image(img):
+     if(args.fp16):
+         return F.pad(img, padding, mode='reflect').half()
+     else:
+         return F.pad(img, padding, mode='reflect')
+
+ tmp = 64
+ ph = ((h - 1) // tmp + 1) * tmp
+ pw = ((w - 1) // tmp + 1) * tmp
+ padding = (0, pw - w, 0, ph - h)
+ pbar = tqdm(total=tot_frame)
+ write_buffer = Queue(maxsize=500)
+ read_buffer = Queue(maxsize=500)
+ _thread.start_new_thread(build_read_buffer, (args, read_buffer, videogen))
+ _thread.start_new_thread(clear_write_buffer, (args, write_buffer))
+
+ while True:
+     frame = read_buffer.get()
+     if frame is None:
+         break
+     # lastframe_2x = cv2.resize(lastframe, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     # frame_2x = cv2.resize(frame, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
+     I0 = pad_image(torch.from_numpy(np.transpose(lastframe, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.)
+     I1 = pad_image(torch.from_numpy(np.transpose(frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.)
+     I0_small = F.interpolate(I0, (32, 32), mode='bilinear', align_corners=False)
+     I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
+     ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3])
+     if ssim < 0.2:
+         out = [model.inference(I0, I0, [0])[0], model.inference(I1, I1, [0])[0]]
+     else:
+         out = model.inference(I0, I1, [0, 1])
+     assert(len(out) == 2)
+     write_buffer.put((out[0][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
+     write_buffer.put((out[1][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
+     lastframe = read_buffer.get()
+     if lastframe is None:
+         break
+     pbar.update(2)
+
+ import time
+ while(not write_buffer.empty()):
+     time.sleep(0.1)
+ pbar.close()
+ if not vid_out is None:
+     vid_out.release()
+
+ # move audio to new video file if appropriate
+ if args.png == False and fpsNotAssigned == True and not args.video is None:
+     try:
+         transferAudio(args.video, vid_out_name)
+     except:
+         print("Audio transfer failed. Interpolated video will have no audio")
+         targetNoAudio = os.path.splitext(vid_out_name)[0] + "_noaudio" + os.path.splitext(vid_out_name)[1]
+         os.rename(targetNoAudio, vid_out_name)
installer/installer.py ADDED
@@ -0,0 +1,87 @@
+ import argparse
+ import glob
+ import os
+ import shutil
+ import site
+ import subprocess
+ import sys
+
+
+ script_dir = os.getcwd()
+
+
+ def run_cmd(cmd, capture_output=False, env=None):
+     # Run shell commands
+     return subprocess.run(cmd, shell=True, capture_output=capture_output, env=env)
+
+
+ def check_env():
+     # If we have access to conda, we are probably in an environment
+     conda_not_exist = run_cmd("conda", capture_output=True).returncode
+     if conda_not_exist:
+         print("Conda is not installed. Exiting...")
+         sys.exit()
+
+     # Ensure this is a new environment and not the base environment
+     if os.environ["CONDA_DEFAULT_ENV"] == "base":
+         print("Create an environment for this project and activate it. Exiting...")
+         sys.exit()
+
+
+ def install_dependencies():
+     global MY_PATH
+
+     # Install Git and clone repo
+     run_cmd("conda install -y -k git")
+     run_cmd("git clone https://github.com/C0untFloyd/roop-unleashed.git")
+     os.chdir(MY_PATH)
+     run_cmd("git checkout c8643a0532f09f84397aaacf526e66db6455d399")
+     # Install dependencies from requirements.txt
+     run_cmd("python -m pip install -r requirements.txt")
+
+
+ def update_dependencies():
+     global MY_PATH
+
+     os.chdir(MY_PATH)
+     # do a hard reset to update even if there are local changes
+     run_cmd("git fetch --all")
+     run_cmd("git reset --hard origin/main")
+     run_cmd("git pull")
+     # Install/update dependencies from all requirements.txt files
+     run_cmd("python -m pip install -r requirements.txt")
+
+
+ def start_app():
+     global MY_PATH
+
+     os.chdir(MY_PATH)
+     # forward commandline arguments
+     sys.argv.pop(0)
+     args = ' '.join(sys.argv)
+     print("Launching App")
+     run_cmd(f'python run.py {args}')
+
+
+ if __name__ == "__main__":
+     global MY_PATH
+
+     MY_PATH = "roop-unleashed"
+
+     # Verifies we are in a conda environment
+     check_env()
+
+     # If webui has already been installed, skip and run
+     if not os.path.exists(MY_PATH):
+         install_dependencies()
+     else:
+         # moved update from batch to here, because of batch limitations
+         updatechoice = input("Check for Updates? [y/n]").lower()
+         if updatechoice == "y":
+             update_dependencies()
+
+     # Run the model with webui
+     os.chdir(script_dir)
+     start_app()
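run_cmd returns the CompletedProcess, so callers can branch on returncode exactly as check_env does; a small usage sketch (the command here is illustrative):

import subprocess

def run_cmd(cmd, capture_output=False, env=None):
    return subprocess.run(cmd, shell=True, capture_output=capture_output, env=env)

# A nonzero returncode means the command is unavailable, mirroring check_env().
if run_cmd("git --version", capture_output=True).returncode != 0:
    raise SystemExit("git is not installed")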
installer/windows_run.bat ADDED
@@ -0,0 +1,99 @@
+ @echo off
+
+ REM No CLI arguments supported anymore
+ set COMMANDLINE_ARGS=
+
+ cd /D "%~dp0"
+
+ echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end
+
+ set PATH=%PATH%;%SystemRoot%\system32
+
+ @rem config
+ set INSTALL_DIR=%cd%\installer_files
+ set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
+ set INSTALL_ENV_DIR=%cd%\installer_files\env
+ set MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe
+ set FFMPEG_DOWNLOAD_URL=https://github.com/GyanD/codexffmpeg/releases/download/2023-06-21-git-1bcb8a7338/ffmpeg-2023-06-21-git-1bcb8a7338-essentials_build.zip
+ set INSTALL_FFMPEG_DIR=%cd%\installer_files\ffmpeg
+ set INSIGHTFACE_PACKAGE_URL=https://github.com/C0untFloyd/roop-unleashed/releases/download/3.6.6/insightface-0.7.3-cp310-cp310-win_amd64.whl
+ set INSIGHTFACE_PACKAGE_PATH=%INSTALL_DIR%\insightface-0.7.3-cp310-cp310-win_amd64.whl
+
+ set conda_exists=F
+ set ffmpeg_exists=F
+
+ @rem figure out whether git and conda need to be installed
+ call "%CONDA_ROOT_PREFIX%\_conda.exe" --version >nul 2>&1
+ if "%ERRORLEVEL%" EQU "0" set conda_exists=T
+
+ @rem Check if FFmpeg is already in PATH
+ where ffmpeg >nul 2>&1
+ if "%ERRORLEVEL%" EQU "0" (
+     echo FFmpeg is already installed.
+     set ffmpeg_exists=T
+ )
+
+ @rem (if necessary) install git and conda into a contained environment
+
+ @rem download conda
+ if "%conda_exists%" == "F" (
+     echo Downloading Miniconda from %MINICONDA_DOWNLOAD_URL% to %INSTALL_DIR%\miniconda_installer.exe
+     mkdir "%INSTALL_DIR%"
+     call curl -Lk "%MINICONDA_DOWNLOAD_URL%" > "%INSTALL_DIR%\miniconda_installer.exe" || ( echo. && echo Miniconda failed to download. && goto end )
+     echo Installing Miniconda to %CONDA_ROOT_PREFIX%
+     start /wait "" "%INSTALL_DIR%\miniconda_installer.exe" /InstallationType=JustMe /NoShortcuts=1 /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=%CONDA_ROOT_PREFIX%
+
+     @rem test the conda binary
+     echo Miniconda version:
+     call "%CONDA_ROOT_PREFIX%\_conda.exe" --version || ( echo. && echo Miniconda not found. && goto end )
+ )
+
+ @rem create the installer env
+ if not exist "%INSTALL_ENV_DIR%" (
+     echo Creating Conda Environment
+     call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.10 || ( echo. && echo ERROR: Conda environment creation failed. && goto end )
+     @rem check if conda environment was actually created
+     if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo ERROR: Conda environment is empty. && goto end )
+     @rem activate installer env
+     call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo ERROR: Miniconda hook not found. && goto end )
+     @rem Download insightface package
+     echo Downloading insightface package from %INSIGHTFACE_PACKAGE_URL% to %INSIGHTFACE_PACKAGE_PATH%
+     call curl -Lk "%INSIGHTFACE_PACKAGE_URL%" > "%INSIGHTFACE_PACKAGE_PATH%" || ( echo. && echo ERROR: Insightface package failed to download. && goto end )
+     @rem install insightface package using pip
+     echo Installing insightface package
+     call pip install "%INSIGHTFACE_PACKAGE_PATH%" || ( echo. && echo ERROR: Insightface package installation failed. && goto end )
+ )
+
+ @rem Download and install FFmpeg if not already installed
+ if "%ffmpeg_exists%" == "F" (
+     if not exist "%INSTALL_FFMPEG_DIR%" (
+         echo Downloading ffmpeg from %FFMPEG_DOWNLOAD_URL% to %INSTALL_DIR%
+         call curl -Lk "%FFMPEG_DOWNLOAD_URL%" > "%INSTALL_DIR%\ffmpeg.zip" || ( echo. && echo ffmpeg failed to download. && goto end )
+         call powershell -command "Expand-Archive -Force '%INSTALL_DIR%\ffmpeg.zip' '%INSTALL_DIR%\'"
+         cd "installer_files"
+         setlocal EnableExtensions EnableDelayedExpansion
+         for /f "tokens=*" %%f in ('dir /s /b /ad "ffmpeg\*"') do (
+             ren "%%f" "ffmpeg"
+         )
+         endlocal
+         setx PATH "%INSTALL_FFMPEG_DIR%\bin\;%PATH%"
+         echo To use videos, you need to restart roop after this installation.
+         cd ..
+     )
+ ) else (
+     echo Skipping FFmpeg installation as it is already available.
+ )
+
+ @rem setup installer env
+ @rem check if conda environment was actually created
+ if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo ERROR: Conda environment is empty. && goto end )
+ @rem activate installer env
+ call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo ERROR: Miniconda hook not found. && goto end )
+ echo Launching roop unleashed
+ call python installer.py %COMMANDLINE_ARGS%
+
+ echo.
+ echo Done!
+
+ :end
+ pause
model/__pycache__/loss.cpython-310.pyc ADDED
Binary file (5.62 kB).
 
model/__pycache__/warplayer.cpython-310.pyc ADDED
Binary file (1.04 kB).
 
model/loss.py ADDED
@@ -0,0 +1,128 @@
+ import torch
+ import numpy as np
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torchvision.models as models
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+ class EPE(nn.Module):
+     def __init__(self):
+         super(EPE, self).__init__()
+
+     def forward(self, flow, gt, loss_mask):
+         loss_map = (flow - gt.detach()) ** 2
+         loss_map = (loss_map.sum(1, True) + 1e-6) ** 0.5
+         return (loss_map * loss_mask)
+
+
+ class Ternary(nn.Module):
+     def __init__(self):
+         super(Ternary, self).__init__()
+         patch_size = 7
+         out_channels = patch_size * patch_size
+         self.w = np.eye(out_channels).reshape(
+             (patch_size, patch_size, 1, out_channels))
+         self.w = np.transpose(self.w, (3, 2, 0, 1))
+         self.w = torch.tensor(self.w).float().to(device)
+
+     def transform(self, img):
+         patches = F.conv2d(img, self.w, padding=3, bias=None)
+         transf = patches - img
+         transf_norm = transf / torch.sqrt(0.81 + transf**2)
+         return transf_norm
+
+     def rgb2gray(self, rgb):
+         r, g, b = rgb[:, 0:1, :, :], rgb[:, 1:2, :, :], rgb[:, 2:3, :, :]
+         gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
+         return gray
+
+     def hamming(self, t1, t2):
+         dist = (t1 - t2) ** 2
+         dist_norm = torch.mean(dist / (0.1 + dist), 1, True)
+         return dist_norm
+
+     def valid_mask(self, t, padding):
+         n, _, h, w = t.size()
+         inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
+         mask = F.pad(inner, [padding] * 4)
+         return mask
+
+     def forward(self, img0, img1):
+         img0 = self.transform(self.rgb2gray(img0))
+         img1 = self.transform(self.rgb2gray(img1))
+         return self.hamming(img0, img1) * self.valid_mask(img0, 1)
+
+
+ class SOBEL(nn.Module):
+     def __init__(self):
+         super(SOBEL, self).__init__()
+         self.kernelX = torch.tensor([
+             [1, 0, -1],
+             [2, 0, -2],
+             [1, 0, -1],
+         ]).float()
+         self.kernelY = self.kernelX.clone().T
+         self.kernelX = self.kernelX.unsqueeze(0).unsqueeze(0).to(device)
+         self.kernelY = self.kernelY.unsqueeze(0).unsqueeze(0).to(device)
+
+     def forward(self, pred, gt):
+         N, C, H, W = pred.shape[0], pred.shape[1], pred.shape[2], pred.shape[3]
+         img_stack = torch.cat(
+             [pred.reshape(N*C, 1, H, W), gt.reshape(N*C, 1, H, W)], 0)
+         sobel_stack_x = F.conv2d(img_stack, self.kernelX, padding=1)
+         sobel_stack_y = F.conv2d(img_stack, self.kernelY, padding=1)
+         pred_X, gt_X = sobel_stack_x[:N*C], sobel_stack_x[N*C:]
+         pred_Y, gt_Y = sobel_stack_y[:N*C], sobel_stack_y[N*C:]
+
+         L1X, L1Y = torch.abs(pred_X-gt_X), torch.abs(pred_Y-gt_Y)
+         loss = (L1X+L1Y)
+         return loss
+
+
+ class MeanShift(nn.Conv2d):
+     def __init__(self, data_mean, data_std, data_range=1, norm=True):
+         c = len(data_mean)
+         super(MeanShift, self).__init__(c, c, kernel_size=1)
+         std = torch.Tensor(data_std)
+         self.weight.data = torch.eye(c).view(c, c, 1, 1)
+         if norm:
+             self.weight.data.div_(std.view(c, 1, 1, 1))
+             self.bias.data = -1 * data_range * torch.Tensor(data_mean)
+             self.bias.data.div_(std)
+         else:
+             self.weight.data.mul_(std.view(c, 1, 1, 1))
+             self.bias.data = data_range * torch.Tensor(data_mean)
+         self.requires_grad = False
+
+
+ class VGGPerceptualLoss(torch.nn.Module):
+     def __init__(self, rank=0):
+         super(VGGPerceptualLoss, self).__init__()
+         blocks = []
+         pretrained = True
+         self.vgg_pretrained_features = models.vgg19(pretrained=pretrained).features
+         self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).cuda()
+         for param in self.parameters():
+             param.requires_grad = False
+
+     def forward(self, X, Y, indices=None):
+         X = self.normalize(X)
+         Y = self.normalize(Y)
+         indices = [2, 7, 12, 21, 30]
+         weights = [1.0/2.6, 1.0/4.8, 1.0/3.7, 1.0/5.6, 10/1.5]
+         k = 0
+         loss = 0
+         for i in range(indices[-1]):
+             X = self.vgg_pretrained_features[i](X)
+             Y = self.vgg_pretrained_features[i](Y)
+             if (i+1) in indices:
+                 loss += weights[k] * (X - Y.detach()).abs().mean() * 0.1
+                 k += 1
+         return loss
+
+
+ if __name__ == '__main__':
+     img0 = torch.zeros(3, 3, 256, 256).float().to(device)
+     img1 = torch.tensor(np.random.normal(
+         0, 1, (3, 3, 256, 256))).float().to(device)
+     ternary_loss = Ternary()
+     print(ternary_loss(img0, img1).shape)
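MeanShift above implements dataset normalization as a frozen 1x1 convolution so it can sit in front of the frozen VGG features. A quick self-contained check of what it computes with the ImageNet statistics used by VGGPerceptualLoss (the input tensor here is illustrative):

import torch

mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)

x = torch.rand(1, 3, 8, 8)
normalized = (x - mean) / std  # per-pixel result of MeanShift(mean, std, norm=True)(x)
print(normalized.shape)        # torch.Size([1, 3, 8, 8])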
model/pytorch_msssim/__init__.py ADDED
@@ -0,0 +1,200 @@
+import torch
+import torch.nn.functional as F
+from math import exp
+import numpy as np
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+def gaussian(window_size, sigma):
+    gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
+    return gauss/gauss.sum()
+
+
+def create_window(window_size, channel=1):
+    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0).to(device)
+    window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
+    return window
+
+def create_window_3d(window_size, channel=1):
+    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+    _2D_window = _1D_window.mm(_1D_window.t())
+    _3D_window = _2D_window.unsqueeze(2) @ (_1D_window.t())
+    window = _3D_window.expand(1, channel, window_size, window_size, window_size).contiguous().to(device)
+    return window
+
+
+def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
+    # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
+    if val_range is None:
+        if torch.max(img1) > 128:
+            max_val = 255
+        else:
+            max_val = 1
+
+        if torch.min(img1) < -0.5:
+            min_val = -1
+        else:
+            min_val = 0
+        L = max_val - min_val
+    else:
+        L = val_range
+
+    padd = 0
+    (_, channel, height, width) = img1.size()
+    if window is None:
+        real_size = min(window_size, height, width)
+        window = create_window(real_size, channel=channel).to(img1.device)
+
+    # mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
+    # mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
+    mu1 = F.conv2d(F.pad(img1, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel)
+    mu2 = F.conv2d(F.pad(img2, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel)
+
+    mu1_sq = mu1.pow(2)
+    mu2_sq = mu2.pow(2)
+    mu1_mu2 = mu1 * mu2
+
+    sigma1_sq = F.conv2d(F.pad(img1 * img1, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_sq
+    sigma2_sq = F.conv2d(F.pad(img2 * img2, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu2_sq
+    sigma12 = F.conv2d(F.pad(img1 * img2, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_mu2
+
+    C1 = (0.01 * L) ** 2
+    C2 = (0.03 * L) ** 2
+
+    v1 = 2.0 * sigma12 + C2
+    v2 = sigma1_sq + sigma2_sq + C2
+    cs = torch.mean(v1 / v2)  # contrast sensitivity
+
+    ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
+
+    if size_average:
+        ret = ssim_map.mean()
+    else:
+        ret = ssim_map.mean(1).mean(1).mean(1)
+
+    if full:
+        return ret, cs
+    return ret
+
+
+def ssim_matlab(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
+    # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
+    if val_range is None:
+        if torch.max(img1) > 128:
+            max_val = 255
+        else:
+            max_val = 1
+
+        if torch.min(img1) < -0.5:
+            min_val = -1
+        else:
+            min_val = 0
+        L = max_val - min_val
+    else:
+        L = val_range
+
+    padd = 0
+    (_, _, height, width) = img1.size()
+    if window is None:
+        real_size = min(window_size, height, width)
+        window = create_window_3d(real_size, channel=1).to(img1.device)
+        # Channel is set to 1 since we consider color images as volumetric images
+
+    img1 = img1.unsqueeze(1)
+    img2 = img2.unsqueeze(1)
+
+    mu1 = F.conv3d(F.pad(img1, (5, 5, 5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=1)
+    mu2 = F.conv3d(F.pad(img2, (5, 5, 5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=1)
+
+    mu1_sq = mu1.pow(2)
+    mu2_sq = mu2.pow(2)
+    mu1_mu2 = mu1 * mu2
+
+    sigma1_sq = F.conv3d(F.pad(img1 * img1, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu1_sq
+    sigma2_sq = F.conv3d(F.pad(img2 * img2, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu2_sq
+    sigma12 = F.conv3d(F.pad(img1 * img2, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu1_mu2
+
+    C1 = (0.01 * L) ** 2
+    C2 = (0.03 * L) ** 2
+
+    v1 = 2.0 * sigma12 + C2
+    v2 = sigma1_sq + sigma2_sq + C2
+    cs = torch.mean(v1 / v2)  # contrast sensitivity
+
+    ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
+
+    if size_average:
+        ret = ssim_map.mean()
+    else:
+        ret = ssim_map.mean(1).mean(1).mean(1)
+
+    if full:
+        return ret, cs
+    return ret
+
+
+def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
+    device = img1.device
+    weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
+    levels = weights.size()[0]
+    mssim = []
+    mcs = []
+    for _ in range(levels):
+        sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
+        mssim.append(sim)
+        mcs.append(cs)
+
+        img1 = F.avg_pool2d(img1, (2, 2))
+        img2 = F.avg_pool2d(img2, (2, 2))
+
+    mssim = torch.stack(mssim)
+    mcs = torch.stack(mcs)
+
+    # Normalize (to avoid NaNs during training unstable models, not compliant with original definition)
+    if normalize:
+        mssim = (mssim + 1) / 2
+        mcs = (mcs + 1) / 2
+
+    pow1 = mcs ** weights
+    pow2 = mssim ** weights
+    # From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
+    output = torch.prod(pow1[:-1] * pow2[-1])
+    return output
+
+
+# Classes to re-use window
+class SSIM(torch.nn.Module):
+    def __init__(self, window_size=11, size_average=True, val_range=None):
+        super(SSIM, self).__init__()
+        self.window_size = window_size
+        self.size_average = size_average
+        self.val_range = val_range
+
+        # Assume 3 channel for SSIM
+        self.channel = 3
+        self.window = create_window(window_size, channel=self.channel)
+
+    def forward(self, img1, img2):
+        (_, channel, _, _) = img1.size()
+
+        if channel == self.channel and self.window.dtype == img1.dtype:
+            window = self.window
+        else:
+            window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
+            self.window = window
+            self.channel = channel
+
+        _ssim = ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
+        dssim = (1 - _ssim) / 2
+        return dssim
+
+class MSSSIM(torch.nn.Module):
+    def __init__(self, window_size=11, size_average=True, channel=3):
+        super(MSSSIM, self).__init__()
+        self.window_size = window_size
+        self.size_average = size_average
+        self.channel = channel
+
+    def forward(self, img1, img2):
+        return msssim(img1, img2, window_size=self.window_size, size_average=self.size_average)
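A hedged sanity check for the SSIM helpers above (the shapes and the [0, 1] value range are assumptions; identical inputs should score 1.0):

import torch
from model.pytorch_msssim import ssim, ssim_matlab, MSSSIM

a = torch.rand(4, 3, 128, 128)  # N, C, H, W in [0, 1], so val_range is inferred as 1
b = (a + 0.05 * torch.randn_like(a)).clamp(0, 1)

print(ssim(a, a).item())         # identical images: 1.0
print(ssim(a, b).item())         # 2-D SSIM with per-channel Gaussian windows
print(ssim_matlab(a, b).item())  # 3-D variant treating RGB as a volume
print(MSSSIM()(a, b).item())     # multi-scale SSIM over five dyadic levels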
model/pytorch_msssim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.32 kB)
model/warplayer.py ADDED
@@ -0,0 +1,22 @@
+import torch
+import torch.nn as nn
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+backwarp_tenGrid = {}
+
+
+def warp(tenInput, tenFlow):
+    k = (str(tenFlow.device), str(tenFlow.size()))
+    if k not in backwarp_tenGrid:
+        tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3], device=device).view(
+            1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1)
+        tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2], device=device).view(
+            1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3])
+        backwarp_tenGrid[k] = torch.cat(
+            [tenHorizontal, tenVertical], 1).to(device)
+
+    tenFlow = torch.cat([tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0),
+                         tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0)], 1)
+
+    g = (backwarp_tenGrid[k] + tenFlow).permute(0, 2, 3, 1)
+    return torch.nn.functional.grid_sample(input=tenInput, grid=g, mode='bilinear', padding_mode='border', align_corners=True)
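And a minimal sketch of the backward-warping helper (tensor shapes are assumptions; the cached sampling grid lives on the module-level `device`, so the inputs are created there too):

import torch
from model.warplayer import warp, device  # device = cuda if available, else cpu

frame = torch.rand(2, 3, 64, 64, device=device)  # N, C, H, W
flow = torch.zeros(2, 2, 64, 64, device=device)  # per-pixel (dx, dy) displacement in pixels

out = warp(frame, flow)  # zero flow: identity warp (bilinear, border padding)
assert out.shape == frame.shape
print(torch.allclose(out, frame, atol=1e-6))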
models/CLIP/rd64-uni-refined.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4956f9a7978a75630b08c9d6ec075b7c51cf43b4751b686e3a011d4012ddc9d
+size 4720707
models/CodeFormer/CodeFormerv0.1.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9aa48fc4b21224d85784c9a58885201284ec8e590b988126db2c07495b421d36
+size 376821951
models/DMDNet.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70daeb4b1fd10f241043b587d892a941f2651d7322db02f06ff64b166537f65c
+size 603684323
models/Frame/deoldify_artistic.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be026e17c47c85527b3084cacad352f7ca0e021c33aa827062c5997ebe72c61f
+size 255024891
models/Frame/deoldify_stable.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98d69dbecde018fe3d630a35ac850ac590b23e359c8349d8404b467bbfe4a0b9
+size 873359997
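Each weight file above is checked in as a Git LFS pointer rather than the raw binary: three lines carrying the pointer-spec version, the object's SHA-256, and its size in bytes, with the actual weights fetched by git-lfs at checkout. A small sketch of reading those fields back (this helper is hypothetical, not part of the repo):

# Hypothetical helper: split a Git LFS pointer file into its three fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields["version"], fields["oid"], int(fields["size"])

# e.g. ('https://git-lfs.github.com/spec/v1', 'sha256:98d6...', 873359997)
print(parse_lfs_pointer("models/Frame/deoldify_stable.onnx"))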