abdullahyang commited on
Commit
667baea
·
verified ·
1 Parent(s): d72bafb

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Roboto-VariableFont_wdth,wght.ttf filter=lfs diff=lfs merge=lfs -text
37
+ SkeletonDiffusion_Demo/magick filter=lfs diff=lfs merge=lfs -text
38
+ magick filter=lfs diff=lfs merge=lfs -text
39
+ models/nlf_l_multi.torchscript filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+
110
+ # pdm
111
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112
+ #pdm.lock
113
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114
+ # in version control.
115
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116
+ .pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121
+ __pypackages__/
122
+
123
+ # Celery stuff
124
+ celerybeat-schedule
125
+ celerybeat.pid
126
+
127
+ # SageMath parsed files
128
+ *.sage.py
129
+
130
+ # Environments
131
+ .env
132
+ .venv
133
+ env/
134
+ venv/
135
+ ENV/
136
+ env.bak/
137
+ venv.bak/
138
+
139
+ # Spyder project settings
140
+ .spyderproject
141
+ .spyproject
142
+
143
+ # Rope project settings
144
+ .ropeproject
145
+
146
+ # mkdocs documentation
147
+ /site
148
+
149
+ # mypy
150
+ .mypy_cache/
151
+ .dmypy.json
152
+ dmypy.json
153
+
154
+ # Pyre type checker
155
+ .pyre/
156
+
157
+ # pytype static type analyzer
158
+ .pytype/
159
+
160
+ # Cython debug symbols
161
+ cython_debug/
162
+
163
+ # PyCharm
164
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
167
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168
+ #.idea/
169
+
170
+ # PyPI configuration file
171
+ .pypirc
172
+
173
+ 9622_GRAB/
174
+
175
+ *.pkl
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
README.md CHANGED
@@ -1,12 +1,13 @@
1
  ---
2
  title: SkeletonDiffusion
3
- emoji: 📈
4
- colorFrom: green
5
- colorTo: pink
6
  sdk: gradio
7
- sdk_version: 5.20.0
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: SkeletonDiffusion
3
+ emoji: 💻
4
+ colorFrom: purple
5
+ colorTo: green
6
  sdk: gradio
7
+ sdk_version: 5.12.0
8
  app_file: app.py
9
  pinned: false
10
+ license: mit
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
Roboto-VariableFont_wdth,wght.ttf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b25986d18730960c7b27384ab2bc500856ae7fe9e71c9850019195ff9019f0b2
3
+ size 468308
SkeletonDiffusion_Demo/.gitignore ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+
110
+ # pdm
111
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112
+ #pdm.lock
113
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114
+ # in version control.
115
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116
+ .pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121
+ __pypackages__/
122
+
123
+ # Celery stuff
124
+ celerybeat-schedule
125
+ celerybeat.pid
126
+
127
+ # SageMath parsed files
128
+ *.sage.py
129
+
130
+ # Environments
131
+ .env
132
+ .venv
133
+ env/
134
+ venv/
135
+ ENV/
136
+ env.bak/
137
+ venv.bak/
138
+
139
+ # Spyder project settings
140
+ .spyderproject
141
+ .spyproject
142
+
143
+ # Rope project settings
144
+ .ropeproject
145
+
146
+ # mkdocs documentation
147
+ /site
148
+
149
+ # mypy
150
+ .mypy_cache/
151
+ .dmypy.json
152
+ dmypy.json
153
+
154
+ # Pyre type checker
155
+ .pyre/
156
+
157
+ # pytype static type analyzer
158
+ .pytype/
159
+
160
+ # Cython debug symbols
161
+ cython_debug/
162
+
163
+ # PyCharm
164
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
167
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168
+ #.idea/
169
+
170
+ # PyPI configuration file
171
+ .pypirc
172
+
173
+ test/
SkeletonDiffusion_Demo/README.md ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Generate gifs from sequences of obj files
2
+ ## Environment
3
+ Sorry, there is no environment file (the env I am currently using contains packages that are not necessary here). Here are the versions of a few necessary modules. You will have to install the other modules as warnings appear once you try running the code.
4
+ ```
5
+ vedo 2024.5.1
6
+ vpython 7.6.5
7
+ python 3.10.13
8
+ ```
9
+
10
+ ## Run the code
11
+ ### Prepare the data
12
+ Copy the directory '/usr/wiss/curreli/work/my_modules/Skeleton2Mesh/final_output/hmp/visuals_50samples/amass/SkeletonDiffusion/test' to this folder (I already gave you the rights - afterwards I will delete it). There are meshes for two test segments from AMASS, 9622_GRAB and 11949_HUMAN4D. The predictions are named according to the following convention:
13
+ - obs: Past observation (input sequence)
14
+ - gt: GT future
15
+ - pred_closest_GT: prediction that is most similar to the GT
16
+ - most_far_<idx>: the remaining predictions are sorted according to their similarity to the GT future, or rather to their similarity to the prediction closest to the GT. So most_far_0 is the prediction most diverse from the GT
17
+
18
+ In the demo we will not have a GT future. We will order the predictions according to a different criterion (maybe diversity).
19
+
20
+ ### Generate visuals
21
+ Now you can run
22
+ ```
23
+ python plot_several_meshes.py --mesh_parent_folder=<put_correct_path_here/9622_GRAB>
24
+ ```
25
+ The output should be in a parallel directory at the lowest folder level.
26
+ ## If you are using a ssh connection
27
+ You probably have no screen, so the code terminates. You can avoid the problem by generating a screen first with our script setup_headless.bash:
28
+ ```
29
+ setup_headless.bash python plot_several_meshes.py <...>
30
+ ```
31
+ ### Debug via ssh connection
32
+ Open your VS Code debug configuration file, launch.json, and add this new configuration:
33
+
34
+ ```
35
+ { "name": "Python: Attach to Edge",
36
+ "request": "attach", "type": "python",
37
+ "connect": { "host": "localhost", "port": 5678 },
38
+ "justMyCode": true,
39
+ },
40
+ ```
41
+ Now start the file in debugger mode with debugpy
42
+ ```
43
+ ./setup_headless.bash python -m debugpy --listen 5678 --wait-for-client plot_several_meshes.py
44
+ ```
45
+
46
+ Now start your debug configuration "Python: Attach to Edge" to attach your debugger and start debugging.
47
+
48
+ ## For the future
49
+ If you need to put more functions together, consider using shell scripts
50
+ ```
51
+ bash create_mesh_gifs.sh
52
+ ```
SkeletonDiffusion_Demo/combine_video.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import tempfile
2
+ # from moviepy import *
3
+
4
+ # def combine_video(sample_obj_path):
5
+ # gif1 = VideoFileClip(sample_obj_path+ "/shadow_gif/gt_obj_tranp.gif")
6
+ # gif2 = VideoFileClip(sample_obj_path+ "/shadow_gif/pred_closest_GT_obj_tranp.gif")
7
+ # gif3 = VideoFileClip(sample_obj_path+ "/shadow_gif/pred_far_0_obj_tranp.gif")
8
+
9
+ # common_duration = min(gif1.duration, gif2.duration, gif3.duration)
10
+ # gif1 = gif1.subclipped(0, common_duration)
11
+ # gif2 = gif2.subclipped(0, common_duration)
12
+ # gif3 = gif3.subclipped(0, common_duration)
13
+ # print(gif1.duration, gif2.duration, gif3.duration)
14
+
15
+ # text1 = TextClip(font = 'Roboto-VariableFont_wdth,wght.ttf', text="Ground Truth", font_size=30, color='black').with_position('center').with_duration(common_duration)
16
+ # text2 = TextClip(font = 'Roboto-VariableFont_wdth,wght.ttf', text="Generated_1", font_size=30, color='black').with_position('center').with_duration(common_duration)
17
+ # text3 = TextClip(font = 'Roboto-VariableFont_wdth,wght.ttf', text="Generated_2", font_size=30, color='black').with_position('center').with_duration(common_duration)
18
+
19
+ # gif1_with_text = CompositeVideoClip([gif1, text1.with_position(("center", "top"))])
20
+ # gif2_with_text = CompositeVideoClip([gif2, text2.with_position(("center", "top"))])
21
+ # gif3_with_text = CompositeVideoClip([gif3, text3.with_position(("center", "top"))])
22
+
23
+ # combined = clips_array([[gif1_with_text, gif2_with_text, gif3_with_text]])
24
+ # # combined = clips_array([[gif1, gif2, gif3]])
25
+
26
+ # target_duration = 2.5
27
+ # original_duration = combined.duration
28
+ # speed_factor = original_duration / target_duration
29
+
30
+ # accelerated_video = combined.with_speed_scaled(factor=speed_factor*3)
31
+
32
+ # # accelerated_video.write_videofile("combined_video.mp4", fps=60)
33
+ # output_path = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name
34
+ # accelerated_video.write_gif(output_path, fps=60)
35
+ # return output_path
36
+
37
+ # if __name__ == "__main__":
38
+ # combine_video("./9622_GRAB/")
39
+
40
+ import tempfile, os
41
+ from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip, clips_array, vfx
42
+
43
def combine_video(sample_obj_path):
    """Tile the GT / closest / most-diverse prediction gifs side by side,
    caption each column, speed the strip up to ~2.5 s, and write it to a
    temporary .gif whose path is returned."""
    # os.environ['IMAGEMAGICK_BINARY'] = '/home/stud/yaji/storage/user/yaji/NonisotropicSkeletonDiffusion/magick'
    gif_dir = sample_obj_path + "/shadow_gif"
    clips = [
        VideoFileClip(gif_dir + "/gt_obj_tranp.gif"),
        VideoFileClip(gif_dir + "/pred_closest_GT_obj_tranp.gif"),
        VideoFileClip(gif_dir + "/pred_far_0_obj_tranp.gif"),
    ]

    # Trim every clip to the shortest one so the three columns stay in sync.
    common_duration = min(clip.duration for clip in clips)
    clips = [clip.subclip(0, common_duration) for clip in clips]
    print(clips[0].duration, clips[1].duration, clips[2].duration)

    labels = ["Ground Truth", "Generated_1", "Generated_2"]
    captioned = []
    for clip, label in zip(clips, labels):
        caption = (TextClip(label, fontsize=30, color='black')
                   .set_position('center')
                   .set_duration(common_duration))
        captioned.append(CompositeVideoClip([clip, caption.set_position(("center", "top"))]))

    # One row, three columns.
    combined = clips_array([captioned])

    # Accelerate so the whole strip plays in roughly target_duration seconds.
    target_duration = 2.5
    speed_factor = combined.duration / target_duration
    accelerated_video = combined.fx(vfx.speedx, factor=speed_factor)

    output_path = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name
    accelerated_video.write_gif(output_path, fps=60)
    return output_path
75
+
76
+ if __name__ == "__main__":
77
+ combine_video("./9622_GRAB/")
SkeletonDiffusion_Demo/create_mesh_gifs.sh ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Point gif tooling at the repo-local ImageMagick binary; consumed via the
# IMAGEMAGICK_BINARY env var (see the commented setup in combine_video.py).
export IMAGEMAGICK_BINARY=/home/stud/yaji/storage/user/yaji/SkeletonDiffusion_Demo/magick

# Render the mesh sequences to gifs; replace <you_custom_path> with your mesh folder.
python plot_several_meshes.py -f <you_custom_path>
SkeletonDiffusion_Demo/magick ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:945eaba92f0d94fb4d54b45cfd67c2bdd163b987d66c6ca108d470b09049bfe0
3
+ size 29795520
SkeletonDiffusion_Demo/plot_several_meshes.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ from vedo import Mesh, Plane, Plotter, screenshot
4
+ import imageio.v2 as imageio
5
+ # import imageio as imageio
6
+ import re
7
+ import os
8
+ from .visual import get_ptc # probaly vpython
9
+ from PIL import Image
10
+ from tqdm import tqdm
11
+
12
+ screenshot_scale = 3
13
+
14
+ PLAY_OBS_BEFORE_PRED = True
15
+ obs_dir = ""
16
+ cam = dict(
17
+ position=(-3.85922, -4.78140, 2.689328),
18
+ focal_point=(0.192544, 4.77379e-3, 0.0127248),
19
+ viewup=(0.0724348, 0.109097, 0.991388),
20
+ distance=5.25119,
21
+ clipping_range=(4.38228, 6.36775),
22
+ )
23
+
24
+ _green= [0.4, 0.7, 0.5]
25
+ _blue =[0.7, 0.9, 1.0]
26
+ _dark_blue =[0.03, 0.4, 0.7]
27
+ _orange = [0.9, 0.7, 0.2]
28
+
29
def get_mesh(mesh_name, color='darksalmon', mesh_opacity=1.0, mesh_lighting='default', rotation=(0, 0, 0), offset=(0, 0, 0), scale=0):
    """Load a mesh file and return it scaled, positioned and rotated.

    Parameters
    ----------
    mesh_name : path of the mesh file (anything vedo.Mesh accepts).
    color, mesh_opacity, mesh_lighting : appearance settings forwarded to vedo.
        Lighting styles: 'default', 'metallic', 'plastic', 'shiny', 'glossy',
        'ambient', 'off'.
    rotation : per-axis rotation applied in x, y, z order (vedo's rotate_*
        default unit is degrees).
    offset : translation applied before the rotations.
    scale : uniform scale factor; 0 (the default) means auto-fit, i.e.
        normalize the largest bounding-box extent to 1.

    Returns
    -------
    (mesh, scale) : the configured vedo Mesh and the scale actually applied.
    """
    # Defaults are tuples: mutable list defaults are shared across calls.
    offset = np.array(offset)
    mesh = Mesh(mesh_name, c=color, alpha=mesh_opacity).lighting(mesh_lighting)
    if scale == 0:
        # Auto-scale so the longest bounding-box side becomes unit length.
        bnd = np.array(mesh.bounds())
        scale = 1 / max(bnd[1] - bnd[0], bnd[3] - bnd[2], bnd[5] - bnd[4])
    mesh.scale(scale)
    mesh.pos(offset)
    mesh.rotate_x(rotation[0])
    mesh.rotate_y(rotation[1])
    mesh.rotate_z(rotation[2])
    return mesh, scale
44
+
45
def save_png_rm_bg(filename, im, bg_color_min=(0, 255, 100), bg_color_max=None, rgb=False):
    """Write *im* to *filename* as a PNG whose background pixels are transparent.

    Pixels whose color lies within [bg_color_min, bg_color_max] (inclusive per
    channel) get alpha 0; every other pixel gets alpha 255.
    Approach adapted from:
    https://stackoverflow.com/questions/72062001 (color removal with tolerance)
    https://stackoverflow.com/questions/55582117 (color-to-transparency)

    Parameters
    ----------
    filename : output PNG path.
    im : H x W x 3 uint8 image array (as returned by Plotter.screenshot).
    bg_color_min, bg_color_max : per-channel background color range; when
        bg_color_max is None the range collapses to the single color
        bg_color_min.
    rgb : if True, keep the channel order as-is for cv2.imwrite; otherwise
        swap channels 0 and 2 (RGB screenshot -> BGR on disk).
    """
    if bg_color_max is None:
        bg_color_max = bg_color_min

    # Background mask; named lo/hi instead of min/max so the builtins are not
    # shadowed.
    lo = np.array(bg_color_min, np.uint8)
    hi = np.array(bg_color_max, np.uint8)
    mask = cv2.inRange(im, lo, hi)

    # Fully opaque alpha channel, then punch out the background pixels.
    alpha = np.full(im.shape[:2], 255, dtype=im.dtype)
    alpha[mask > 0] = 0

    # Attach the alpha channel (the original also computed a bounding box for
    # cropping here, but the crop was disabled and the computation could raise
    # on an all-background image, so it was dropped).
    if rgb:
        im = cv2.merge((im[:, :, 0], im[:, :, 1], im[:, :, 2], alpha))
    else:
        im = cv2.merge((im[:, :, 2], im[:, :, 1], im[:, :, 0], alpha))

    cv2.imwrite(filename, im)
89
+
90
def save_plot_as_transparent_png(plt, out_filename):
    """Screenshot the vedo Plotter *plt* and save it to *out_filename* with
    the background made transparent."""
    white = [255, 255, 255]
    # Force a known background color so it can be keyed out afterwards.
    plt.background(c1=white)
    frame = plt.screenshot(asarray=True)
    save_png_rm_bg(out_filename, frame, bg_color_min=white)
95
+
96
+
97
def get_mesh_shadow(mesh, offset=(0, 0, 0), plane_normal=(0, 0, 1), direction=(0.1, -1.8, 3)):
    """Build a fake drop shadow: project a flat gray copy of *mesh* onto a
    ground plane placed at the mesh's lowest z coordinate.

    Parameters
    ----------
    mesh : vedo Mesh to shadow.
    offset : translation applied to both the plane position and the projection
        direction. Tuple default (mutable list defaults are shared across calls).
    plane_normal : normal of the ground plane (defaults to the z axis).
    direction : light direction used for the projection.

    Returns the projected shadow mesh.
    """
    shadow_color = np.array([0.8, 0.8, 0.8])
    offset = np.array(offset)
    # Ground the plane at the lowest vertex of the mesh.
    min_z = mesh.vertices.min(axis=0)[2]
    plane = Plane(pos=np.array([0, 0, min_z]) + offset, normal=plane_normal, s=[7, 7]).alpha(0.2)
    shadow = mesh.clone().project_on_plane(plane, direction=-np.array(direction) + offset)
    # Flat gray, unlit, and excluded from bounds so it doesn't affect camera fitting.
    shadow.c(shadow_color).alpha(1).lighting("off").use_bounds(False)
    return shadow
107
+
108
def plot_meshes(mesh_files, cam, mesh_color=(1, 1, 1), rotation=(0, 0, 0)):
    """Unimplemented stub kept so existing imports/callers do not break.

    The original body declared only unused locals (opacity 0.8, shadows on)
    and then `pass`, so this has always been a no-op; create_visual() is the
    function that actually renders mesh sequences.
    """
    # NOTE(review): intentionally a no-op; returns None like the original.
    pass
114
+
115
def create_visual(mesh_files, color='coral', dest_name="", x=0, y=0, z=0, type='mesh', mesh_lighting='plastic', obs_dir=""):
    """Render every mesh in *mesh_files* offscreen with a drop shadow, save a
    .png plus a transparent '...new.png' per frame, then compile the
    transparent frames into <dest_name>_tranp.gif.

    Parameters
    ----------
    mesh_files : ordered list of mesh file paths (one per animation frame).
    color : mesh color forwarded to get_mesh().
    dest_name : output gif path without extension.
    x, y, z : per-axis rotation applied to every mesh.
    type : unused; kept for call-site compatibility (callers pass type='mesh').
    mesh_lighting : lighting style forwarded to get_mesh().
    obs_dir : directory holding observation frames, forwarded to
        create_transparent_gif() so they can be prepended to predictions.
    """
    bg_col = [255, 255, 255]
    plotter = Plotter(bg=[255, 255, 255], offscreen=True)

    frame_files = []
    # lights = get_lights()
    for file in tqdm(mesh_files):
        mesh, scale = get_mesh(file, color=color, rotation=[x, y, z], mesh_lighting=mesh_lighting, scale=0.6)

        plotter.clear()
        shadow = get_mesh_shadow(mesh)
        plotter.show(mesh, shadow, __doc__, interactive=False, camera=cam, resetcam=True, zoom=2)

        # NOTE(review): assumes a 4-character extension like '.obj' — confirm.
        frame_path = file[:-4] + ".png"
        frame_files.append(frame_path)
        screenshot(frame_path, scale=1)
        # Second screenshot with the white background keyed out; these
        # '...new.png' frames are what end up in the gif.
        save_png_rm_bg(file[:-4] + "new.png", plotter.screenshot(asarray=True), bg_color_min=bg_col)

    # Compile the saved transparent frames into a GIF.
    create_transparent_gif(frame_files, dest_name, obs_dir)
148
def create_transparent_gif(frame_files, dest_name, obs_dir):
    """Assemble transparent per-frame PNGs into <dest_name>_tranp.gif.

    For prediction sequences (dest_name without "obs"), and when
    PLAY_OBS_BEFORE_PRED is set, the observation frames found in *obs_dir*
    are played first. Expects that every entry of *frame_files* has a
    background-free '...new.png' twin produced by save_png_rm_bg().
    """
    images = []
    if "obs" not in dest_name and PLAY_OBS_BEFORE_PRED:
        # Collect and numerically sort the observation frames, then prepend them.
        obs_files = sort_list(
            [os.path.join(obs_dir, f) for f in os.listdir(obs_dir) if f.endswith('new.png')]
        )
        images.extend(Image.open(frame) for frame in obs_files)
    images.extend(Image.open(frame.replace('.png', 'new.png')) for frame in frame_files)

    if not images:
        # Nothing rendered: avoid the IndexError that images[0] would raise.
        return

    images[0].save(
        os.path.join(dest_name + '_tranp.gif'),
        format="GIF",
        save_all=True,
        loop=1,
        # Bug fix: the original passed the full list here, so frame 0 appeared
        # twice (once as the base image, once as the first appended frame).
        append_images=images[1:],
        # Bug fix: Pillow's `duration` is in milliseconds; the original 0.016
        # truncated to 0. 16 ms ~ 60 fps, matching the old value's intent.
        duration=16,
        disposal=2,
    )
172
+
173
def sort_list(l):
    """Sort file names numerically by the integer right before the extension
    dot; fall back to plain lexicographic order when a name has no such
    number."""
    def _frame_index(name):
        return int(re.search(r'\d+(?=\.)', name).group())
    try:
        return sorted(l, key=_frame_index)
    except AttributeError:
        # re.search returned None for some entry (no digits before a '.').
        return sorted(l)
178
+
179
+
180
def visual_gt(mesh_files, color, x, y, z, type='mesh'):
    """Render ground-truth frames offscreen with drop shadows, writing a .png
    and a transparent '...new.png' for each file in *mesh_files*.

    type='mesh' loads each file with get_mesh(); any other value treats the
    file as a point cloud via get_ptc() and colors it mid-gray.
    """
    bg_col = [255, 255, 255]
    plotter = Plotter(bg=[255, 255, 255], offscreen=True)

    rotation = [x, y, z]
    frame_files = []
    for file in mesh_files:
        if type == 'mesh':
            mesh, scale = get_mesh(file, color=color, rotation=rotation, mesh_lighting='plastic')
        else:
            mesh, scale = get_ptc(file, rotation=rotation)
            mesh.c([0.5, 0.5, 0.5])

        plotter.clear()
        shadow = get_mesh_shadow(mesh)
        plotter.show(mesh, shadow, __doc__, interactive=False, camera=cam, resetcam=True, zoom=2)

        frame_path = file[:-4] + ".png"
        frame_files.append(frame_path)
        screenshot(frame_path, scale=1)
        save_png_rm_bg(file[:-4] + "new.png", plotter.screenshot(asarray=True), bg_color_min=bg_col)
206
+
207
+
208
def plot_two_mesh(meshfile1, meshfile2, color, x, y, z):
    """Render a point cloud (*meshfile1*) together with a mesh (*meshfile2*)
    in one offscreen frame; save '<meshfile2 stem>.png' and a transparent
    '<meshfile2 stem>both.png'."""
    rotation = [x, y, z]
    plotter = Plotter(bg=[255, 255, 255], offscreen=True)

    point_cloud, _scale = get_ptc(meshfile1, rotation=rotation)
    point_cloud.alpha(1.0).c([0.5, 0.5, 0.5])
    surface, _scale = get_mesh(meshfile2, color=color, rotation=rotation, mesh_lighting='plastic')

    shadow = get_mesh_shadow(surface)
    plotter.show(point_cloud, surface, shadow, __doc__, interactive=False, camera=cam, resetcam=True, zoom=2)

    frame_path = meshfile2[:-4] + ".png"
    screenshot(frame_path, scale=1)
    save_png_rm_bg(meshfile2[:-4] + "both.png", plotter.screenshot(asarray=True), bg_color_min=[255, 255, 255])
223
+
224
+ import argparse
225
+
226
+
227
+
228
+
229
def main(parent_folder):
    """Render gifs for the selected prediction subfolders of *parent_folder*.

    Only subfolders whose name contains 'obj' and one of 'gt' / 'closest' /
    'far_49' are processed; their .obj frame sequences are rendered via
    create_visual() into <parent>/shadow_gif/<subfolder>_tranp.gif.
    """
    file_type = '.obj'  # alternatively 'mesh.ply'
    x, y, z = 0, 0, 0
    dest_dir = os.path.join(os.path.dirname(parent_folder), 'shadow_gif')
    obs_dir = os.path.join(os.path.dirname(parent_folder), 'obs_obj')
    os.makedirs(dest_dir, exist_ok=True)

    subfolders = sorted(
        name for name in os.listdir(parent_folder)
        if os.path.isdir(os.path.join(parent_folder, name))
        and 'obj' in name
        and ('gt' in name or 'closest' in name or 'far_49' in name)
    )
    for name in subfolders:
        folder = os.path.join(parent_folder, name)
        dest_name = os.path.join(dest_dir, name)

        mesh_files = sort_list(
            [os.path.join(folder, fn) for fn in os.listdir(folder) if fn.endswith(file_type)]
        )

        # Observations render green, predictions dark blue.
        # NOTE(review): the subfolder filter above only admits gt/closest/far_49
        # names, so the 'obs' branch looks dead — confirm before relying on it.
        mesh_color = _green if 'obs' in name else _dark_blue
        create_visual(mesh_files, mesh_color, dest_name, x, y, z, type='mesh', mesh_lighting='plastic', obs_dir=obs_dir)
251
+
252
+
253
if __name__ == "__main__":
    # conda activate hmp_visualize
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-f', '--mesh_parent_folder', type=str,
        default="final_output/hmp/visuals/amass/SkeletonDiffusion/test/952_WalkTogether/obj",
    )
    cli_args = arg_parser.parse_args()
    main(cli_args.mesh_parent_folder)
260
+
261
+ ####################################################################################################################################
262
+ # Old code snippet, kept just for reference. Jialin, here there may be something you will need, or maybe not :) If you do not need it you can delete it
263
+ ####################################################################################################################################
264
+
265
+ # other functions not needed, from older code
266
+ # visual_gt(gt_mesh_files, 0,0,-90, type='mesh') # this line was commented out by Lu
267
+ # visual_gt(gt_obj_files, 'salmon', x,y,z)#--> .png and new.png (transparent) per frame
268
+ # gt_ptc_files = [os.path.join(folder, prefix +'gt_pointcloud_x.ply'),os.path.join(folder, prefix +'gt_pointcloud_y.ply')]
269
+ # gt_obj_files = sort_list([os.path.join(target_folder, f) for f in os.listdir(target_folder) if f.endswith(file_type)])
270
+ # plot_two_mesh(gt_ptc_files[1], mesh_files[-1], "lightblue", x,y,z)
271
+
272
+ # older code
273
+ # final_folder
274
+ # gif_folder
275
+ # filename = os.path.join(dest_name + '.gif')
276
+ # frame_files = [os.path.join(gif_folder, f"pred{i}.gif") for i in range(10)]
277
+ # obs_image = imageio.imread(os.path.join(gif_folder, f"obs_salmon.png"))
278
+ # for i,filename in enumerate(frame_files):
279
+ # image = imageio.imread(filename)
280
+ # frames = np.vstack([obs_image, image])
281
+ # imageio.imwrite(os.path.join(final_folder, f"pred{i}.gif"), frames, fps=50)
282
+
283
+ # image = imageio.imread(os.path.join(gif_folder, f"gt_salmon.gif"))
284
+ # frames = np.vstack([obs_image, image])
285
+ # imageio.imwrite(os.path.join(final_folder, f"gt.gif"), frames, fps=50)
286
+
287
+
288
+
289
+ # gifs= [imageio.imread(os.path.join(final_folder, f"pred{i}.gif")) for i in range(10)]
290
+ # #Create writer object
291
+ # new_gif = imageio.get_writer(os.path.join(final_folder, f"output.gif"))
292
+
293
+ # for frame_number in range(len(gifs[0])):
294
+ # img1 = gif1.get_next_data()
295
+ # img2 = gif2.get_next_data()
296
+ # #here is the magic
297
+ # upper_row = np.hstack((gifs[:5, frame_number]))
298
+ # lower_row = np.hstack((gifs[5:, frame_number]))
299
+ # new_image = np.vstack((upper_row, lower_row))
300
+ # new_gif.append_data(new_image)
301
+
302
+
303
+ print("done.")
304
+
SkeletonDiffusion_Demo/setup_headless.bash ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Start a virtual X server (Xvfb) so GUI/OpenGL tools can render headlessly,
# then exec the wrapped command with DISPLAY pointing at the virtual screen.
set -x
export DISPLAY=:99.0
# Sanity check: show where (or whether) Xvfb is installed.
which Xvfb
# Launch Xvfb on display :99 with one 1024x768 24-bit screen, in the background.
Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 &
# sleep 3
set +x
# Replace this shell with the user-supplied command and its arguments.
exec "$@"
SkeletonDiffusion_Demo/visual.py ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from vedo import Mesh, Points, Plotter, screenshot, Arrow, Light, Plane
2
+ import imageio
3
+ import os
4
+ import numpy as np
5
+ from scipy.interpolate import Rbf, NearestNDInterpolator as Near
6
+ import re
7
+
8
+ screenshot_scale = 3
9
+
10
+ cam = dict(
11
+ position=(-3.85922, -4.78140, 0.689328),
12
+ focal_point=(0.192544, 4.77379e-3, 0.0127248),
13
+ viewup=(0.0724348, 0.109097, 0.991388),
14
+ distance=5.25119,
15
+ clipping_range=(4.38228, 6.36775),
16
+ )
17
+
18
+ _red = [0.9, 0.1, 0.1]
19
+ _green= [0.4, 1.0, 0.8]
20
+ _blue =[0.7, 0.9, 1.0]
21
+ _dark_blue =[0.03, 0.4, 0.7]
22
+
23
def sort_list(l):
    """Sort filenames by the integer that appears just before their extension dot.

    Falls back to plain lexicographic order when any entry lacks the
    digits-before-dot pattern (``re.search`` returns None, raising
    AttributeError on ``.group()``).
    """
    def numeric_key(name):
        return int(re.search(r'\d+(?=\.)', name).group())

    try:
        return sorted(l, key=numeric_key)
    except AttributeError:
        return sorted(l)
28
+
29
+
30
def get_lights(light_color=[255, 255, 245], offset=[0, 0, 0], intensity_mult=1):
    """Build five vedo Light sources around *offset*: one overhead plus four
    side lights (front/back/left/right), all aimed at the offset origin.

    Each light reuses an Arrow actor to supply its position and color. The
    front light is the strongest; *intensity_mult* scales all of them.
    """
    shift = np.array(offset)
    target = np.array([0, 0, 0]) + shift

    def make_light(tip, base_intensity):
        marker = Arrow(np.array(tip) + shift, target, c=light_color).scale(0.2)
        return Light(marker, intensity=base_intensity * intensity_mult, angle=180)

    head_light = make_light([0.1, 0.1, 10], 0.7)
    front_light = make_light([0.1, -1.5, 0.3], 1)
    back_light = make_light([0.1, 1.5, 0.3], 0.6)
    left_light = make_light([-1.5, 0.1, 0.3], 0.6)
    right_light = make_light([1.5, 0.1, 0.3], 0.6)
    return [head_light, front_light, back_light, left_light, right_light]
46
+
47
def get_mesh(mesh_name, mesh_opacity=1.0, mesh_lighting='default', rotation=[0, 0, 0], offset=[0, 0, 0], scale=0):
    """Load a file as a vedo Mesh, normalize its size, and place/orient it.

    When *scale* is 0, the mesh is rescaled so that its largest bounding-box
    extent becomes 1; the applied scale is returned alongside the mesh so
    sibling objects can be scaled consistently.
    Available lighting styles: 'default', 'metallic', 'plastic', 'shiny',
    'glossy', 'ambient', 'off'.
    """
    loaded = Mesh(mesh_name, c=_blue, alpha=mesh_opacity).lighting(mesh_lighting)
    if scale == 0:
        xmin, xmax, ymin, ymax, zmin, zmax = np.array(loaded.bounds())
        scale = 1 / max(xmax - xmin, ymax - ymin, zmax - zmin)
    loaded.scale(scale)
    loaded.pos(np.array(offset))
    rx, ry, rz = rotation
    loaded.rotate_x(rx)
    loaded.rotate_y(ry)
    loaded.rotate_z(rz)
    return loaded, scale
62
+
63
def get_ptc(ptc_name, rotation=[0, 0, 0], offset=[0, 0, 0], scale=0, color=_dark_blue):
    """Load a mesh file and return it as a normalized, positioned point cloud.

    Mirrors get_mesh(): when *scale* is 0 the cloud is rescaled so its largest
    bounding-box extent becomes 1; the applied scale is returned alongside.

    NOTE(review): the *color* parameter is never used — points are always drawn
    mid-gray (0.5, 0.5, 0.5). Callers such as point_cloud_colormap pass
    color=_red/_blue expecting it to take effect; confirm intent before
    relying on it.
    """
    offset = np.array(offset)
    #styles = ['default', 'metallic', 'plastic', 'shiny', 'glossy', 'ambient', 'off']
    mesh = Points(Mesh(ptc_name), r=1).c([0.5,0.5,0.5]).alpha(0.8)
    if scale == 0:
        # Normalize by the largest axis-aligned extent of the bounding box.
        bnd = np.array(mesh.bounds())
        scale = 1 / max(bnd[1]-bnd[0], bnd[3]-bnd[2], bnd[5]-bnd[4])
    mesh.scale(scale)
    mesh.pos(offset)
    mesh.rotate_x(rotation[0])
    mesh.rotate_y(rotation[1])
    mesh.rotate_z(rotation[2])
    # mesh.phong()
    # mesh2.phong()
    return mesh, scale
78
+
79
def get_mesh_shadow(mesh, offset=[0, 0, 0], plane_normal=(0, 0, 1), direction=[0.1, -1.8, 3]):
    """Project *mesh* onto a ground plane to fake a flat drop shadow.

    The plane is placed at the mesh's lowest z value (plus *offset*), and the
    mesh is projected along the negated light *direction*. The projection is
    shaded light gray with lighting off, and excluded from bounds so it does
    not influence camera framing.
    """
    shadow = []
    shad_col = np.array([0.8, 0.8, 0.8])
    # Lowest z coordinate of the mesh: the shadow plane sits at this height.
    min_z = mesh.points().min(axis=0)[2]
    # mesh.add_shadow(plane='z', point=min_z, alpha=1, c=shad_col, culling=0.9,)
    plane = Plane(pos=np.array([0, 0, min_z]) + np.array(offset), normal=plane_normal, s=[7, 7]).alpha(0.2)
    shad = mesh.clone().project_on_plane(plane, direction=-np.array(direction) + np.array(offset))
    shad.c(shad_col).alpha(1).lighting("off").use_bounds(False)
    shadow = shad
    return shadow
89
+
90
+
91
+
92
+
93
def create_visual(mesh_files, directory, x, y, z, type='mesh'):
    """Render each file in *mesh_files* offscreen and assemble the frames into
    ``<type>_animation.gif`` inside *directory*.

    Parameters
    ----------
    mesh_files : list of file paths, one rendered frame per file.
    directory  : output directory for the resulting GIF.
    x, y, z    : Euler rotation (degrees) applied when rendering meshes.
    type       : 'mesh' renders shaded meshes; anything else renders point
                 clouds (note: the rotation is not applied in that branch).

    Fixes: removed unused locals and renamed the frame-loop variable, which
    previously shadowed the GIF path variable ``filename``.
    """
    # NOTE: `type` shadows the builtin but is kept for caller compatibility.
    plotter = Plotter(bg=[255, 255, 255], offscreen=True)

    frame_files = []
    for file in mesh_files:
        if type == 'mesh':
            mesh, scale = get_mesh(file, rotation=[x, y, z], mesh_lighting='metallic')
        else:
            mesh, scale = get_ptc(file)
        plotter.clear()

        shadow = get_mesh_shadow(mesh)
        plotter.show(mesh, shadow, __doc__, interactive=False, camera=cam, resetcam=True, zoom=2)
        frame_path = file[:-4] + ".png"
        frame_files.append(frame_path)
        screenshot(frame_path, scale=1)

    # Compile the saved frames into a GIF.
    gif_path = os.path.join(directory, type + '_animation.gif')
    with imageio.get_writer(gif_path, mode='I', duration=0.2) as writer:
        for frame_path in frame_files:
            writer.append_data(imageio.imread(frame_path))
120
+
121
+
122
+
123
def create_mesh_animation(mesh_files, directory, x,y,z):
    """Render each mesh in *mesh_files* with a height-based 'Blues' colormap
    and a drop shadow, then compile the frames into mesh_animation.gif in
    *directory*. x, y, z are Euler rotations in degrees applied in-place.

    NOTE(review): the ImageMagick `convert -trim` runs only AFTER a frame has
    already been appended to the GIF, so the trim affects the PNG on disk but
    not the GIF itself — confirm whether this ordering is intentional.
    """

    # Load all meshes
    meshes = [Mesh(f) for f in mesh_files]

    # Create a plotter
    plotter = Plotter(bg=[255, 255, 255], offscreen=True)
    # plotter = vedo.Plotter()# Use offscreen rendering to save frames

    frame_files = [] # To store the path of each frame image

    # plotter.camera.SetViewUp(0,0,1)


    # shadow = mesh.clone().projectOnPlane(direction=(0,0,-1))

    # Color the shadow mesh dark and make it slightly transparent

    for frame_id, mesh in enumerate(meshes):
        mesh.rotate_x(x).rotate_y(y).rotate_z(z).lighting('metallic')
        shadow = get_mesh_shadow(mesh)
        # Height colormap: larger z -> smaller scalar, mapped onto 'Blues'.
        scalars = 1- mesh.vertices[:, 2]
        mesh.cmap('Blues', scalars)
        # mesh.add_shadow('x', 0.95)
        plotter.clear()
        # plotter.add(mesh)
        plotter.show(mesh, shadow, __doc__, interactive=False, camera=cam, resetcam=True, zoom=2)
        # Frame PNG is written next to the source file (extension swapped).
        frame_path = mesh_files[frame_id][:-4] + ".png"
        frame_files.append(frame_path)
        screenshot(frame_path, scale=1) # Save frame as image


        #crop the images
        # os.system("""convert -trim {0} "{1}" """.format(frame_path, frame_path))


    # Compile saved frames into a GIF
    filename = os.path.join(directory, 'mesh_animation.gif')
    with imageio.get_writer(filename, mode='I', duration=0.2) as writer:
        for f in frame_files:
            image = imageio.imread(f)
            writer.append_data(image)
            os.system("""convert -trim {0} "{1}" """.format(f, f))
166
+
167
+
168
+
169
+
170
+
171
def create_point_cloud_animation(files, directory, x,y,z):
    """Render each file in *files* as a gray point cloud with a drop shadow
    and compile the frames into pct_animation.gif inside *directory*.
    x, y, z are Euler rotations in degrees applied in-place.

    NOTE(review): as in create_mesh_animation, `convert -trim` runs after the
    frame has already been appended to the GIF, so it only trims the PNG.
    """
    # Load all meshes
    meshes = [Points(Mesh(f), r=1) for f in files]

    # Create a plotter
    plotter = Plotter(bg=[255, 255, 255], offscreen=True)
    # plotter = vedo.Plotter()# Use offscreen rendering to save frames

    frame_files = [] # To store the path of each frame image

    # plotter.camera.SetViewUp(0,0,1)


    # shadow = mesh.clone().projectOnPlane(direction=(0,0,-1))

    # Color the shadow mesh dark and make it slightly transparent

    for frame_id, mesh in enumerate(meshes):
        mesh.rotate_x(x).rotate_y(y).rotate_z(z).c([0.5,0.5,0.5])
        shadow = get_mesh_shadow(mesh)
        # scalars = 1- mesh.vertices[:, 2]
        # mesh.cmap('binary', scalars)
        plotter.clear()
        plotter.show(mesh, shadow, __doc__, interactive=False, camera=cam, resetcam=True, zoom=2)

        # Frame PNG is written next to the source file (extension swapped).
        frame_path = files[frame_id][:-4] + ".png"
        frame_files.append(frame_path)
        screenshot(frame_path, scale=1) # Save frame as image


        #crop the images
        # os.system("""convert -trim {0} "{1}" """.format(frame_path, frame_path))


    # Compile saved frames into a GIF
    filename = os.path.join(directory, 'pct_animation.gif')
    with imageio.get_writer(filename, mode='I', duration=0.2) as writer:
        for f in frame_files:
            image = imageio.imread(f)
            writer.append_data(image)
            os.system("""convert -trim {0} "{1}" """.format(f, f))
212
+
213
+
214
+
215
def point_cloud_colormap(gt_ptc, ptc, corr_num):
    """Color point cloud *ptc* by a descriptor interpolated from the first
    *corr_num* landmark points of *gt_ptc*, then display it interactively.

    The descriptor is the squared distance from the origin of each
    ground-truth landmark; values are spread to every vertex of *ptc* with a
    nearest-neighbour interpolator and shown with a 'coolwarm' scalarbar.

    Fix: removed the unused ``landmarks2`` local.
    """
    s1, _ = get_ptc(gt_ptc, color=_red)
    s2, _ = get_ptc(ptc, color=_blue)

    # Landmarks are assumed to be the first corr_num vertices, in
    # correspondence across the two clouds — TODO confirm with the caller.
    landmarks1 = s1.vertices[:corr_num, :]

    x, y, z = np.split(landmarks1, 3, axis=1)
    desc = x**2 + y**2 + z**2

    # itr = Rbf(x, y, z, desc)  # Radial Basis Function interpolator
    itr = Near(landmarks1, desc)  # Nearest-neighbour interpolator

    # Interpolate the descriptor on the full set of cloud vertices.
    xi, yi, zi = np.split(s2.vertices, 3, axis=1)
    interpolated_desc = itr(xi, yi, zi)

    s2.cmap('coolwarm', interpolated_desc.squeeze()).add_scalarbar(title='error')
    # s2.point_colors(interpolated_desc, cmap='coolwarm', vmin=min(interpolated_desc), vmax=max(interpolated_desc)).addScalarBar(title='$\error$')

    plotter = Plotter()

    plotter.show(s2, __doc__, axes=True)
239
+
240
+
241
def mesh_colormap(gt_mesh, mesh, corr_num):
    """Color *mesh* by a descriptor interpolated from the first *corr_num*
    landmark points of *gt_mesh*, then display it interactively.

    Bug fix: the body previously referenced the undefined globals ``gt_ptc``
    and ``ptc`` instead of the ``gt_mesh``/``mesh`` parameters, so any call
    raised NameError.
    """
    s1, _ = get_ptc(gt_mesh, color=_red)
    s2, _ = get_ptc(mesh, color=_blue)

    # Landmarks are assumed to be the first corr_num vertices, in
    # correspondence across the two shapes — TODO confirm with the caller.
    landmarks1 = s1.vertices[:corr_num, :]

    # Descriptor: squared distance of each landmark from the origin.
    x, y, z = np.split(landmarks1, 3, axis=1)
    desc = x**2 + y**2 + z**2

    # itr = Rbf(x, y, z, desc)  # Radial Basis Function interpolator
    itr = Near(landmarks1, desc)  # Nearest-neighbour interpolator

    # Interpolate the descriptor on the full set of mesh vertices.
    xi, yi, zi = np.split(s2.vertices, 3, axis=1)
    interpolated_desc = itr(xi, yi, zi)

    s2.cmap('coolwarm', interpolated_desc.squeeze()).add_scalarbar(title='error')

    plotter = Plotter()

    plotter.show(s2, __doc__, axes=True)
265
+
266
+
267
def plot_two_mesh(meshfile1, meshfile2, x,y,z):
    """Overlay a point cloud (*meshfile1*, dark gray) and a shaded mesh
    (*meshfile2*, 'Blues' height colormap), render offscreen with the shared
    camera, and save a trimmed PNG next to *meshfile2*.

    x, y, z are Euler rotations in degrees applied to both objects.
    NOTE(review): each object is normalized independently by get_ptc/get_mesh,
    so their relative scale only matches if the source files have similar
    extents — confirm.
    """
    plotter = Plotter(bg=[255, 255, 255], offscreen=True)
    mesh1, scale = get_ptc(meshfile1, rotation=[x,y,z])
    mesh1.alpha(1.0).c([0.2,0.2,0.2])
    # scalars = 1- mesh1.vertices[:, 2]
    # mesh1.cmap('Oranges', scalars)
    # mesh1.c(_dark_blue)
    mesh2, scale = get_mesh(meshfile2, rotation=[x,y,z], mesh_lighting='shiny')
    # Height colormap: larger z -> smaller scalar, mapped onto 'Blues'.
    scalars = 1- mesh2.vertices[:, 2]
    mesh2.cmap('Blues', scalars)
    plotter.show(mesh1, mesh2, __doc__, interactive=False, camera=cam, resetcam=True, zoom=2)
    frame_path =meshfile2[:-4] + ".png"

    screenshot(frame_path, scale=1)
    # Trim surrounding whitespace with ImageMagick.
    os.system("""convert -trim {0} "{1}" """.format(frame_path, frame_path))
282
+
283
+
284
if __name__ == "__main__":

    # Hardcoded experiment output folders (machine-specific absolute paths);
    # edit these before running elsewhere.
    folder = '/home/wiss/sang/git/implicit_neuro_morph/jax_implicit_neuro_morph/exp/smal/2024_04_18_15_11_59/reconstructions'
    prefix = 'shape_0_to_4000_'

    # filenames = [os.path.join(folder, prefix + str(i) + '_mesh.ply') for i in range(0,11)]
    # create_mesh_animation(filenames, folder, -45, -45, 90)


    # filenames = [os.path.join(folder, prefix + str(i) + '_ptc.ply') for i in range(0,11)]

    # gt_ptc = os.path.join(folder, prefix + 'gt_pointcloud_y.ply')
    # ptc = os.path.join(folder, prefix + 'epoch_10000_time_10_ptc.ply')

    # create_point_cloud_animation(filenames, folder, -45, -45, 90)

    folder = '/home/wiss/sang/git/implicit_neuro_morph/jax_implicit_neuro_morph/exp/smal/2024_04_18_15_11_59/eval'
    # mesh_files = [os.path.join(folder, f) for f in os.listdir(folder) if f.endswith('mesh.ply')]
    # mesh_files = sort_list(mesh_files)

    # create_visual(mesh_files, folder, 0,0,-90, type='mesh')

    # create_mesh_animation(mesh_files, folder, 0,0, -90)

    # folder = '/home/wiss/sang/git/implicit_neuro_morph/jax_implicit_neuro_morph/exp/fraust_r/2024_04_22_14_01_05/eval'
    # mesh_files = [os.path.join(folder, f) for f in os.listdir(folder) if f.endswith('ptc.ply')]
    # mesh_files = sort_list(mesh_files)
    # create_point_cloud_animation(mesh_files, folder, 0,0,-90)

    # Active step: overlay the ground-truth point cloud with the step-10
    # reconstruction and save one trimmed PNG.
    prefix = 'shape_0_to_4000_'
    mesh1 = 'gt_pointcloud_y.ply'
    mesh2 = 'step_10_mesh.ply'

    meshfile1 = os.path.join(folder, prefix + mesh1)
    meshfile2 = os.path.join(folder, prefix + mesh2)

    plot_two_mesh(meshfile1, meshfile2, 0, 0, -30)
321
+
322
+
app.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pathlib
2
+ import gradio as gr
3
+ import imageio
4
+ from yt_dlp import YoutubeDL
5
+ import cv2
6
+ import torch
7
+ import torchvision
8
+ import tempfile
9
+ import numpy as np
10
+ import smplx
11
+ import pyrender
12
+ import trimesh
13
+ import trimesh.transformations as tra
14
+ from dataclasses import dataclass
15
+ from typing import List, Dict, Any
16
+ import SkeletonDiffusion_Demo.plot_several_meshes as plot_several_meshes
17
+ import SkeletonDiffusion_Demo.combine_video as combine
18
+ import os
19
+
20
# Headless rendering setup: force PyOpenGL to use EGL (no X display needed).
os.environ['PYOPENGL_PLATFORM'] = 'egl'
# Bug fix: `os.system('export VAR=...')` sets the variable only inside a
# throwaway subshell and has no effect on this process or its children.
# Assign via os.environ so ImageMagick consumers actually see it.
os.environ['IMAGEMAGICK_BINARY'] = '/home/stud/yaji/storage/user/yaji/NonisotropicSkeletonDiffusion/magick'
23
+
24
+
25
# Load the NLF multi-person SMPL detector (TorchScript export).
# NOTE(review): .cuda() is called unconditionally even though `device` below
# falls back to CPU — this line will raise on a CUDA-less machine; confirm.
nlf_model = torch.jit.load("./models/nlf_l_multi.torchscript").cuda().eval()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load SMPL model (ensure the pkl file is correct)
# This code assumes the SMPL_NEUTRAL.pkl model has its canonical orientation with the face along +X and up along +Y.
smpl_model = smplx.create("./models/SMPL_NEUTRAL.pkl", model_type="smpl", gender="neutral").to(
    device
)
# NOTE(review): smpl_params appears unused by the functions in this file
# (process_video keeps its own local list); verify before removing.
smpl_params = []
DESCRIPTION = "# SMPL Visualization Demo"
# Maximum number of video frames processed per request.
FRAME_LIMIT = 100
37
+
38
+
39
@dataclass
class SMPLParams:
    """
    Data structure to hold SMPL parameters for the people detected in one frame.

    All attributes are torch.Tensors with one row per detected person:
    global_orient -- first 3 entries of the predicted pose vector,
    body_pose     -- remaining entries of the predicted pose vector,
    betas         -- predicted shape coefficients,
    transl        -- predicted translation.
    (Split as done in process_video: pose[:, :3] / pose[:, 3:].)
    """

    global_orient: torch.Tensor
    body_pose: torch.Tensor
    betas: torch.Tensor
    transl: torch.Tensor
49
+
50
+
51
def handle_video_input(video_file, youtube_url):
    """Resolve the video source to a local file path.

    If *youtube_url* is non-empty it takes precedence: the video is
    downloaded into downloads/ via yt-dlp and the downloaded path is
    returned. Otherwise the uploaded *video_file* path is returned as-is,
    or None when neither input was provided.
    """
    if youtube_url:
        ydl_opts = {
            "format": "best",
            "outtmpl": "downloads/%(title)s.%(ext)s",
            # Bug fix: the yt-dlp Python API expects "cookiefile"; the key
            # "cookies" (the CLI flag name) was silently ignored, so the
            # cookie jar was never used for restricted videos.
            "cookiefile": "cookies/cookies.txt",
        }
        with YoutubeDL(ydl_opts) as ydl:
            info = ydl.extract_info(youtube_url, download=True)
            video_path = ydl.prepare_filename(info)
        return video_path
    elif video_file:
        return video_file
    return None
66
+
67
+
68
def correct_vertices(vertices):
    """
    Rotate SMPL vertices from the SMPL coordinate frame into the renderer's.

    Applies a 180-degree rotation about the X axis, mapping (x, y, z) to
    (x, -y, -z). (The previous docstring claimed a -90-degree rotation about
    Y, which did not match the code; the code's behavior is kept.)

    Parameters
    ----------
    vertices : array-like of shape (1, N, 3).

    Returns
    -------
    numpy array of shape (1, N, 3) with the rotated vertices.
    """
    angle = np.radians(180)
    # 4x4 homogeneous rotation matrix about X — equivalent to
    # trimesh.transformations.rotation_matrix(angle, [1, 0, 0]), built
    # directly with numpy to avoid the extra dependency in this function.
    c, s = np.cos(angle), np.sin(angle)
    R = np.array(
        [
            [1, 0, 0, 0],
            [0, c, -s, 0],
            [0, s, c, 0],
            [0, 0, 0, 1],
        ]
    )
    # Convert vertices to homogeneous coordinates: shape (N, 4).
    vertices_homo = np.hstack(
        [vertices[0], np.ones((vertices[0].shape[0], 1))]
    )
    vertices_corrected = (R @ vertices_homo.T).T  # apply the rotation
    # Drop the homogeneous coordinate and restore the (1, N, 3) shape.
    return vertices_corrected[:, :3].reshape(1, -1, 3)
86
+
87
+
88
def render_smpl(vertices, width, height):
    """
    Render the SMPL mesh offscreen with pyrender and return a BGR image.

    - Applies the coordinate correction to the SMPL vertices.
    - Builds a trimesh object and adds it to a pyrender scene.
    - Sets up an orthographic camera at [0, 0, 5] looking toward the origin.
    - Renders the scene offscreen at (width, height).
    - Converts the output from RGB to BGR to match OpenCV conventions.
    """
    # Correct the vertices (SMPL frame -> renderer frame).
    vertices_corrected = correct_vertices(vertices)
    # Surface mesh from corrected vertices and the SMPL face topology.
    mesh = trimesh.Trimesh(vertices_corrected[0], smpl_model.faces)
    scene = pyrender.Scene(
        bg_color=[1.0, 1.0, 1.0, 0.9]
    )  # near-white background
    mesh_node = pyrender.Mesh.from_trimesh(mesh)
    scene.add(mesh_node)

    # Orthographic camera placed at [0, 0, distance] looking toward the origin.
    camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0)
    camera_pose = np.eye(4)
    distance = 5.0
    camera_pose[:3, 3] = [0, 0, distance]
    scene.add(camera, pose=camera_pose)

    # Render offscreen. Bug fix: a new OffscreenRenderer was created on every
    # call (i.e. every video frame) and never released, leaking GL/EGL
    # contexts over a long video — delete it even if rendering raises.
    renderer = pyrender.OffscreenRenderer(width, height)
    try:
        color, _ = renderer.render(scene)
    finally:
        renderer.delete()
    # Convert from RGB to BGR for OpenCV compatibility.
    color_bgr = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)
    return color_bgr
122
+
123
+
124
def process_video(video_file, youtube_url):
    """
    Processes the input video and outputs a GIF:
    - Obtains the video path (local upload or YouTube download).
    - Reads up to FRAME_LIMIT frames from the video.
    - Runs the NLF SMPL detector on each frame.
    - Generates the SMPL mesh using the SMPL model.
    - Renders the SMPL mesh and blends it over the original frame.
    - Saves the processed frames as a GIF.

    Returns (gif_path, serialized_params), or None when no input was given.
    NOTE(review): callers unpacking two values will fail on the None case.
    """
    input_path = handle_video_input(video_file, youtube_url)
    if not input_path:
        return None

    # Persistent temp file: delete=False so the path survives for Gradio.
    output_path = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name

    cap = cv2.VideoCapture(input_path)
    frame_count = 0
    smpl_params_list = []
    rendered_smpl = None  # last successful render, reused on detection gaps
    frames = []
    while cap.isOpened() and frame_count < FRAME_LIMIT:
        ret, frame = cap.read()
        if not ret:
            break

        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image_tensor = torch.from_numpy(frame_rgb).permute(2, 0, 1).int().to(device)
        with torch.inference_mode():
            pred = nlf_model.detect_smpl_batched(image_tensor.unsqueeze(0))
        pose_params = pred["pose"][0].cpu().numpy()
        betas = pred["betas"][0].cpu().numpy()
        transl = pred["trans"][0].cpu().numpy()

        # Nothing detected yet and nothing to reuse: emit the raw frame.
        # NOTE(review): this branch skips frame_count += 1, so such frames do
        # not count toward FRAME_LIMIT — confirm that is intended.
        if pose_params.shape[0] == 0 and rendered_smpl is None:
            print(f"No SMPL detected in frame {frame_count}")
            frames.append(frame_rgb)
            continue

        if pose_params.shape[0] > 0:

            # Pose vector split: first 3 values = global orientation,
            # remainder = body pose.
            smpl_param = SMPLParams(
                global_orient=torch.tensor(pose_params[:, :3]).to(device),
                body_pose=torch.tensor(pose_params[:, 3:]).to(device),
                betas=torch.tensor(betas).to(device),
                transl=torch.tensor(transl).to(device),
            )
            output_smpl = smpl_model(
                global_orient=torch.tensor(pose_params[:, :3]).to(device),
                body_pose=torch.tensor(pose_params[:, 3:]).to(device),
                betas=torch.tensor(betas).to(device),
                transl=torch.tensor(transl).to(device),
            )

            vertices = output_smpl.vertices.detach().cpu().numpy()
            rendered_smpl = render_smpl(vertices, frame.shape[1], frame.shape[0])
            smpl_params_list.append(smpl_param)
        # Blend the (possibly stale) render over the frame at 60% opacity.
        alpha = 0.6
        blended = cv2.addWeighted(frame_rgb, 1 - alpha, rendered_smpl, alpha, 0)
        frames.append(blended)
        frame_count += 1

    cap.release()

    # Serialize SMPL parameters into a JSON-compatible format
    smpl_params_serialized = [
        {
            "global_orient": p.global_orient.tolist(),
            "body_pose": p.body_pose.tolist(),
            "betas": p.betas.tolist(),
            "transl": p.transl.tolist(),
        }
        for p in smpl_params_list
    ]

    # Save as GIF (loop=0 means loop forever).
    imageio.mimsave(output_path, frames, fps=30, loop=0)
    print(f"Output GIF saved to {output_path}")
    return output_path, smpl_params_serialized
204
+
205
+
206
def generate_motion_video(smpl_params_json: List[Dict[str, Any]]):
    """
    Generate a motion video from the given SMPL parameters.

    NOTE(review): the deserialized ``smpl_params_list`` is currently unused —
    the output is produced from the hardcoded sample path below (see TODO).
    """
    # Deserialize JSON back into SMPLParams objects
    smpl_params_list = [
        SMPLParams(
            global_orient=torch.tensor(p["global_orient"]),
            body_pose=torch.tensor(p["body_pose"]),
            betas=torch.tensor(p["betas"]),
            transl=torch.tensor(p["transl"]),
        )
        for p in smpl_params_json
    ]
    # TODO: Using the SMPL parameters obtained from video, generate motion and save as .obj format, rank
    # and find the closest to the ground truth and the farthest from the ground truth, just like the samples.

    # Placeholder pipeline: render the bundled sample meshes and combine them.
    sample_obj_path = "./9622_GRAB/"
    plot_several_meshes.main(sample_obj_path)
    return combine.combine_video(sample_obj_path)
226
+
227
+
228
# Gradio UI: one tab with the video input on the left and the SMPL
# visualization plus motion generation on the right.
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)

    with gr.Tabs():
        with gr.Tab("Video Processing"):
            with gr.Row():
                with gr.Column():
                    input_video = gr.Video(label="Input Video")
                    youtube_url = gr.Textbox(label="YouTube URL")
                    process_btn = gr.Button("Process Video")

                with gr.Column():
                    # output_video = gr.Video(label="SMPL Visualization")
                    video_to_smpl = gr.Image(label="SMPL Visualization")
                    # Hidden JSON component used as state to carry the SMPL
                    # parameters from process_video into generate_motion_video.
                    obs_smpl_params = gr.JSON(label="SMPL Parameters")
                    obs_smpl_params.visible = False
                    generate_btn = gr.Button("Generate Motion")
                    output_video = gr.Image(label="Generated Motion")

            gr.Examples(
                examples=sorted(pathlib.Path("downloads").glob("*.mp4")),
                inputs=input_video,
                outputs=video_to_smpl,
                cache_examples=False,
            )

    process_btn.click(
        fn=process_video,
        inputs=[input_video, youtube_url],
        outputs=[video_to_smpl, obs_smpl_params],
    )
    generate_btn.click(
        fn=generate_motion_video, inputs=[obs_smpl_params], outputs=[output_video]
    )

# Listen on all interfaces and create a public share link.
demo.launch(server_name="0.0.0.0", share=True)
cookies/cookies.txt ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Netscape HTTP Cookie File
2
+ # http://curl.haxx.se/rfc/cookie_spec.html
3
+ # This is a generated file! Do not edit.
4
+
5
+ .youtube.com TRUE / FALSE 1771773809 HSID AUsUjiw5uHpjnN9FQ
6
+ .youtube.com TRUE / TRUE 1771773809 SSID ArWvlhFHWHJNe1ww7
7
+ .youtube.com TRUE / FALSE 1771773809 APISID 3V4Bd8r68qZSJoJR/Ag2KukueBOGGAojcc
8
+ .youtube.com TRUE / TRUE 1771773809 SAPISID hjs3w0PI8zVyfzfa/A_B55o1Wk_drSalkA
9
+ .youtube.com TRUE / TRUE 1771773809 __Secure-1PAPISID hjs3w0PI8zVyfzfa/A_B55o1Wk_drSalkA
10
+ .youtube.com TRUE / TRUE 1771773809 __Secure-3PAPISID hjs3w0PI8zVyfzfa/A_B55o1Wk_drSalkA
11
+ .youtube.com TRUE / FALSE 1750438835 _ga GA1.1.427766011.1715878835
12
+ .youtube.com TRUE / FALSE 1750588960 _ga_VCGEPY40VB GS1.1.1716028960.2.0.1716028960.60.0.0
13
+ .youtube.com TRUE / TRUE 1751628628 LOGIN_INFO AFmmF2swRQIhAKvoVorooDZFcdvOyhemewMTJIvSOBHPxRFnEibUwkQdAiAltzQ-cRmT4ZVM_K1G29I8kOyhqOcqwcs77DQc7aA0uA:QUQ3MjNmeGpEUmJFNXZNT2pRS2JqcnozdFMyN0xIc0pGUkNjaEd6ay01MXltZHlXQ0dpSkVpdG5rRUdRaWUxalZDLWN6RXYxZU5KMFphSE1URm9GT2ZJWHlfSDdCSHkzVzNsRGdCSFRndDZ1MGV4eEhvX1lBSHlQWG1Va04wellCaXFxOExPMkZUNUpMRS12NXUzVkFiTkFMbTJZTTluSm93
14
+ .youtube.com TRUE / TRUE 1742390134 NID 517=vTyhe8ky0TFImU6Zedz7cIBXkbO8_027L5Dlc2fcAN1FMVYtP401sld9WTCsRlLcQLC2tGyKwAEcygV4VZz_Z6fBCje4rAG_uJGP4BWyV_t_xTHeJzWJIdcSeziXRveABzB9CrEqSmQ9YNCVFWRFPsSohv5x06OTMSBaRrljZKDj8qfZl4xbF2fIDM08EiTK9u_UdIkeLvJjhJo7UnY6ot3St5z7W5UjAkw_aHoCfhvfL98tw3Op8g
15
+ .youtube.com TRUE / TRUE 1772581805 PREF f7=4100&tz=Europe.Berlin&f4=4000000&f5=30000
16
+ .youtube.com TRUE / FALSE 1771773809 SID g.a000sgjJU_L6MmDnLGVA_VkPxNDQBFRiuUjJSjiRMoSDGy1M4Q6ipIgBLsh1vRWZ01KcYNo0DQACgYKAZcSARQSFQHGX2MiEmIP-EgwP3AVZ9kO9dmvmhoVAUF8yKqgnQY68G5Egk_wIzyVPdIl0076
17
+ .youtube.com TRUE / TRUE 1771773809 __Secure-1PSID g.a000sgjJU_L6MmDnLGVA_VkPxNDQBFRiuUjJSjiRMoSDGy1M4Q6iwBbPP-uFeHSg5_eYKOWU7AACgYKAa4SARQSFQHGX2MiyjrChB0tZfL__RkQAhA5PRoVAUF8yKrAHKtgrQDIty6zOxe2Zj-x0076
18
+ .youtube.com TRUE / TRUE 1771773809 __Secure-3PSID g.a000sgjJU_L6MmDnLGVA_VkPxNDQBFRiuUjJSjiRMoSDGy1M4Q6iFLVYDARAAviR252WUqgzMAACgYKARwSARQSFQHGX2MiLFgk6z297qdWv19BK-t10xoVAUF8yKouCCN7Exq44094aUcZCFSm0076
19
+ .youtube.com TRUE / TRUE 1769557692 __Secure-1PSIDTS sidts-CjEBmiPuTQiEzpQcYFOEjV6ORPmtcwo-bDKAiMIvMTgLUMhVblTUqg8srzKsE9GKZX2HEAA
20
+ .youtube.com TRUE / TRUE 1769557692 __Secure-3PSIDTS sidts-CjEBmiPuTQiEzpQcYFOEjV6ORPmtcwo-bDKAiMIvMTgLUMhVblTUqg8srzKsE9GKZX2HEAA
21
+ .youtube.com TRUE / FALSE 1769557809 SIDCC AKEyXzVOMGH2FCJRuD62uZGsy1IbhOWgrA-kH6qsslsuEJmKBEs-OIM6AKcylOWv9VvxRUIsQWU
22
+ .youtube.com TRUE / TRUE 1769557809 __Secure-1PSIDCC AKEyXzUnJ-rqvN2XlLRlJ0t4Sd46J6u9BMuLD96kTuAoa15Ou6CEGGg3Zsbv5PubZpprUft-WRYf
23
+ .youtube.com TRUE / TRUE 1769557809 __Secure-3PSIDCC AKEyXzVFIBk3DnxqCV-t9UcQRiQ-GI0XM8D_zSLmK_cPUYrQiTXU6iXMjc-uyaB2ipUpJ2NP9Hk
24
+ .youtube.com TRUE / TRUE 1753573803 VISITOR_INFO1_LIVE VR8ZV-E7KNk
25
+ .youtube.com TRUE / TRUE 1753573803 VISITOR_PRIVACY_METADATA CgJERRIEEgAgKRgB
26
+ .youtube.com TRUE / TRUE 0 YSC GkwTh-BBCSY
27
+ .youtube.com TRUE / TRUE 1753571280 __Secure-ROLLOUT_TOKEN CJfv8rXn-bvHAxCH75WF5euKAxi1w5yugpeLAw%3D%3D
gradio.sbatch ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Slurm batch script: run the Gradio demo on one 24GB RTX 6000 GPU.
#SBATCH --job-name="gradio_app_test"
#SBATCH --cpus-per-task=4
#SBATCH --gres=gpu:1,VRAM:24G
#SBATCH --constraint="GPU_MODEL:rtx_6000"
#SBATCH --mem=30G
#SBATCH --time=96:00:00
#SBATCH --mail-type=FAIL,INVALID_DEPEND,STAGE_OUT,TIME_LIMIT,REQUEUE
#SBATCH --output=/home/stud/yaji/slurm/logs/slurm-%j.out
#SBATCH --error=/home/stud/yaji/slurm/logs/slurm-%j.out

# Log which GPU model was actually allocated (spaces replaced by underscores).
GPU_TYPE=$(nvidia-smi -i 0 --query-gpu=name --format=csv,noheader)
GPU_TYPE="${GPU_TYPE// /_}"
echo $GPU_TYPE

python app.py
magick ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:945eaba92f0d94fb4d54b45cfd67c2bdd163b987d66c6ca108d470b09049bfe0
3
+ size 29795520
models/nlf_l_multi.torchscript ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2090b041ccebbd395018033152628c4f24afe519aed3a299861621547582cdad
3
+ size 495696900
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
transformers
torch
torchvision
yt_dlp
gradio
imageio
numpy
scipy
opencv-python
smplx
pyrender
trimesh
vedo
style.css ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ h1 {
2
+ text-align: center;
3
+ display: block;
4
+ }
5
+
6
+ #duplicate-button {
7
+ margin: auto;
8
+ color: #fff;
9
+ background: #1565c0;
10
+ border-radius: 100vh;
11
+ }
text.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
import subprocess

# Smoke-test that FFmpeg is installed and print its version information.
# Bug fix: the original printed `ffmpeg_cmd` and `result` unconditionally,
# but neither name was ever defined (and the output was not captured), so the
# script raised NameError even when FFmpeg was present.
ffmpeg_cmd = ['ffmpeg', '-version']
try:
    result = subprocess.run(ffmpeg_cmd, check=True, capture_output=True, text=True)
except Exception as e:
    print("FFmpeg not found:", e)
else:
    print("Running FFmpeg command:", ' '.join(ffmpeg_cmd))
    print("FFmpeg output:", result.stdout)
    print("FFmpeg errors:", result.stderr)
tmp/requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ transformers
2
+ torch