VEO3 Free Latinoamérica - Interfaz completa en español con documentación y ejemplos adaptados
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- DOCUMENTACION_LATAM.md +3 -0
- README.md +2 -2
- app.py +448 -418
- config.py +128 -0
- mmaudio/__init__.py +0 -0
- mmaudio/data/__init__.py +0 -0
- mmaudio/data/av_utils.py +136 -0
- mmaudio/eval_utils.py +217 -0
- mmaudio/ext/__init__.py +1 -0
- mmaudio/ext/autoencoder/__init__.py +1 -0
- mmaudio/ext/autoencoder/autoencoder.py +52 -0
- mmaudio/ext/autoencoder/edm2_utils.py +168 -0
- mmaudio/ext/autoencoder/vae.py +373 -0
- mmaudio/ext/autoencoder/vae_modules.py +117 -0
- mmaudio/ext/bigvgan/LICENSE +21 -0
- mmaudio/ext/bigvgan/__init__.py +1 -0
- mmaudio/ext/bigvgan/activations.py +120 -0
- mmaudio/ext/bigvgan/alias_free_torch/__init__.py +6 -0
- mmaudio/ext/bigvgan/alias_free_torch/act.py +28 -0
- mmaudio/ext/bigvgan/alias_free_torch/filter.py +95 -0
- mmaudio/ext/bigvgan/alias_free_torch/resample.py +49 -0
- mmaudio/ext/bigvgan/bigvgan.py +32 -0
- mmaudio/ext/bigvgan/bigvgan_vocoder.yml +3 -0
- mmaudio/ext/bigvgan/env.py +18 -0
- mmaudio/ext/bigvgan/incl_licenses/LICENSE_1 +21 -0
- mmaudio/ext/bigvgan/incl_licenses/LICENSE_2 +21 -0
- mmaudio/ext/bigvgan/incl_licenses/LICENSE_3 +201 -0
- mmaudio/ext/bigvgan/incl_licenses/LICENSE_4 +29 -0
- mmaudio/ext/bigvgan/incl_licenses/LICENSE_5 +16 -0
- mmaudio/ext/bigvgan/models.py +255 -0
- mmaudio/ext/bigvgan/utils.py +31 -0
- mmaudio/ext/bigvgan_v2/LICENSE +21 -0
- mmaudio/ext/bigvgan_v2/__init__.py +0 -0
- mmaudio/ext/bigvgan_v2/activations.py +126 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/__init__.py +0 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py +77 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp +23 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu +246 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h +29 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py +86 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h +92 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py +6 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py +32 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py +101 -0
- mmaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py +54 -0
- mmaudio/ext/bigvgan_v2/bigvgan.py +439 -0
- mmaudio/ext/bigvgan_v2/env.py +18 -0
- mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1 +21 -0
- mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2 +21 -0
- mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3 +201 -0
DOCUMENTACION_LATAM.md
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2b08680b3f1d7c010e7e74d09cdf5d90166cf0141cd09bf0ec6f0b8fea26689c
|
| 3 |
+
size 7899
|
README.md
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2c7386255dc78815a37e0332b36555cc424e39e367faa5aa306a769133fb7cba
|
| 3 |
+
size 7128
|
app.py
CHANGED
|
@@ -1,479 +1,509 @@
|
|
| 1 |
-
import
|
| 2 |
-
import numpy as np
|
| 3 |
import random
|
| 4 |
-
import
|
| 5 |
-
import
|
| 6 |
-
import warnings
|
| 7 |
-
from typing import Optional, Tuple
|
| 8 |
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
os.environ["TRANSFORMERS_VERBOSITY"] = "error"
|
| 13 |
|
| 14 |
-
#
|
| 15 |
try:
|
| 16 |
-
|
| 17 |
-
from diffusers import DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler
|
| 18 |
-
DIFFUSERS_AVAILABLE = True
|
| 19 |
except ImportError:
|
| 20 |
-
|
|
|
|
| 21 |
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
torch.set_num_threads(2)
|
| 34 |
MAX_SEED = np.iinfo(np.int32).max
|
| 35 |
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
"repo_id": "CompVis/stable-diffusion-v1-4",
|
| 40 |
-
"revision": "main",
|
| 41 |
-
"torch_dtype": torch.float32,
|
| 42 |
-
"max_resolution": 512,
|
| 43 |
-
"default_steps": 15,
|
| 44 |
-
"default_guidance": 7.5,
|
| 45 |
-
"memory_usage": "Low",
|
| 46 |
-
"compatible": True
|
| 47 |
-
},
|
| 48 |
-
"⚡ Runway SD 1.5 (Recommended)": {
|
| 49 |
-
"repo_id": "runwayml/stable-diffusion-v1-5",
|
| 50 |
-
"revision": "main",
|
| 51 |
-
"torch_dtype": torch.float32,
|
| 52 |
-
"max_resolution": 512,
|
| 53 |
-
"default_steps": 20,
|
| 54 |
-
"default_guidance": 7.5,
|
| 55 |
-
"memory_usage": "Medium",
|
| 56 |
-
"compatible": True
|
| 57 |
-
},
|
| 58 |
-
"🎨 OpenJourney v4 (Artistic)": {
|
| 59 |
-
"repo_id": "prompthero/openjourney-v4",
|
| 60 |
-
"revision": "main",
|
| 61 |
-
"torch_dtype": torch.float32,
|
| 62 |
-
"max_resolution": 512,
|
| 63 |
-
"default_steps": 18,
|
| 64 |
-
"default_guidance": 8.0,
|
| 65 |
-
"memory_usage": "Medium",
|
| 66 |
-
"compatible": True
|
| 67 |
-
},
|
| 68 |
-
"🌟 Anything v3 (Anime Style)": {
|
| 69 |
-
"repo_id": "Linaqruf/anything-v3.0",
|
| 70 |
-
"revision": "main",
|
| 71 |
-
"torch_dtype": torch.float32,
|
| 72 |
-
"max_resolution": 512,
|
| 73 |
-
"default_steps": 20,
|
| 74 |
-
"default_guidance": 8.5,
|
| 75 |
-
"memory_usage": "Medium",
|
| 76 |
-
"compatible": True
|
| 77 |
-
}
|
| 78 |
-
}
|
| 79 |
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
current_model_name = None
|
| 83 |
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
gc.collect()
|
| 91 |
-
if torch.cuda.is_available():
|
| 92 |
-
torch.cuda.empty_cache()
|
| 93 |
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 100 |
|
| 101 |
-
|
|
|
|
|
|
|
| 102 |
|
| 103 |
-
|
| 104 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
try:
|
| 107 |
-
|
| 108 |
|
| 109 |
-
#
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
safety_checker=None,
|
| 115 |
-
requires_safety_checker=False,
|
| 116 |
-
use_auth_token=False,
|
| 117 |
-
cache_dir=None,
|
| 118 |
-
local_files_only=False,
|
| 119 |
-
low_cpu_mem_usage=True,
|
| 120 |
-
ignore_mismatched_sizes=True
|
| 121 |
)
|
| 122 |
|
| 123 |
-
#
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
# Enable attention slicing for memory efficiency
|
| 127 |
-
if hasattr(pipe, 'enable_attention_slicing'):
|
| 128 |
-
pipe.enable_attention_slicing(1)
|
| 129 |
-
|
| 130 |
-
# Use compatible scheduler
|
| 131 |
-
if hasattr(pipe, 'scheduler'):
|
| 132 |
-
try:
|
| 133 |
-
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
|
| 134 |
-
except:
|
| 135 |
-
pass # Keep original scheduler if DDIMScheduler fails
|
| 136 |
-
|
| 137 |
-
# Memory optimizations
|
| 138 |
-
if hasattr(pipe.unet, 'to'):
|
| 139 |
-
pipe.unet.to(memory_format=torch.channels_last)
|
| 140 |
-
if hasattr(pipe.vae, 'to'):
|
| 141 |
-
pipe.vae.to(memory_format=torch.channels_last)
|
| 142 |
-
|
| 143 |
-
current_pipeline = pipe
|
| 144 |
-
current_model_name = model_name
|
| 145 |
-
|
| 146 |
-
return pipe, f"✅ {model_name} loaded successfully!"
|
| 147 |
|
|
|
|
| 148 |
except Exception as e:
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
return None, "❌ Version incompatibility. Please update PyTorch to >= 2.1 or use older Diffusers version."
|
| 152 |
-
elif "out of memory" in error_msg.lower():
|
| 153 |
-
return None, "❌ Out of memory. Try using a different model or restart the space."
|
| 154 |
-
else:
|
| 155 |
-
return None, f"❌ Failed to load model: {error_msg[:200]}..."
|
| 156 |
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
|
|
|
|
|
|
|
|
|
| 175 |
if pipe is None:
|
| 176 |
-
return None,
|
| 177 |
-
|
| 178 |
try:
|
| 179 |
-
#
|
| 180 |
-
|
| 181 |
-
|
| 182 |
|
| 183 |
-
|
| 184 |
|
| 185 |
-
|
| 186 |
-
config = MODEL_CONFIGS[model_name]
|
| 187 |
-
max_res = config["max_resolution"]
|
| 188 |
-
width = min(width, max_res)
|
| 189 |
-
height = min(height, max_res)
|
| 190 |
|
| 191 |
-
|
| 192 |
-
width = (width // 8) * 8
|
| 193 |
-
height = (height // 8) * 8
|
| 194 |
|
| 195 |
-
|
| 196 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 197 |
|
| 198 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 199 |
|
| 200 |
-
#
|
| 201 |
-
|
| 202 |
try:
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
)
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
num_inference_steps=num_inference_steps,
|
| 222 |
-
width=width,
|
| 223 |
-
height=height,
|
| 224 |
-
generator=generator,
|
| 225 |
-
)
|
| 226 |
-
image = result.images[0]
|
| 227 |
-
else:
|
| 228 |
-
raise gen_error
|
| 229 |
-
|
| 230 |
-
# Cleanup
|
| 231 |
-
del result
|
| 232 |
-
gc.collect()
|
| 233 |
-
|
| 234 |
-
return image, seed, f"✅ Generated {width}x{height} image in {num_inference_steps} steps"
|
| 235 |
|
|
|
|
| 236 |
except Exception as e:
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
error_msg += "\n💡 Try: 256x256 resolution, 10 steps, or restart the space"
|
| 240 |
-
elif "CUDA" in str(e):
|
| 241 |
-
error_msg += "\n💡 CUDA error detected, using CPU fallback"
|
| 242 |
-
return None, seed, error_msg
|
| 243 |
|
| 244 |
-
#
|
| 245 |
-
def
|
| 246 |
-
"""
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
#
|
| 261 |
-
|
| 262 |
-
try:
|
| 263 |
-
import diffusers
|
| 264 |
-
diffusers_version = diffusers.__version__
|
| 265 |
-
# Check if diffusers version is in compatible range
|
| 266 |
-
if "0.21" <= diffusers_version < "0.25":
|
| 267 |
-
status.append(f"✅ Diffusers: {diffusers_version}")
|
| 268 |
-
else:
|
| 269 |
-
status.append(f"⚠️ Diffusers: {diffusers_version} (version mismatch)")
|
| 270 |
-
except Exception as e:
|
| 271 |
-
status.append(f"⚠️ Diffusers: Version unknown - {str(e)}")
|
| 272 |
-
else:
|
| 273 |
-
status.append("❌ Diffusers: Not available - Please install with: pip install diffusers>=0.21.0,<0.25.0")
|
| 274 |
-
|
| 275 |
-
# Check transformers
|
| 276 |
-
if TRANSFORMERS_AVAILABLE:
|
| 277 |
-
try:
|
| 278 |
-
import transformers
|
| 279 |
-
transformers_version = transformers.__version__
|
| 280 |
-
# Check if transformers version is in compatible range
|
| 281 |
-
if "4.25" <= transformers_version < "4.35":
|
| 282 |
-
status.append(f"✅ Transformers: {transformers_version}")
|
| 283 |
-
else:
|
| 284 |
-
status.append(f"⚠️ Transformers: {transformers_version} (version mismatch)")
|
| 285 |
-
except Exception as e:
|
| 286 |
-
status.append(f"⚠️ Transformers: Version unknown - {str(e)}")
|
| 287 |
-
else:
|
| 288 |
-
status.append("❌ Transformers: Not available")
|
| 289 |
-
|
| 290 |
-
# Check accelerate
|
| 291 |
-
try:
|
| 292 |
-
import accelerate
|
| 293 |
-
accelerate_version = accelerate.__version__
|
| 294 |
-
if accelerate_version >= "0.20":
|
| 295 |
-
status.append(f"✅ Accelerate: {accelerate_version}")
|
| 296 |
-
else:
|
| 297 |
-
status.append(f"⚠️ Accelerate: {accelerate_version} (needs update)")
|
| 298 |
-
except ImportError:
|
| 299 |
-
status.append("❌ Accelerate: Not available")
|
| 300 |
-
except Exception as e:
|
| 301 |
-
status.append(f"⚠️ Accelerate: Version unknown - {str(e)}")
|
| 302 |
-
|
| 303 |
-
# Check device
|
| 304 |
-
status.append(f"🖥️ Device: {device.upper()}")
|
| 305 |
-
status.append(f"🧵 CPU Threads: {torch.get_num_threads()}")
|
| 306 |
-
|
| 307 |
-
return "\n".join(status)
|
| 308 |
|
| 309 |
-
#
|
| 310 |
examples = [
|
| 311 |
-
"a
|
| 312 |
-
"
|
| 313 |
-
"
|
| 314 |
-
"medieval castle on a hill, fantasy art",
|
| 315 |
-
"astronaut in space, realistic style",
|
| 316 |
-
"cozy coffee shop interior, warm lighting"
|
| 317 |
]
|
| 318 |
|
| 319 |
-
#
|
| 320 |
css = """
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 324 |
padding: 20px;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 325 |
}
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
}
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
|
|
|
| 339 |
}
|
| 340 |
-
.status-success { background-color: #d4edda; color: #155724; padding: 10px; border-radius: 5px; }
|
| 341 |
-
.status-error { background-color: #f8d7da; color: #721c24; padding: 10px; border-radius: 5px; }
|
| 342 |
"""
|
| 343 |
|
| 344 |
-
#
|
| 345 |
-
with gr.Blocks(css=css, title="
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
with gr.Column(scale=3):
|
| 372 |
-
prompt = gr.Textbox(
|
| 373 |
-
label="✨ Describe your image",
|
| 374 |
-
placeholder="Enter your creative prompt here...",
|
| 375 |
-
lines=3
|
| 376 |
-
)
|
| 377 |
-
with gr.Column(scale=1):
|
| 378 |
-
generate_btn = gr.Button(
|
| 379 |
-
"🎨 Generate",
|
| 380 |
-
variant="primary",
|
| 381 |
-
size="lg"
|
| 382 |
-
)
|
| 383 |
-
|
| 384 |
-
# Model selection
|
| 385 |
-
model_dropdown = gr.Dropdown(
|
| 386 |
-
choices=list(MODEL_CONFIGS.keys()),
|
| 387 |
-
value="⚡ Runway SD 1.5 (Recommended)",
|
| 388 |
-
label="🤖 AI Model",
|
| 389 |
-
info="All models optimized for CPU usage"
|
| 390 |
-
)
|
| 391 |
-
|
| 392 |
-
# Model info
|
| 393 |
-
model_info = gr.Markdown("", elem_classes="model-info")
|
| 394 |
-
|
| 395 |
-
# Result
|
| 396 |
-
result_image = gr.Image(label="Generated Image", height=400)
|
| 397 |
-
status_text = gr.Markdown("🚀 Ready to generate!", elem_classes="status-success")
|
| 398 |
-
|
| 399 |
-
# Settings
|
| 400 |
-
with gr.Accordion("⚙️ Generation Settings", open=False):
|
| 401 |
-
negative_prompt = gr.Textbox(
|
| 402 |
-
label="🚫 Negative Prompt",
|
| 403 |
-
placeholder="What you don't want...",
|
| 404 |
-
lines=2
|
| 405 |
)
|
| 406 |
|
| 407 |
-
|
| 408 |
-
|
| 409 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 410 |
|
| 411 |
-
|
| 412 |
-
|
| 413 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 414 |
|
| 415 |
-
|
| 416 |
-
|
| 417 |
-
|
|
|
|
|
|
|
| 418 |
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
|
| 424 |
-
|
| 425 |
-
|
| 426 |
-
|
| 427 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 428 |
""")
|
| 429 |
-
|
| 430 |
-
# Examples
|
| 431 |
-
gr.Examples(examples=examples, inputs=[prompt])
|
| 432 |
-
|
| 433 |
-
# Footer
|
| 434 |
-
gr.Markdown("---")
|
| 435 |
-
gr.Markdown("🖥️ **CPU Optimized** | Generation time: 30s-3min depending on settings")
|
| 436 |
|
| 437 |
-
#
|
| 438 |
-
|
| 439 |
-
|
| 440 |
-
config = MODEL_CONFIGS[model_name]
|
| 441 |
-
info = f"""
|
| 442 |
-
**Memory Usage:** {config['memory_usage']} | **Max Resolution:** {config['max_resolution']}px
|
| 443 |
-
**Recommended:** {config['default_steps']} steps, {config['default_guidance']} guidance
|
| 444 |
-
"""
|
| 445 |
-
return info, config['default_steps'], config['default_guidance']
|
| 446 |
-
return "", 15, 7.5
|
| 447 |
|
| 448 |
-
|
| 449 |
-
|
| 450 |
-
|
| 451 |
-
|
| 452 |
-
|
| 453 |
)
|
| 454 |
|
| 455 |
-
|
| 456 |
-
|
| 457 |
-
|
| 458 |
-
|
| 459 |
-
|
| 460 |
-
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
|
| 464 |
|
| 465 |
-
|
| 466 |
-
|
| 467 |
-
|
| 468 |
-
|
| 469 |
-
outputs=[model_info, num_inference_steps, guidance_scale]
|
| 470 |
)
|
| 471 |
|
| 472 |
if __name__ == "__main__":
|
| 473 |
-
demo.launch(
|
| 474 |
-
share=True,
|
| 475 |
-
server_name="0.0.0.0",
|
| 476 |
-
server_port=7860,
|
| 477 |
-
show_error=True,
|
| 478 |
-
quiet=True
|
| 479 |
-
)
|
|
|
|
| 1 |
+
import types
|
|
|
|
| 2 |
import random
|
| 3 |
+
import spaces
|
| 4 |
+
import logging
|
|
|
|
|
|
|
| 5 |
import os
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torchaudio
|
| 12 |
+
from diffusers import AutoencoderKLWan, UniPCMultistepScheduler
|
| 13 |
+
from diffusers.utils import export_to_video
|
| 14 |
+
from diffusers import AutoModel
|
| 15 |
+
import gradio as gr
|
| 16 |
+
import tempfile
|
| 17 |
+
from huggingface_hub import hf_hub_download
|
| 18 |
|
| 19 |
+
from src.pipeline_wan_nag import NAGWanPipeline
|
| 20 |
+
from src.transformer_wan_nag import NagWanTransformer3DModel
|
|
|
|
| 21 |
|
| 22 |
+
# MMAudio imports
|
| 23 |
try:
|
| 24 |
+
import mmaudio
|
|
|
|
|
|
|
| 25 |
except ImportError:
|
| 26 |
+
os.system("pip install -e .")
|
| 27 |
+
import mmaudio
|
| 28 |
|
| 29 |
+
from mmaudio.eval_utils import (ModelConfig, all_model_cfg, generate as mmaudio_generate,
|
| 30 |
+
load_video, make_video, setup_eval_logging)
|
| 31 |
+
from mmaudio.model.flow_matching import FlowMatching
|
| 32 |
+
from mmaudio.model.networks import MMAudio, get_my_mmaudio
|
| 33 |
+
from mmaudio.model.sequence_config import SequenceConfig
|
| 34 |
+
from mmaudio.model.utils.features_utils import FeaturesUtils
|
| 35 |
+
|
| 36 |
+
# NAG Video Settings
|
| 37 |
+
MOD_VALUE = 32
|
| 38 |
+
DEFAULT_DURATION_SECONDS = 4
|
| 39 |
+
DEFAULT_STEPS = 4
|
| 40 |
+
DEFAULT_SEED = 2025
|
| 41 |
+
DEFAULT_H_SLIDER_VALUE = 480
|
| 42 |
+
DEFAULT_W_SLIDER_VALUE = 832
|
| 43 |
+
NEW_FORMULA_MAX_AREA = 480.0 * 832.0
|
| 44 |
|
| 45 |
+
SLIDER_MIN_H, SLIDER_MAX_H = 128, 896
|
| 46 |
+
SLIDER_MIN_W, SLIDER_MAX_W = 128, 896
|
|
|
|
| 47 |
MAX_SEED = np.iinfo(np.int32).max
|
| 48 |
|
| 49 |
+
FIXED_FPS = 16
|
| 50 |
+
MIN_FRAMES_MODEL = 8
|
| 51 |
+
MAX_FRAMES_MODEL = 129
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
|
| 53 |
+
DEFAULT_NAG_NEGATIVE_PROMPT = "Estático, inmóvil, quieto, feo, mala calidad, peor calidad, mal dibujado, baja resolución, borroso, falta de detalles"
|
| 54 |
+
DEFAULT_AUDIO_NEGATIVE_PROMPT = "música"
|
|
|
|
| 55 |
|
| 56 |
+
# NAG Model Settings
|
| 57 |
+
MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
|
| 58 |
+
SUB_MODEL_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
|
| 59 |
+
SUB_MODEL_FILENAME = "Wan14BT2VFusioniX_fp16_.safetensors"
|
| 60 |
+
LORA_REPO_ID = "Kijai/WanVideo_comfy"
|
| 61 |
+
LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"
|
|
|
|
|
|
|
|
|
|
| 62 |
|
| 63 |
+
# MMAudio Settings
|
| 64 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
| 65 |
+
torch.backends.cudnn.allow_tf32 = True
|
| 66 |
+
log = logging.getLogger()
|
| 67 |
+
device = 'cuda'
|
| 68 |
+
dtype = torch.bfloat16
|
| 69 |
+
audio_model_config: ModelConfig = all_model_cfg['large_44k_v2']
|
| 70 |
+
audio_model_config.download_if_needed()
|
| 71 |
+
setup_eval_logging()
|
| 72 |
+
|
| 73 |
+
# Initialize NAG Video Model
|
| 74 |
+
try:
|
| 75 |
+
vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
|
| 76 |
+
wan_path = hf_hub_download(repo_id=SUB_MODEL_ID, filename=SUB_MODEL_FILENAME)
|
| 77 |
+
transformer = NagWanTransformer3DModel.from_single_file(wan_path, torch_dtype=torch.bfloat16)
|
| 78 |
+
pipe = NAGWanPipeline.from_pretrained(
|
| 79 |
+
MODEL_ID, vae=vae, transformer=transformer, torch_dtype=torch.bfloat16
|
| 80 |
+
)
|
| 81 |
+
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=5.0)
|
| 82 |
+
pipe.to("cuda")
|
| 83 |
+
|
| 84 |
+
pipe.transformer.__class__.attn_processors = NagWanTransformer3DModel.attn_processors
|
| 85 |
+
pipe.transformer.__class__.set_attn_processor = NagWanTransformer3DModel.set_attn_processor
|
| 86 |
+
pipe.transformer.__class__.forward = NagWanTransformer3DModel.forward
|
| 87 |
+
print("¡Modelo de video NAG cargado exitosamente!")
|
| 88 |
+
except Exception as e:
|
| 89 |
+
print(f"Error cargando modelo de video NAG: {e}")
|
| 90 |
+
pipe = None
|
| 91 |
+
|
| 92 |
+
# Initialize MMAudio Model
|
| 93 |
+
def get_mmaudio_model() -> tuple[MMAudio, FeaturesUtils, SequenceConfig]:
|
| 94 |
+
seq_cfg = audio_model_config.seq_cfg
|
| 95 |
|
| 96 |
+
net: MMAudio = get_my_mmaudio(audio_model_config.model_name).to(device, dtype).eval()
|
| 97 |
+
net.load_weights(torch.load(audio_model_config.model_path, map_location=device, weights_only=True))
|
| 98 |
+
log.info(f'Cargados pesos de MMAudio desde {audio_model_config.model_path}')
|
| 99 |
|
| 100 |
+
feature_utils = FeaturesUtils(tod_vae_ckpt=audio_model_config.vae_path,
|
| 101 |
+
synchformer_ckpt=audio_model_config.synchformer_ckpt,
|
| 102 |
+
enable_conditions=True,
|
| 103 |
+
mode=audio_model_config.mode,
|
| 104 |
+
bigvgan_vocoder_ckpt=audio_model_config.bigvgan_16k_path,
|
| 105 |
+
need_vae_encoder=False)
|
| 106 |
+
feature_utils = feature_utils.to(device, dtype).eval()
|
| 107 |
|
| 108 |
+
return net, feature_utils, seq_cfg
|
| 109 |
+
|
| 110 |
+
try:
|
| 111 |
+
audio_net, audio_feature_utils, audio_seq_cfg = get_mmaudio_model()
|
| 112 |
+
print("MMAudio Model loaded successfully!")
|
| 113 |
+
except Exception as e:
|
| 114 |
+
print(f"Error loading MMAudio Model: {e}")
|
| 115 |
+
audio_net = None
|
| 116 |
+
|
| 117 |
+
# Audio generation function
|
| 118 |
+
@torch.inference_mode()
|
| 119 |
+
def add_audio_to_video(video_path, prompt, audio_negative_prompt, audio_steps, audio_cfg_strength, duration):
|
| 120 |
+
"""Añadir audio automático al video generado"""
|
| 121 |
try:
|
| 122 |
+
net, feature_utils, seq_cfg = get_mmaudio_model()
|
| 123 |
|
| 124 |
+
# Generar audio usando el mismo prompt del video
|
| 125 |
+
audio_output = mmaudio_generate(
|
| 126 |
+
net, feature_utils, seq_cfg,
|
| 127 |
+
prompt, audio_negative_prompt,
|
| 128 |
+
audio_steps, audio_cfg_strength, duration
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
)
|
| 130 |
|
| 131 |
+
# Combinar video con audio
|
| 132 |
+
final_video_path = make_video(video_path, audio_output, duration)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
|
| 134 |
+
return final_video_path
|
| 135 |
except Exception as e:
|
| 136 |
+
log.error(f"Error generando audio: {e}")
|
| 137 |
+
return video_path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
|
| 139 |
+
# Combined generation function
|
| 140 |
+
def get_duration(prompt, nag_negative_prompt, nag_scale, height, width, duration_seconds,
|
| 141 |
+
steps, seed, randomize_seed, enable_audio, audio_negative_prompt,
|
| 142 |
+
audio_steps, audio_cfg_strength):
|
| 143 |
+
# Calcular duración total incluyendo procesamiento de audio si está habilitado
|
| 144 |
+
base_duration = 30 # Duración base para generación de video
|
| 145 |
+
if enable_audio:
|
| 146 |
+
audio_duration = 20 # Duración adicional para generación de audio
|
| 147 |
+
return base_duration + audio_duration
|
| 148 |
+
return base_duration
|
| 149 |
+
|
| 150 |
+
@spaces.GPU(duration=get_duration)
|
| 151 |
+
def generate_video_with_audio(
|
| 152 |
+
prompt,
|
| 153 |
+
nag_negative_prompt, nag_scale,
|
| 154 |
+
height=DEFAULT_H_SLIDER_VALUE, width=DEFAULT_W_SLIDER_VALUE, duration_seconds=DEFAULT_DURATION_SECONDS,
|
| 155 |
+
steps=DEFAULT_STEPS,
|
| 156 |
+
seed=DEFAULT_SEED, randomize_seed=False,
|
| 157 |
+
enable_audio=True, audio_negative_prompt=DEFAULT_AUDIO_NEGATIVE_PROMPT,
|
| 158 |
+
audio_steps=25, audio_cfg_strength=4.5,
|
| 159 |
+
):
|
| 160 |
if pipe is None:
|
| 161 |
+
return None, DEFAULT_SEED
|
| 162 |
+
|
| 163 |
try:
|
| 164 |
+
# Generar video primero
|
| 165 |
+
target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
|
| 166 |
+
target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
|
| 167 |
|
| 168 |
+
num_frames = np.clip(int(round(int(duration_seconds) * FIXED_FPS) + 1), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
|
| 169 |
|
| 170 |
+
current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 171 |
|
| 172 |
+
print(f"Generando video con: prompt='{prompt}', resolución={target_w}x{target_h}, frames={num_frames}")
|
|
|
|
|
|
|
| 173 |
|
| 174 |
+
with torch.inference_mode():
|
| 175 |
+
nag_output_frames_list = pipe(
|
| 176 |
+
prompt=prompt,
|
| 177 |
+
nag_negative_prompt=nag_negative_prompt,
|
| 178 |
+
nag_scale=nag_scale,
|
| 179 |
+
nag_tau=3.5,
|
| 180 |
+
nag_alpha=0.5,
|
| 181 |
+
height=target_h, width=target_w, num_frames=num_frames,
|
| 182 |
+
guidance_scale=0.,
|
| 183 |
+
num_inference_steps=int(steps),
|
| 184 |
+
generator=torch.Generator(device="cuda").manual_seed(current_seed)
|
| 185 |
+
).frames[0]
|
| 186 |
|
| 187 |
+
# Guardar video inicial sin audio
|
| 188 |
+
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
|
| 189 |
+
temp_video_path = tmpfile.name
|
| 190 |
+
export_to_video(nag_output_frames_list, temp_video_path, fps=FIXED_FPS)
|
| 191 |
+
print(f"Video guardado en: {temp_video_path}")
|
| 192 |
|
| 193 |
+
# Añadir audio si está habilitado
|
| 194 |
+
if enable_audio:
|
| 195 |
try:
|
| 196 |
+
print("Añadiendo audio al video...")
|
| 197 |
+
final_video_path = add_audio_to_video(
|
| 198 |
+
temp_video_path,
|
| 199 |
+
prompt, # Usar el mismo prompt para generación de audio
|
| 200 |
+
audio_negative_prompt,
|
| 201 |
+
audio_steps,
|
| 202 |
+
audio_cfg_strength,
|
| 203 |
+
duration_seconds
|
| 204 |
)
|
| 205 |
+
# Limpiar video temporal
|
| 206 |
+
if os.path.exists(temp_video_path) and final_video_path != temp_video_path:
|
| 207 |
+
os.remove(temp_video_path)
|
| 208 |
+
print(f"Video final con audio: {final_video_path}")
|
| 209 |
+
except Exception as e:
|
| 210 |
+
log.error(f"Falló la generación de audio: {e}")
|
| 211 |
+
final_video_path = temp_video_path
|
| 212 |
+
else:
|
| 213 |
+
final_video_path = temp_video_path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 214 |
|
| 215 |
+
return final_video_path, current_seed
|
| 216 |
except Exception as e:
|
| 217 |
+
print(f"Error en generación de video: {e}")
|
| 218 |
+
return None, current_seed
|
|
|
|
|
|
|
|
|
|
|
|
|
| 219 |
|
| 220 |
+
# Example generation function - simplified
|
| 221 |
+
def set_example(prompt, nag_negative_prompt, nag_scale):
|
| 222 |
+
"""Establecer valores de ejemplo en la UI sin activar generación"""
|
| 223 |
+
return (
|
| 224 |
+
prompt,
|
| 225 |
+
nag_negative_prompt,
|
| 226 |
+
nag_scale,
|
| 227 |
+
DEFAULT_H_SLIDER_VALUE,
|
| 228 |
+
DEFAULT_W_SLIDER_VALUE,
|
| 229 |
+
DEFAULT_DURATION_SECONDS,
|
| 230 |
+
DEFAULT_STEPS,
|
| 231 |
+
DEFAULT_SEED,
|
| 232 |
+
True, # randomize_seed
|
| 233 |
+
True, # enable_audio
|
| 234 |
+
DEFAULT_AUDIO_NEGATIVE_PROMPT,
|
| 235 |
+
25, # audio_steps
|
| 236 |
+
4.5 # audio_cfg_strength
|
| 237 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 238 |
|
| 239 |
+
# Examples with audio descriptions
|
| 240 |
examples = [
|
| 241 |
+
["Autopista de medianoche fuera de una ciudad iluminada con neón. Un Porsche 911 Carrera RS negro de 1973 acelera a 120 km/h. Dentro, un cantante-guitarrista elegante canta mientras conduce, guitarra vintage sunburst en el asiento del pasajero. Las luces de sodio de la calle se deslizan sobre el capó; paneles RGB cambian de magenta a azul en el conductor. Cámara: inmersión de dron, toma baja de rueda con brazo ruso, gimbal interior, barrel roll FPV, espiral aérea. Paleta neo-noir, reflejos de asfalto mojado por lluvia, rugido del motor flat-six mezclado con guitarra en vivo.", DEFAULT_NAG_NEGATIVE_PROMPT, 11],
|
| 242 |
+
["Concierto de rock en arena lleno con 20,000 fanáticos. Un guitarrista principal extravagante con chaqueta de cuero y aviators espejados hace shred en una Flying V cereza-roja en un escenario elevado. Llamas de pirotecnia se disparan en cada downbeat, chorros de CO₂ estallan detrás. Luces móviles giran en turquesa y ámbar, follow-spots iluminan el pelo del guitarrista. Steadicam órbita 360°, toma de grúa elevándose sobre la multitud, ultra cámara lenta del ataque de púa a 1,000 fps. Grado de película turquesa-naranja, rugido ensordecedor de la multitud mezclado con solo de guitarra chillón.", DEFAULT_NAG_NEGATIVE_PROMPT, 11],
|
| 243 |
+
["Camino rural de hora dorada serpenteando a través de campos de trigo ondulantes. Un hombre y una mujer montan una motocicleta café-racer vintage, pelo y bufanda ondeando en la brisa cálida. Toma de persecución con dron revela campos agrícolas infinitos; slider bajo a lo largo de la rueda trasera captura estela de polvo. Luz de sol retroilumina a los jinetes, bloom de lente en los reflejos. Underscore de rock acústico suave; rugido del motor mezclado a -8 dB. Grado de color cálido pastel, grano de película suave para ambiente nostálgico.", DEFAULT_NAG_NEGATIVE_PROMPT, 11],
|
|
|
|
|
|
|
|
|
|
| 244 |
]
|
| 245 |
|
| 246 |
+
# CSS styling - Fixed for better layout
|
| 247 |
css = """
|
| 248 |
+
/* Columna derecha - salida de video */
|
| 249 |
+
.video-output {
|
| 250 |
+
min-height: 600px;
|
| 251 |
+
border: 2px dashed #e5e7eb;
|
| 252 |
+
border-radius: 12px;
|
| 253 |
+
display: flex;
|
| 254 |
+
align-items: center;
|
| 255 |
+
justify-content: center;
|
| 256 |
+
background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
/* Botón de generación */
|
| 260 |
+
.generate-btn {
|
| 261 |
+
background: linear-gradient(135deg, #8b5cf6 0%, #7c3aed 100%) !important;
|
| 262 |
+
border: none !important;
|
| 263 |
+
color: white !important;
|
| 264 |
+
font-weight: 600 !important;
|
| 265 |
+
padding: 16px 32px !important;
|
| 266 |
+
border-radius: 12px !important;
|
| 267 |
+
transition: all 0.3s ease !important;
|
| 268 |
+
box-shadow: 0 4px 15px rgba(139, 92, 246, 0.3) !important;
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
.generate-btn:hover {
|
| 272 |
+
transform: translateY(-2px) !important;
|
| 273 |
+
box-shadow: 0 8px 25px rgba(139, 92, 246, 0.4) !important;
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
/* Configuración de audio */
|
| 277 |
+
.audio-settings {
|
| 278 |
+
background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%);
|
| 279 |
+
border-radius: 12px;
|
| 280 |
+
padding: 20px;
|
| 281 |
+
margin: 20px 0;
|
| 282 |
+
border: 1px solid #f59e0b;
|
| 283 |
+
}
|
| 284 |
+
|
| 285 |
+
/* Configuración de video */
|
| 286 |
+
.video-settings {
|
| 287 |
+
background: linear-gradient(135deg, #dbeafe 0%, #bfdbfe 100%);
|
| 288 |
+
border-radius: 12px;
|
| 289 |
padding: 20px;
|
| 290 |
+
margin: 20px 0;
|
| 291 |
+
border: 1px solid #3b82f6;
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
/* Títulos de sección */
|
| 295 |
+
.section-title {
|
| 296 |
+
color: #1f2937;
|
| 297 |
+
font-weight: 700;
|
| 298 |
+
margin-bottom: 16px;
|
| 299 |
+
font-size: 1.25rem;
|
| 300 |
}
|
| 301 |
+
|
| 302 |
+
/* Tooltips y información */
|
| 303 |
+
.info-text {
|
| 304 |
+
color: #6b7280;
|
| 305 |
+
font-size: 0.875rem;
|
| 306 |
+
margin-top: 4px;
|
| 307 |
}
|
| 308 |
+
|
| 309 |
+
/* Ejemplos */
|
| 310 |
+
.examples-section {
|
| 311 |
+
background: linear-gradient(135deg, #f3f4f6 0%, #e5e7eb 100%);
|
| 312 |
+
border-radius: 12px;
|
| 313 |
+
padding: 20px;
|
| 314 |
+
margin-top: 20px;
|
| 315 |
}
|
|
|
|
|
|
|
| 316 |
"""
|
| 317 |
|
| 318 |
+
# Crear la interfaz de Gradio
|
| 319 |
+
with gr.Blocks(css=css, title="🎬 VEO3 Free - Generador de Video con IA", theme=gr.themes.Soft()) as demo:
|
| 320 |
+
gr.Markdown("""
|
| 321 |
+
# 🎬 VEO3 Free - Generador de Video con IA
|
| 322 |
+
|
| 323 |
+
### Genera videos profesionales con audio automático usando inteligencia artificial
|
| 324 |
+
|
| 325 |
+
**Características principales:**
|
| 326 |
+
- 🎥 Generación de video de alta calidad con modelo Wan2.1-T2V-14B
|
| 327 |
+
- 🔊 Audio automático sincronizado con el contenido visual
|
| 328 |
+
- ⚡ Generación rápida de 4 pasos con tecnología NAG
|
| 329 |
+
- 🎨 Resoluciones personalizables de 128x128 a 896x896
|
| 330 |
+
- 🎯 Duración ajustable de 1 a 8 segundos
|
| 331 |
+
|
| 332 |
+
---
|
| 333 |
+
""")
|
| 334 |
+
|
| 335 |
+
with gr.Row():
|
| 336 |
+
with gr.Column(scale=4):
|
| 337 |
+
# Sección de prompt principal
|
| 338 |
+
gr.Markdown("### 📝 Descripción del Video (también se usa para generación de audio)")
|
| 339 |
+
prompt = gr.Textbox(
|
| 340 |
+
label="Describe tu escena de video en detalle...",
|
| 341 |
+
placeholder="Ej: Un carro deportivo rojo acelerando por una autopista desierta al atardecer, cámara en movimiento desde un dron...",
|
| 342 |
+
lines=4,
|
| 343 |
+
max_lines=8,
|
| 344 |
+
interactive=True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 345 |
)
|
| 346 |
|
| 347 |
+
# Configuración avanzada de video
|
| 348 |
+
with gr.Group(elem_classes="video-settings"):
|
| 349 |
+
gr.Markdown("### 🎥 Configuración Avanzada de Video")
|
| 350 |
+
|
| 351 |
+
with gr.Row():
|
| 352 |
+
nag_negative_prompt = gr.Textbox(
|
| 353 |
+
label="Prompt Negativo del Video",
|
| 354 |
+
value=DEFAULT_NAG_NEGATIVE_PROMPT,
|
| 355 |
+
placeholder="Elementos a evitar en el video (ej: estático, borroso, mala calidad)",
|
| 356 |
+
lines=2
|
| 357 |
+
)
|
| 358 |
+
|
| 359 |
+
with gr.Row():
|
| 360 |
+
nag_scale = gr.Slider(
|
| 361 |
+
minimum=1.0,
|
| 362 |
+
maximum=20.0,
|
| 363 |
+
step=0.5,
|
| 364 |
+
value=11.0,
|
| 365 |
+
label="🎛️ Escala NAG",
|
| 366 |
+
info="Mayor escala = mayor adherencia al prompt"
|
| 367 |
+
)
|
| 368 |
+
|
| 369 |
+
with gr.Row():
|
| 370 |
+
duration_seconds_input = gr.Slider(
|
| 371 |
+
minimum=1,
|
| 372 |
+
maximum=8,
|
| 373 |
+
step=1,
|
| 374 |
+
value=DEFAULT_DURATION_SECONDS,
|
| 375 |
+
label="⏱️ Duración (segundos)",
|
| 376 |
+
info="Duración del video generado"
|
| 377 |
+
)
|
| 378 |
+
steps_slider = gr.Slider(
|
| 379 |
+
minimum=1,
|
| 380 |
+
maximum=8,
|
| 381 |
+
step=1,
|
| 382 |
+
value=DEFAULT_STEPS,
|
| 383 |
+
label="🔄 Pasos de Inferencia",
|
| 384 |
+
info="Más pasos = mejor calidad, pero más lento"
|
| 385 |
+
)
|
| 386 |
+
|
| 387 |
+
with gr.Row():
|
| 388 |
+
height_input = gr.Slider(
|
| 389 |
+
minimum=SLIDER_MIN_H,
|
| 390 |
+
maximum=SLIDER_MAX_H,
|
| 391 |
+
step=32,
|
| 392 |
+
value=DEFAULT_H_SLIDER_VALUE,
|
| 393 |
+
label="📏 Altura (x32)",
|
| 394 |
+
info="Altura del video en píxeles"
|
| 395 |
+
)
|
| 396 |
+
width_input = gr.Slider(
|
| 397 |
+
minimum=SLIDER_MIN_W,
|
| 398 |
+
maximum=SLIDER_MAX_W,
|
| 399 |
+
step=32,
|
| 400 |
+
value=DEFAULT_W_SLIDER_VALUE,
|
| 401 |
+
label="📐 Ancho (x32)",
|
| 402 |
+
info="Ancho del video en píxeles"
|
| 403 |
+
)
|
| 404 |
+
|
| 405 |
+
with gr.Row():
|
| 406 |
+
seed_input = gr.Number(
|
| 407 |
+
label="🎲 Semilla",
|
| 408 |
+
value=DEFAULT_SEED,
|
| 409 |
+
interactive=True
|
| 410 |
+
)
|
| 411 |
+
randomize_seed_checkbox = gr.Checkbox(
|
| 412 |
+
label="🎲 Semilla Aleatoria",
|
| 413 |
+
value=True,
|
| 414 |
+
interactive=True
|
| 415 |
+
)
|
| 416 |
|
| 417 |
+
# Configuración de generación de audio
|
| 418 |
+
with gr.Group(elem_classes="audio-settings"):
|
| 419 |
+
gr.Markdown("### 🎵 Configuración de Generación de Audio")
|
| 420 |
+
|
| 421 |
+
enable_audio = gr.Checkbox(
|
| 422 |
+
label="🔊 Habilitar Generación Automática de Audio",
|
| 423 |
+
value=True,
|
| 424 |
+
interactive=True
|
| 425 |
+
)
|
| 426 |
+
|
| 427 |
+
with gr.Column(visible=True) as audio_settings_group:
|
| 428 |
+
audio_negative_prompt = gr.Textbox(
|
| 429 |
+
label="Prompt Negativo del Audio",
|
| 430 |
+
value=DEFAULT_AUDIO_NEGATIVE_PROMPT,
|
| 431 |
+
placeholder="Elementos a evitar en el audio (ej: música, habla)",
|
| 432 |
+
)
|
| 433 |
+
|
| 434 |
+
with gr.Row():
|
| 435 |
+
audio_steps = gr.Slider(
|
| 436 |
+
minimum=10,
|
| 437 |
+
maximum=50,
|
| 438 |
+
step=5,
|
| 439 |
+
value=25,
|
| 440 |
+
label="🎚️ Pasos de Audio",
|
| 441 |
+
info="Más pasos = mejor calidad"
|
| 442 |
+
)
|
| 443 |
+
audio_cfg_strength = gr.Slider(
|
| 444 |
+
minimum=1.0,
|
| 445 |
+
maximum=10.0,
|
| 446 |
+
step=0.5,
|
| 447 |
+
value=4.5,
|
| 448 |
+
label="🎛️ Guía de Audio",
|
| 449 |
+
info="Fuerza de la guía del prompt"
|
| 450 |
+
)
|
| 451 |
+
|
| 452 |
+
# Alternar visibilidad de configuración de audio
|
| 453 |
+
enable_audio.change(
|
| 454 |
+
fn=lambda x: gr.update(visible=x),
|
| 455 |
+
inputs=[enable_audio],
|
| 456 |
+
outputs=[audio_settings_group]
|
| 457 |
+
)
|
| 458 |
|
| 459 |
+
generate_button = gr.Button(
|
| 460 |
+
"🎬 Generar Video con Audio",
|
| 461 |
+
variant="primary",
|
| 462 |
+
elem_classes="generate-btn"
|
| 463 |
+
)
|
| 464 |
|
| 465 |
+
with gr.Column(scale=5):
|
| 466 |
+
video_output = gr.Video(
|
| 467 |
+
label="Video Generado con Audio",
|
| 468 |
+
autoplay=True,
|
| 469 |
+
interactive=False,
|
| 470 |
+
elem_classes="video-output",
|
| 471 |
+
height=600
|
| 472 |
+
)
|
| 473 |
+
|
| 474 |
+
gr.HTML("""
|
| 475 |
+
<div style="text-align: center; margin-top: 20px; color: #6b7280;">
|
| 476 |
+
<p>💡 Consejo: ¡El mismo prompt se usa para la generación de video y audio!</p>
|
| 477 |
+
<p>🎧 El audio se combina automáticamente con el contenido visual</p>
|
| 478 |
+
</div>
|
| 479 |
""")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 480 |
|
| 481 |
+
# Sección de ejemplos movida fuera de las columnas
|
| 482 |
+
with gr.Row():
|
| 483 |
+
gr.Markdown("### 🎯 Prompts de Ejemplo")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 484 |
|
| 485 |
+
gr.Examples(
|
| 486 |
+
examples=examples,
|
| 487 |
+
inputs=[prompt, nag_negative_prompt, nag_scale],
|
| 488 |
+
outputs=None, # No conectar salidas para evitar problemas de índice
|
| 489 |
+
cache_examples=False
|
| 490 |
)
|
| 491 |
|
| 492 |
+
# Conectar elementos de la UI
|
| 493 |
+
ui_inputs = [
|
| 494 |
+
prompt,
|
| 495 |
+
nag_negative_prompt, nag_scale,
|
| 496 |
+
height_input, width_input, duration_seconds_input,
|
| 497 |
+
steps_slider,
|
| 498 |
+
seed_input, randomize_seed_checkbox,
|
| 499 |
+
enable_audio, audio_negative_prompt, audio_steps, audio_cfg_strength,
|
| 500 |
+
]
|
| 501 |
|
| 502 |
+
generate_button.click(
|
| 503 |
+
fn=generate_video_with_audio,
|
| 504 |
+
inputs=ui_inputs,
|
| 505 |
+
outputs=[video_output, seed_input],
|
|
|
|
| 506 |
)
|
| 507 |
|
| 508 |
if __name__ == "__main__":
|
| 509 |
+
demo.queue().launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
config.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Configuración personalizada para VEO3 Free - Versión Latinoamérica
|
| 2 |
+
# Custom configuration for VEO3 Free - Latin America Version
|
| 3 |
+
|
| 4 |
+
# Configuración de la aplicación
|
| 5 |
+
APP_CONFIG = {
|
| 6 |
+
"title": "🎬 VEO3 Free - Generador de Video con IA",
|
| 7 |
+
"description": "Genera videos profesionales con audio automático usando inteligencia artificial",
|
| 8 |
+
"version": "1.0.0",
|
| 9 |
+
"author": "NTIA Team",
|
| 10 |
+
"region": "Latinoamérica",
|
| 11 |
+
"language": "español"
|
| 12 |
+
}
|
| 13 |
+
|
| 14 |
+
# Configuración de la interfaz
|
| 15 |
+
UI_CONFIG = {
|
| 16 |
+
"theme": "soft",
|
| 17 |
+
"primary_color": "#8b5cf6", # Púrpura
|
| 18 |
+
"secondary_color": "#3b82f6", # Azul
|
| 19 |
+
"accent_color": "#f59e0b", # Naranja
|
| 20 |
+
"success_color": "#10b981", # Verde
|
| 21 |
+
"warning_color": "#f59e0b", # Amarillo
|
| 22 |
+
"error_color": "#ef4444", # Rojo
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
# Configuración de video por defecto
|
| 26 |
+
VIDEO_CONFIG = {
|
| 27 |
+
"default_duration": 4,
|
| 28 |
+
"default_steps": 4,
|
| 29 |
+
"default_height": 480,
|
| 30 |
+
"default_width": 832,
|
| 31 |
+
"default_seed": 2025,
|
| 32 |
+
"default_nag_scale": 11.0,
|
| 33 |
+
"min_duration": 1,
|
| 34 |
+
"max_duration": 8,
|
| 35 |
+
"min_steps": 1,
|
| 36 |
+
"max_steps": 8,
|
| 37 |
+
"min_height": 128,
|
| 38 |
+
"max_height": 896,
|
| 39 |
+
"min_width": 128,
|
| 40 |
+
"max_width": 896,
|
| 41 |
+
"fps": 16
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
# Configuración de audio por defecto
|
| 45 |
+
AUDIO_CONFIG = {
|
| 46 |
+
"default_enabled": True,
|
| 47 |
+
"default_steps": 25,
|
| 48 |
+
"default_cfg_strength": 4.5,
|
| 49 |
+
"min_steps": 10,
|
| 50 |
+
"max_steps": 50,
|
| 51 |
+
"min_cfg_strength": 1.0,
|
| 52 |
+
"max_cfg_strength": 10.0
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
# Prompts negativos por defecto
|
| 56 |
+
NEGATIVE_PROMPTS = {
|
| 57 |
+
"video": "Estático, inmóvil, quieto, feo, mala calidad, peor calidad, mal dibujado, baja resolución, borroso, falta de detalles",
|
| 58 |
+
"audio": "música"
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
# Ejemplos de prompts en español
|
| 62 |
+
EXAMPLE_PROMPTS = [
|
| 63 |
+
{
|
| 64 |
+
"title": "Autopista de Medianoche",
|
| 65 |
+
"prompt": "Autopista de medianoche fuera de una ciudad iluminada con neón. Un Porsche 911 Carrera RS negro de 1973 acelera a 120 km/h. Dentro, un cantante-guitarrista elegante canta mientras conduce, guitarra vintage sunburst en el asiento del pasajero. Las luces de sodio de la calle se deslizan sobre el capó; paneles RGB cambian de magenta a azul en el conductor. Cámara: inmersión de dron, toma baja de rueda con brazo ruso, gimbal interior, barrel roll FPV, espiral aérea. Paleta neo-noir, reflejos de asfalto mojado por lluvia, rugido del motor flat-six mezclado con guitarra en vivo.",
|
| 66 |
+
"negative_prompt": NEGATIVE_PROMPTS["video"],
|
| 67 |
+
"nag_scale": 11.0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"title": "Concierto de Rock",
|
| 71 |
+
"prompt": "Concierto de rock en arena lleno con 20,000 fanáticos. Un guitarrista principal extravagante con chaqueta de cuero y aviators espejados hace shred en una Flying V cereza-roja en un escenario elevado. Llamas de pirotecnia se disparan en cada downbeat, chorros de CO₂ estallan detrás. Luces móviles giran en turquesa y ámbar, follow-spots iluminan el pelo del guitarrista. Steadicam órbita 360°, toma de grúa elevándose sobre la multitud, ultra cámara lenta del ataque de púa a 1,000 fps. Grado de película turquesa-naranja, rugido ensordecedor de la multitud mezclado con solo de guitarra chillón.",
|
| 72 |
+
"negative_prompt": NEGATIVE_PROMPTS["video"],
|
| 73 |
+
"nag_scale": 11.0
|
| 74 |
+
},
|
| 75 |
+
{
|
| 76 |
+
"title": "Camino Rural",
|
| 77 |
+
"prompt": "Camino rural de hora dorada serpenteando a través de campos de trigo ondulantes. Un hombre y una mujer montan una motocicleta café-racer vintage, pelo y bufanda ondeando en la brisa cálida. Toma de persecución con dron revela campos agrícolas infinitos; slider bajo a lo largo de la rueda trasera captura estela de polvo. Luz de sol retroilumina a los jinetes, bloom de lente en los reflejos. Underscore de rock acústico suave; rugido del motor mezclado a -8 dB. Grado de color cálido pastel, grano de película suave para ambiente nostálgico.",
|
| 78 |
+
"negative_prompt": NEGATIVE_PROMPTS["video"],
|
| 79 |
+
"nag_scale": 11.0
|
| 80 |
+
}
|
| 81 |
+
]
|
| 82 |
+
|
| 83 |
+
# Configuración de modelos
|
| 84 |
+
MODEL_CONFIG = {
|
| 85 |
+
"video_model": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
|
| 86 |
+
"sub_model": "vrgamedevgirl84/Wan14BT2VFusioniX",
|
| 87 |
+
"lora_model": "Kijai/WanVideo_comfy",
|
| 88 |
+
"audio_model": "large_44k_v2"
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
# Configuración de rendimiento
|
| 92 |
+
PERFORMANCE_CONFIG = {
|
| 93 |
+
"gpu_memory_fraction": 0.9,
|
| 94 |
+
"enable_mixed_precision": True,
|
| 95 |
+
"enable_attention_slicing": True,
|
| 96 |
+
"enable_vae_slicing": True,
|
| 97 |
+
"max_batch_size": 1
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
# Mensajes de la interfaz
|
| 101 |
+
MESSAGES = {
|
| 102 |
+
"loading": "Cargando modelo...",
|
| 103 |
+
"generating": "Generando video...",
|
| 104 |
+
"adding_audio": "Añadiendo audio...",
|
| 105 |
+
"complete": "¡Video generado exitosamente!",
|
| 106 |
+
"error": "Error en la generación",
|
| 107 |
+
"tips": {
|
| 108 |
+
"prompt": "💡 Usa descripciones detalladas y cinematográficas para mejores resultados",
|
| 109 |
+
"audio": "🎧 El audio se genera automáticamente basado en tu descripción",
|
| 110 |
+
"quality": "⚡ Más pasos = mejor calidad, pero más tiempo de espera",
|
| 111 |
+
"resolution": "📐 Resoluciones más altas requieren más tiempo de procesamiento"
|
| 112 |
+
}
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
# Configuración de validación
|
| 116 |
+
VALIDATION_CONFIG = {
|
| 117 |
+
"max_prompt_length": 1000,
|
| 118 |
+
"min_prompt_length": 10,
|
| 119 |
+
"allowed_file_types": [".mp4", ".avi", ".mov"],
|
| 120 |
+
"max_file_size_mb": 100
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
# Configuración de logging
|
| 124 |
+
LOGGING_CONFIG = {
|
| 125 |
+
"level": "INFO",
|
| 126 |
+
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
| 127 |
+
"file": "veo3_free.log"
|
| 128 |
+
}
|
mmaudio/__init__.py
ADDED
|
File without changes
|
mmaudio/data/__init__.py
ADDED
|
File without changes
|
mmaudio/data/av_utils.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from fractions import Fraction
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from typing import Optional
|
| 5 |
+
|
| 6 |
+
import av
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
from av import AudioFrame
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@dataclass
|
| 13 |
+
class VideoInfo:
|
| 14 |
+
duration_sec: float
|
| 15 |
+
fps: Fraction
|
| 16 |
+
clip_frames: torch.Tensor
|
| 17 |
+
sync_frames: torch.Tensor
|
| 18 |
+
all_frames: Optional[list[np.ndarray]]
|
| 19 |
+
|
| 20 |
+
@property
|
| 21 |
+
def height(self):
|
| 22 |
+
return self.all_frames[0].shape[0]
|
| 23 |
+
|
| 24 |
+
@property
|
| 25 |
+
def width(self):
|
| 26 |
+
return self.all_frames[0].shape[1]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def read_frames(video_path: Path, list_of_fps: list[float], start_sec: float, end_sec: float,
|
| 30 |
+
need_all_frames: bool) -> tuple[list[np.ndarray], list[np.ndarray], Fraction]:
|
| 31 |
+
output_frames = [[] for _ in list_of_fps]
|
| 32 |
+
next_frame_time_for_each_fps = [0.0 for _ in list_of_fps]
|
| 33 |
+
time_delta_for_each_fps = [1 / fps for fps in list_of_fps]
|
| 34 |
+
all_frames = []
|
| 35 |
+
|
| 36 |
+
# container = av.open(video_path)
|
| 37 |
+
with av.open(video_path) as container:
|
| 38 |
+
stream = container.streams.video[0]
|
| 39 |
+
fps = stream.guessed_rate
|
| 40 |
+
stream.thread_type = 'AUTO'
|
| 41 |
+
for packet in container.demux(stream):
|
| 42 |
+
for frame in packet.decode():
|
| 43 |
+
frame_time = frame.time
|
| 44 |
+
if frame_time < start_sec:
|
| 45 |
+
continue
|
| 46 |
+
if frame_time > end_sec:
|
| 47 |
+
break
|
| 48 |
+
|
| 49 |
+
frame_np = None
|
| 50 |
+
if need_all_frames:
|
| 51 |
+
frame_np = frame.to_ndarray(format='rgb24')
|
| 52 |
+
all_frames.append(frame_np)
|
| 53 |
+
|
| 54 |
+
for i, _ in enumerate(list_of_fps):
|
| 55 |
+
this_time = frame_time
|
| 56 |
+
while this_time >= next_frame_time_for_each_fps[i]:
|
| 57 |
+
if frame_np is None:
|
| 58 |
+
frame_np = frame.to_ndarray(format='rgb24')
|
| 59 |
+
|
| 60 |
+
output_frames[i].append(frame_np)
|
| 61 |
+
next_frame_time_for_each_fps[i] += time_delta_for_each_fps[i]
|
| 62 |
+
|
| 63 |
+
output_frames = [np.stack(frames) for frames in output_frames]
|
| 64 |
+
return output_frames, all_frames, fps
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def reencode_with_audio(video_info: VideoInfo, output_path: Path, audio: torch.Tensor,
|
| 68 |
+
sampling_rate: int):
|
| 69 |
+
container = av.open(output_path, 'w')
|
| 70 |
+
output_video_stream = container.add_stream('h264', video_info.fps)
|
| 71 |
+
output_video_stream.codec_context.bit_rate = 10 * 1e6 # 10 Mbps
|
| 72 |
+
output_video_stream.width = video_info.width
|
| 73 |
+
output_video_stream.height = video_info.height
|
| 74 |
+
output_video_stream.pix_fmt = 'yuv420p'
|
| 75 |
+
|
| 76 |
+
output_audio_stream = container.add_stream('aac', sampling_rate)
|
| 77 |
+
|
| 78 |
+
# encode video
|
| 79 |
+
for image in video_info.all_frames:
|
| 80 |
+
image = av.VideoFrame.from_ndarray(image)
|
| 81 |
+
packet = output_video_stream.encode(image)
|
| 82 |
+
container.mux(packet)
|
| 83 |
+
|
| 84 |
+
for packet in output_video_stream.encode():
|
| 85 |
+
container.mux(packet)
|
| 86 |
+
|
| 87 |
+
# convert float tensor audio to numpy array
|
| 88 |
+
audio_np = audio.numpy().astype(np.float32)
|
| 89 |
+
audio_frame = AudioFrame.from_ndarray(audio_np, format='flt', layout='mono')
|
| 90 |
+
audio_frame.sample_rate = sampling_rate
|
| 91 |
+
|
| 92 |
+
for packet in output_audio_stream.encode(audio_frame):
|
| 93 |
+
container.mux(packet)
|
| 94 |
+
|
| 95 |
+
for packet in output_audio_stream.encode():
|
| 96 |
+
container.mux(packet)
|
| 97 |
+
|
| 98 |
+
container.close()
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def remux_with_audio(video_path: Path, audio: torch.Tensor, output_path: Path, sampling_rate: int):
|
| 102 |
+
"""
|
| 103 |
+
NOTE: I don't think we can get the exact video duration right without re-encoding
|
| 104 |
+
so we are not using this but keeping it here for reference
|
| 105 |
+
"""
|
| 106 |
+
video = av.open(video_path)
|
| 107 |
+
output = av.open(output_path, 'w')
|
| 108 |
+
input_video_stream = video.streams.video[0]
|
| 109 |
+
output_video_stream = output.add_stream(template=input_video_stream)
|
| 110 |
+
output_audio_stream = output.add_stream('aac', sampling_rate)
|
| 111 |
+
|
| 112 |
+
duration_sec = audio.shape[-1] / sampling_rate
|
| 113 |
+
|
| 114 |
+
for packet in video.demux(input_video_stream):
|
| 115 |
+
# We need to skip the "flushing" packets that `demux` generates.
|
| 116 |
+
if packet.dts is None:
|
| 117 |
+
continue
|
| 118 |
+
# We need to assign the packet to the new stream.
|
| 119 |
+
packet.stream = output_video_stream
|
| 120 |
+
output.mux(packet)
|
| 121 |
+
|
| 122 |
+
# convert float tensor audio to numpy array
|
| 123 |
+
audio_np = audio.numpy().astype(np.float32)
|
| 124 |
+
audio_frame = av.AudioFrame.from_ndarray(audio_np, format='flt', layout='mono')
|
| 125 |
+
audio_frame.sample_rate = sampling_rate
|
| 126 |
+
|
| 127 |
+
for packet in output_audio_stream.encode(audio_frame):
|
| 128 |
+
output.mux(packet)
|
| 129 |
+
|
| 130 |
+
for packet in output_audio_stream.encode():
|
| 131 |
+
output.mux(packet)
|
| 132 |
+
|
| 133 |
+
video.close()
|
| 134 |
+
output.close()
|
| 135 |
+
|
| 136 |
+
output.close()
|
mmaudio/eval_utils.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dataclasses
|
| 2 |
+
import logging
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from typing import Optional
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from colorlog import ColoredFormatter
|
| 8 |
+
from torchvision.transforms import v2
|
| 9 |
+
|
| 10 |
+
from mmaudio.data.av_utils import VideoInfo, read_frames, reencode_with_audio
|
| 11 |
+
from mmaudio.model.flow_matching import FlowMatching
|
| 12 |
+
from mmaudio.model.networks import MMAudio
|
| 13 |
+
from mmaudio.model.sequence_config import (CONFIG_16K, CONFIG_44K, SequenceConfig)
|
| 14 |
+
from mmaudio.model.utils.features_utils import FeaturesUtils
|
| 15 |
+
from mmaudio.utils.download_utils import download_model_if_needed
|
| 16 |
+
|
| 17 |
+
log = logging.getLogger()
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@dataclasses.dataclass
|
| 21 |
+
class ModelConfig:
|
| 22 |
+
model_name: str
|
| 23 |
+
model_path: Path
|
| 24 |
+
vae_path: Path
|
| 25 |
+
bigvgan_16k_path: Optional[Path]
|
| 26 |
+
mode: str
|
| 27 |
+
synchformer_ckpt: Path = Path('./ext_weights/synchformer_state_dict.pth')
|
| 28 |
+
|
| 29 |
+
@property
|
| 30 |
+
def seq_cfg(self) -> SequenceConfig:
|
| 31 |
+
if self.mode == '16k':
|
| 32 |
+
return CONFIG_16K
|
| 33 |
+
elif self.mode == '44k':
|
| 34 |
+
return CONFIG_44K
|
| 35 |
+
|
| 36 |
+
def download_if_needed(self):
|
| 37 |
+
download_model_if_needed(self.model_path)
|
| 38 |
+
download_model_if_needed(self.vae_path)
|
| 39 |
+
if self.bigvgan_16k_path is not None:
|
| 40 |
+
download_model_if_needed(self.bigvgan_16k_path)
|
| 41 |
+
download_model_if_needed(self.synchformer_ckpt)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
small_16k = ModelConfig(model_name='small_16k',
|
| 45 |
+
model_path=Path('./weights/mmaudio_small_16k.pth'),
|
| 46 |
+
vae_path=Path('./ext_weights/v1-16.pth'),
|
| 47 |
+
bigvgan_16k_path=Path('./ext_weights/best_netG.pt'),
|
| 48 |
+
mode='16k')
|
| 49 |
+
small_44k = ModelConfig(model_name='small_44k',
|
| 50 |
+
model_path=Path('./weights/mmaudio_small_44k.pth'),
|
| 51 |
+
vae_path=Path('./ext_weights/v1-44.pth'),
|
| 52 |
+
bigvgan_16k_path=None,
|
| 53 |
+
mode='44k')
|
| 54 |
+
medium_44k = ModelConfig(model_name='medium_44k',
|
| 55 |
+
model_path=Path('./weights/mmaudio_medium_44k.pth'),
|
| 56 |
+
vae_path=Path('./ext_weights/v1-44.pth'),
|
| 57 |
+
bigvgan_16k_path=None,
|
| 58 |
+
mode='44k')
|
| 59 |
+
large_44k = ModelConfig(model_name='large_44k',
|
| 60 |
+
model_path=Path('./weights/mmaudio_large_44k.pth'),
|
| 61 |
+
vae_path=Path('./ext_weights/v1-44.pth'),
|
| 62 |
+
bigvgan_16k_path=None,
|
| 63 |
+
mode='44k')
|
| 64 |
+
large_44k_v2 = ModelConfig(model_name='large_44k_v2',
|
| 65 |
+
model_path=Path('./weights/mmaudio_large_44k_v2.pth'),
|
| 66 |
+
vae_path=Path('./ext_weights/v1-44.pth'),
|
| 67 |
+
bigvgan_16k_path=None,
|
| 68 |
+
mode='44k')
|
| 69 |
+
all_model_cfg: dict[str, ModelConfig] = {
|
| 70 |
+
'small_16k': small_16k,
|
| 71 |
+
'small_44k': small_44k,
|
| 72 |
+
'medium_44k': medium_44k,
|
| 73 |
+
'large_44k': large_44k,
|
| 74 |
+
'large_44k_v2': large_44k_v2,
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def generate(
|
| 79 |
+
clip_video: Optional[torch.Tensor],
|
| 80 |
+
sync_video: Optional[torch.Tensor],
|
| 81 |
+
text: Optional[list[str]],
|
| 82 |
+
*,
|
| 83 |
+
negative_text: Optional[list[str]] = None,
|
| 84 |
+
feature_utils: FeaturesUtils,
|
| 85 |
+
net: MMAudio,
|
| 86 |
+
fm: FlowMatching,
|
| 87 |
+
rng: torch.Generator,
|
| 88 |
+
cfg_strength: float,
|
| 89 |
+
clip_batch_size_multiplier: int = 40,
|
| 90 |
+
sync_batch_size_multiplier: int = 40,
|
| 91 |
+
) -> torch.Tensor:
|
| 92 |
+
device = feature_utils.device
|
| 93 |
+
dtype = feature_utils.dtype
|
| 94 |
+
|
| 95 |
+
bs = len(text)
|
| 96 |
+
if clip_video is not None:
|
| 97 |
+
clip_video = clip_video.to(device, dtype, non_blocking=True)
|
| 98 |
+
clip_features = feature_utils.encode_video_with_clip(clip_video,
|
| 99 |
+
batch_size=bs *
|
| 100 |
+
clip_batch_size_multiplier)
|
| 101 |
+
else:
|
| 102 |
+
clip_features = net.get_empty_clip_sequence(bs)
|
| 103 |
+
|
| 104 |
+
if sync_video is not None:
|
| 105 |
+
sync_video = sync_video.to(device, dtype, non_blocking=True)
|
| 106 |
+
sync_features = feature_utils.encode_video_with_sync(sync_video,
|
| 107 |
+
batch_size=bs *
|
| 108 |
+
sync_batch_size_multiplier)
|
| 109 |
+
else:
|
| 110 |
+
sync_features = net.get_empty_sync_sequence(bs)
|
| 111 |
+
|
| 112 |
+
if text is not None:
|
| 113 |
+
text_features = feature_utils.encode_text(text)
|
| 114 |
+
else:
|
| 115 |
+
text_features = net.get_empty_string_sequence(bs)
|
| 116 |
+
|
| 117 |
+
if negative_text is not None:
|
| 118 |
+
assert len(negative_text) == bs
|
| 119 |
+
negative_text_features = feature_utils.encode_text(negative_text)
|
| 120 |
+
else:
|
| 121 |
+
negative_text_features = net.get_empty_string_sequence(bs)
|
| 122 |
+
|
| 123 |
+
x0 = torch.randn(bs,
|
| 124 |
+
net.latent_seq_len,
|
| 125 |
+
net.latent_dim,
|
| 126 |
+
device=device,
|
| 127 |
+
dtype=dtype,
|
| 128 |
+
generator=rng)
|
| 129 |
+
preprocessed_conditions = net.preprocess_conditions(clip_features, sync_features, text_features)
|
| 130 |
+
empty_conditions = net.get_empty_conditions(
|
| 131 |
+
bs, negative_text_features=negative_text_features if negative_text is not None else None)
|
| 132 |
+
|
| 133 |
+
cfg_ode_wrapper = lambda t, x: net.ode_wrapper(t, x, preprocessed_conditions, empty_conditions,
|
| 134 |
+
cfg_strength)
|
| 135 |
+
x1 = fm.to_data(cfg_ode_wrapper, x0)
|
| 136 |
+
x1 = net.unnormalize(x1)
|
| 137 |
+
spec = feature_utils.decode(x1)
|
| 138 |
+
audio = feature_utils.vocode(spec)
|
| 139 |
+
return audio
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
LOGFORMAT = " %(log_color)s%(levelname)-8s%(reset)s | %(log_color)s%(message)s%(reset)s"
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def setup_eval_logging(log_level: int = logging.INFO):
    """Configure the root logger with a colored stream handler for eval scripts.

    Args:
        log_level: minimum level applied to the root logger and the handler.
    """
    handler = logging.StreamHandler()
    handler.setLevel(log_level)
    handler.setFormatter(ColoredFormatter(LOGFORMAT))

    root_logger = logging.getLogger()
    logging.root.setLevel(log_level)
    root_logger.setLevel(log_level)
    root_logger.addHandler(handler)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def load_video(video_path: Path, duration_sec: float, load_all_frames: bool = True) -> VideoInfo:
    """Decode a video and build the frame tensors for the CLIP and sync branches.

    Args:
        video_path: path to the input video file.
        duration_sec: requested length in seconds; reduced in place if either
            decoded stream turns out shorter.
        load_all_frames: when True, keep every original frame as well (stored
            in VideoInfo.all_frames; otherwise that field is None).

    Returns:
        VideoInfo with the (possibly truncated) duration, original fps,
        CLIP-rate frames, sync-rate frames, and optionally all frames.
    """
    # Target resolution / frame rate for each visual branch.
    _CLIP_SIZE = 384
    _CLIP_FPS = 8.0

    _SYNC_SIZE = 224
    _SYNC_FPS = 25.0

    # CLIP branch: square resize (no crop), converted to float32 with scaling.
    clip_transform = v2.Compose([
        v2.Resize((_CLIP_SIZE, _CLIP_SIZE), interpolation=v2.InterpolationMode.BICUBIC),
        v2.ToImage(),
        v2.ToDtype(torch.float32, scale=True),
    ])

    # Sync branch: shorter-side resize + center crop, then mean/std normalization.
    sync_transform = v2.Compose([
        v2.Resize(_SYNC_SIZE, interpolation=v2.InterpolationMode.BICUBIC),
        v2.CenterCrop(_SYNC_SIZE),
        v2.ToImage(),
        v2.ToDtype(torch.float32, scale=True),
        v2.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    # Decode once at both frame rates.
    output_frames, all_frames, orig_fps = read_frames(video_path,
                                                      list_of_fps=[_CLIP_FPS, _SYNC_FPS],
                                                      start_sec=0,
                                                      end_sec=duration_sec,
                                                      need_all_frames=load_all_frames)

    clip_chunk, sync_chunk = output_frames
    # Move dim 3 to dim 1 — assumes read_frames yields (T, H, W, C) arrays,
    # giving the (T, C, H, W) layout the transforms expect. TODO confirm.
    clip_chunk = torch.from_numpy(clip_chunk).permute(0, 3, 1, 2)
    sync_chunk = torch.from_numpy(sync_chunk).permute(0, 3, 1, 2)

    clip_frames = clip_transform(clip_chunk)
    sync_frames = sync_transform(sync_chunk)

    # Footage actually obtained at each rate; may be shorter than requested.
    clip_length_sec = clip_frames.shape[0] / _CLIP_FPS
    sync_length_sec = sync_frames.shape[0] / _SYNC_FPS

    # Sequential checks shrink duration_sec to the shortest of
    # (requested, clip stream, sync stream).
    if clip_length_sec < duration_sec:
        log.warning(f'Clip video is too short: {clip_length_sec:.2f} < {duration_sec:.2f}')
        log.warning(f'Truncating to {clip_length_sec:.2f} sec')
        duration_sec = clip_length_sec

    if sync_length_sec < duration_sec:
        log.warning(f'Sync video is too short: {sync_length_sec:.2f} < {duration_sec:.2f}')
        log.warning(f'Truncating to {sync_length_sec:.2f} sec')
        duration_sec = sync_length_sec

    # Trim both streams to the agreed duration.
    clip_frames = clip_frames[:int(_CLIP_FPS * duration_sec)]
    sync_frames = sync_frames[:int(_SYNC_FPS * duration_sec)]

    video_info = VideoInfo(
        duration_sec=duration_sec,
        fps=orig_fps,
        clip_frames=clip_frames,
        sync_frames=sync_frames,
        all_frames=all_frames if load_all_frames else None,
    )
    return video_info
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def make_video(video_info: VideoInfo, output_path: Path, audio: torch.Tensor, sampling_rate: int):
    """Write the video to output_path with the generated audio track attached.

    Thin wrapper over reencode_with_audio; see that function for the actual
    encoding behavior.
    """
    reencode_with_audio(video_info, output_path, audio, sampling_rate)
|
mmaudio/ext/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
mmaudio/ext/autoencoder/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .autoencoder import AutoEncoderModule
|
mmaudio/ext/autoencoder/autoencoder.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Literal, Optional
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
from mmaudio.ext.autoencoder.vae import VAE, get_my_vae
|
| 7 |
+
from mmaudio.ext.bigvgan import BigVGAN
|
| 8 |
+
from mmaudio.ext.bigvgan_v2.bigvgan import BigVGAN as BigVGANv2
|
| 9 |
+
from mmaudio.model.utils.distributions import DiagonalGaussianDistribution
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class AutoEncoderModule(nn.Module):
    """Frozen inference wrapper bundling the mel VAE with a matching vocoder.

    Mode '16k' loads a BigVGAN checkpoint from vocoder_ckpt_path; mode '44k'
    pulls a pretrained BigVGAN-v2. All parameters are frozen after loading.
    """

    def __init__(self,
                 *,
                 vae_ckpt_path,
                 vocoder_ckpt_path: Optional[str] = None,
                 mode: Literal['16k', '44k'],
                 need_vae_encoder: bool = True):
        super().__init__()
        # Build the VAE, load its weights (CPU, weights-only), and bake in
        # the forced weight normalization for inference.
        self.vae: VAE = get_my_vae(mode).eval()
        state_dict = torch.load(vae_ckpt_path, weights_only=True, map_location='cpu')
        self.vae.load_state_dict(state_dict, strict=False)
        self.vae.remove_weight_norm()

        # Select the vocoder matching the sampling-rate mode.
        if mode == '16k':
            assert vocoder_ckpt_path is not None
            self.vocoder = BigVGAN(vocoder_ckpt_path).eval()
        elif mode == '44k':
            self.vocoder = BigVGANv2.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x',
                                                     use_cuda_kernel=False)
            self.vocoder.remove_weight_norm()
        else:
            raise ValueError(f'Unknown mode: {mode}')

        # Inference-only module: freeze every parameter.
        for p in self.parameters():
            p.requires_grad = False

        # Optionally drop the encoder when only decoding/vocoding is needed.
        if not need_vae_encoder:
            del self.vae.encoder

    @torch.inference_mode()
    def encode(self, x: torch.Tensor) -> DiagonalGaussianDistribution:
        """Encode a spectrogram batch into a latent posterior."""
        return self.vae.encode(x)

    @torch.inference_mode()
    def decode(self, z: torch.Tensor) -> torch.Tensor:
        """Decode latents back into a spectrogram batch."""
        return self.vae.decode(z)

    @torch.inference_mode()
    def vocode(self, spec: torch.Tensor) -> torch.Tensor:
        """Turn a spectrogram into a waveform via the vocoder."""
        return self.vocoder(spec)
|
mmaudio/ext/autoencoder/edm2_utils.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# This work is licensed under a Creative Commons
|
| 4 |
+
# Attribution-NonCommercial-ShareAlike 4.0 International License.
|
| 5 |
+
# You should have received a copy of the license along with this
|
| 6 |
+
# work. If not, see http://creativecommons.org/licenses/by-nc-sa/4.0/
|
| 7 |
+
"""Improved diffusion model architecture proposed in the paper
|
| 8 |
+
"Analyzing and Improving the Training Dynamics of Diffusion Models"."""
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
|
| 13 |
+
#----------------------------------------------------------------------------
|
| 14 |
+
# Variant of constant() that inherits dtype and device from the given
|
| 15 |
+
# reference tensor by default.
|
| 16 |
+
|
| 17 |
+
# Module-level cache: identical requests return the very same tensor object.
_constant_cache = dict()


def constant(value, shape=None, dtype=None, device=None, memory_format=None):
    """Return a (cached) constant tensor with the requested shape/dtype/device.

    Repeated calls with identical arguments return the same tensor object,
    avoiding re-allocation of small constants in hot paths.
    """
    value = np.asarray(value)
    shape = None if shape is None else tuple(shape)
    dtype = torch.get_default_dtype() if dtype is None else dtype
    device = torch.device('cpu') if device is None else device
    memory_format = torch.contiguous_format if memory_format is None else memory_format

    key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format)
    cached = _constant_cache.get(key, None)
    if cached is not None:
        return cached

    tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device)
    if shape is not None:
        # Broadcast against an empty tensor of the target shape.
        tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape))
    tensor = tensor.contiguous(memory_format=memory_format)
    _constant_cache[key] = tensor
    return tensor
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def const_like(ref, value, shape=None, dtype=None, device=None, memory_format=None):
    """Like constant(), but dtype and device default to those of *ref*."""
    return constant(value,
                    shape=shape,
                    dtype=ref.dtype if dtype is None else dtype,
                    device=ref.device if device is None else device,
                    memory_format=memory_format)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
#----------------------------------------------------------------------------
|
| 51 |
+
# Normalize given tensor to unit magnitude with respect to the given
|
| 52 |
+
# dimensions. Default = all dimensions except the first.
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def normalize(x, dim=None, eps=1e-4):
    """Scale x to unit magnitude over *dim* (default: all dims except the first).

    Implements EDM2's forced weight normalization: the divisor is
    eps + ||x|| * sqrt(norm.numel() / x.numel()).
    """
    dims = list(range(1, x.ndim)) if dim is None else dim
    magnitude = torch.linalg.vector_norm(x, dim=dims, keepdim=True, dtype=torch.float32)
    # torch.add(eps, m, alpha=a) == eps + a * m
    divisor = torch.add(eps, magnitude, alpha=np.sqrt(magnitude.numel() / x.numel()))
    return x / divisor.to(x.dtype)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class Normalize(torch.nn.Module):
    """Module wrapper around normalize() with a fixed dim/eps configuration."""

    def __init__(self, dim=None, eps=1e-4):
        super().__init__()
        # Stored as plain attributes; normalize() is stateless otherwise.
        self.dim = dim
        self.eps = eps

    def forward(self, x):
        return normalize(x, dim=self.dim, eps=self.eps)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
#----------------------------------------------------------------------------
|
| 75 |
+
# Upsample or downsample the given tensor with the given filter,
|
| 76 |
+
# or keep it as is.
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def resample(x, f=[1, 1], mode='keep'):
    """Upsample ('up') or downsample ('down') x by 2x with filter f, or pass
    it through unchanged ('keep')."""
    if mode == 'keep':
        return x
    f = np.float32(f)
    # Filter taps must be a 1-D, even-length sequence.
    assert f.ndim == 1 and len(f) % 2 == 0
    pad = (len(f) - 1) // 2
    f = f / f.sum()  # normalize taps so they sum to 1
    # Separable 2-D filter from the 1-D taps, shaped (1, 1, k, k).
    f = np.outer(f, f)[np.newaxis, np.newaxis, :, :]
    f = const_like(x, f)
    c = x.shape[1]
    if mode == 'down':
        # Depthwise (groups=c) stride-2 convolution.
        # NOTE(review): padding is a 1-tuple; F.conv2d normally takes an int or
        # a 2-tuple — confirm this works on the targeted torch version.
        return torch.nn.functional.conv2d(x,
                                          f.tile([c, 1, 1, 1]),
                                          groups=c,
                                          stride=2,
                                          padding=(pad, ))
    assert mode == 'up'
    # Transposed depthwise convolution; the filter is scaled by 4
    # (2x upsampling in each of the two spatial dims).
    return torch.nn.functional.conv_transpose2d(x, (f * 4).tile([c, 1, 1, 1]),
                                                groups=c,
                                                stride=2,
                                                padding=(pad, ))
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
#----------------------------------------------------------------------------
|
| 103 |
+
# Magnitude-preserving SiLU (Equation 81).
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def mp_silu(x):
    """Magnitude-preserving SiLU (EDM2 Eq. 81): SiLU rescaled by 1/0.596."""
    activated = torch.nn.functional.silu(x)
    return activated / 0.596
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class MPSiLU(torch.nn.Module):
    """Stateless module form of the magnitude-preserving SiLU."""

    def forward(self, x):
        return mp_silu(x)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
#----------------------------------------------------------------------------
|
| 117 |
+
# Magnitude-preserving sum (Equation 88).
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def mp_sum(a, b, t=0.5):
    """Magnitude-preserving weighted sum of a and b (EDM2 Eq. 88)."""
    blended = a.lerp(b, t)
    # Rescale so the blend keeps unit magnitude for unit-magnitude inputs.
    return blended / np.sqrt((1 - t)**2 + t**2)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
#----------------------------------------------------------------------------
|
| 125 |
+
# Magnitude-preserving concatenation (Equation 103).
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def mp_cat(a, b, dim=1, t=0.5):
    """Magnitude-preserving concatenation of a and b along *dim* (EDM2 Eq. 103)."""
    n_a = a.shape[dim]
    n_b = b.shape[dim]
    # Per-input weights chosen so the concatenation preserves magnitude.
    scale = np.sqrt((n_a + n_b) / ((1 - t)**2 + t**2))
    w_a = scale * (1 - t) / np.sqrt(n_a)
    w_b = scale * t / np.sqrt(n_b)
    return torch.cat([w_a * a, w_b * b], dim=dim)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
#----------------------------------------------------------------------------
|
| 138 |
+
# Magnitude-preserving convolution or fully-connected layer (Equation 47)
|
| 139 |
+
# with force weight normalization (Equation 66).
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
class MPConv1D(torch.nn.Module):
    """Magnitude-preserving 1-D convolution / linear layer (EDM2 Eq. 47/66).

    Forced weight normalization is baked into the weights once via
    remove_weight_norm(); forward() refuses to run before that has happened.
    """

    def __init__(self, in_channels, out_channels, kernel_size):
        super().__init__()
        self.out_channels = out_channels
        self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels, kernel_size))
        # Set to True by remove_weight_norm(); guards forward().
        self.weight_norm_removed = False

    def forward(self, x, gain=1):
        assert self.weight_norm_removed, 'call remove_weight_norm() before inference'

        w = gain * self.weight
        if w.ndim == 2:
            # Fully-connected path.
            return x @ w.t()
        assert w.ndim == 3
        # "Same"-length convolution for odd kernels.
        return torch.nn.functional.conv1d(x, w, padding=(w.shape[-1] // 2, ))

    def remove_weight_norm(self):
        """Bake normalization into the stored weights; returns self."""
        # Traditional weight normalization, then magnitude-preserving scaling.
        baked = normalize(self.weight.to(torch.float32)) / np.sqrt(self.weight[0].numel())
        self.weight.data.copy_(baked.to(self.weight.dtype))

        self.weight_norm_removed = True
        return self
|
mmaudio/ext/autoencoder/vae.py
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
|
| 7 |
+
from mmaudio.ext.autoencoder.edm2_utils import MPConv1D
|
| 8 |
+
from mmaudio.ext.autoencoder.vae_modules import (AttnBlock1D, Downsample1D, ResnetBlock1D,
|
| 9 |
+
Upsample1D, nonlinearity)
|
| 10 |
+
from mmaudio.model.utils.distributions import DiagonalGaussianDistribution
|
| 11 |
+
|
| 12 |
+
log = logging.getLogger()
|
| 13 |
+
|
| 14 |
+
DATA_MEAN_80D = [
|
| 15 |
+
-1.6058, -1.3676, -1.2520, -1.2453, -1.2078, -1.2224, -1.2419, -1.2439, -1.2922, -1.2927,
|
| 16 |
+
-1.3170, -1.3543, -1.3401, -1.3836, -1.3907, -1.3912, -1.4313, -1.4152, -1.4527, -1.4728,
|
| 17 |
+
-1.4568, -1.5101, -1.5051, -1.5172, -1.5623, -1.5373, -1.5746, -1.5687, -1.6032, -1.6131,
|
| 18 |
+
-1.6081, -1.6331, -1.6489, -1.6489, -1.6700, -1.6738, -1.6953, -1.6969, -1.7048, -1.7280,
|
| 19 |
+
-1.7361, -1.7495, -1.7658, -1.7814, -1.7889, -1.8064, -1.8221, -1.8377, -1.8417, -1.8643,
|
| 20 |
+
-1.8857, -1.8929, -1.9173, -1.9379, -1.9531, -1.9673, -1.9824, -2.0042, -2.0215, -2.0436,
|
| 21 |
+
-2.0766, -2.1064, -2.1418, -2.1855, -2.2319, -2.2767, -2.3161, -2.3572, -2.3954, -2.4282,
|
| 22 |
+
-2.4659, -2.5072, -2.5552, -2.6074, -2.6584, -2.7107, -2.7634, -2.8266, -2.8981, -2.9673
|
| 23 |
+
]
|
| 24 |
+
|
| 25 |
+
DATA_STD_80D = [
|
| 26 |
+
1.0291, 1.0411, 1.0043, 0.9820, 0.9677, 0.9543, 0.9450, 0.9392, 0.9343, 0.9297, 0.9276, 0.9263,
|
| 27 |
+
0.9242, 0.9254, 0.9232, 0.9281, 0.9263, 0.9315, 0.9274, 0.9247, 0.9277, 0.9199, 0.9188, 0.9194,
|
| 28 |
+
0.9160, 0.9161, 0.9146, 0.9161, 0.9100, 0.9095, 0.9145, 0.9076, 0.9066, 0.9095, 0.9032, 0.9043,
|
| 29 |
+
0.9038, 0.9011, 0.9019, 0.9010, 0.8984, 0.8983, 0.8986, 0.8961, 0.8962, 0.8978, 0.8962, 0.8973,
|
| 30 |
+
0.8993, 0.8976, 0.8995, 0.9016, 0.8982, 0.8972, 0.8974, 0.8949, 0.8940, 0.8947, 0.8936, 0.8939,
|
| 31 |
+
0.8951, 0.8956, 0.9017, 0.9167, 0.9436, 0.9690, 1.0003, 1.0225, 1.0381, 1.0491, 1.0545, 1.0604,
|
| 32 |
+
1.0761, 1.0929, 1.1089, 1.1196, 1.1176, 1.1156, 1.1117, 1.1070
|
| 33 |
+
]
|
| 34 |
+
|
| 35 |
+
DATA_MEAN_128D = [
|
| 36 |
+
-3.3462, -2.6723, -2.4893, -2.3143, -2.2664, -2.3317, -2.1802, -2.4006, -2.2357, -2.4597,
|
| 37 |
+
-2.3717, -2.4690, -2.5142, -2.4919, -2.6610, -2.5047, -2.7483, -2.5926, -2.7462, -2.7033,
|
| 38 |
+
-2.7386, -2.8112, -2.7502, -2.9594, -2.7473, -3.0035, -2.8891, -2.9922, -2.9856, -3.0157,
|
| 39 |
+
-3.1191, -2.9893, -3.1718, -3.0745, -3.1879, -3.2310, -3.1424, -3.2296, -3.2791, -3.2782,
|
| 40 |
+
-3.2756, -3.3134, -3.3509, -3.3750, -3.3951, -3.3698, -3.4505, -3.4509, -3.5089, -3.4647,
|
| 41 |
+
-3.5536, -3.5788, -3.5867, -3.6036, -3.6400, -3.6747, -3.7072, -3.7279, -3.7283, -3.7795,
|
| 42 |
+
-3.8259, -3.8447, -3.8663, -3.9182, -3.9605, -3.9861, -4.0105, -4.0373, -4.0762, -4.1121,
|
| 43 |
+
-4.1488, -4.1874, -4.2461, -4.3170, -4.3639, -4.4452, -4.5282, -4.6297, -4.7019, -4.7960,
|
| 44 |
+
-4.8700, -4.9507, -5.0303, -5.0866, -5.1634, -5.2342, -5.3242, -5.4053, -5.4927, -5.5712,
|
| 45 |
+
-5.6464, -5.7052, -5.7619, -5.8410, -5.9188, -6.0103, -6.0955, -6.1673, -6.2362, -6.3120,
|
| 46 |
+
-6.3926, -6.4797, -6.5565, -6.6511, -6.8130, -6.9961, -7.1275, -7.2457, -7.3576, -7.4663,
|
| 47 |
+
-7.6136, -7.7469, -7.8815, -8.0132, -8.1515, -8.3071, -8.4722, -8.7418, -9.3975, -9.6628,
|
| 48 |
+
-9.7671, -9.8863, -9.9992, -10.0860, -10.1709, -10.5418, -11.2795, -11.3861
|
| 49 |
+
]
|
| 50 |
+
|
| 51 |
+
DATA_STD_128D = [
|
| 52 |
+
2.3804, 2.4368, 2.3772, 2.3145, 2.2803, 2.2510, 2.2316, 2.2083, 2.1996, 2.1835, 2.1769, 2.1659,
|
| 53 |
+
2.1631, 2.1618, 2.1540, 2.1606, 2.1571, 2.1567, 2.1612, 2.1579, 2.1679, 2.1683, 2.1634, 2.1557,
|
| 54 |
+
2.1668, 2.1518, 2.1415, 2.1449, 2.1406, 2.1350, 2.1313, 2.1415, 2.1281, 2.1352, 2.1219, 2.1182,
|
| 55 |
+
2.1327, 2.1195, 2.1137, 2.1080, 2.1179, 2.1036, 2.1087, 2.1036, 2.1015, 2.1068, 2.0975, 2.0991,
|
| 56 |
+
2.0902, 2.1015, 2.0857, 2.0920, 2.0893, 2.0897, 2.0910, 2.0881, 2.0925, 2.0873, 2.0960, 2.0900,
|
| 57 |
+
2.0957, 2.0958, 2.0978, 2.0936, 2.0886, 2.0905, 2.0845, 2.0855, 2.0796, 2.0840, 2.0813, 2.0817,
|
| 58 |
+
2.0838, 2.0840, 2.0917, 2.1061, 2.1431, 2.1976, 2.2482, 2.3055, 2.3700, 2.4088, 2.4372, 2.4609,
|
| 59 |
+
2.4731, 2.4847, 2.5072, 2.5451, 2.5772, 2.6147, 2.6529, 2.6596, 2.6645, 2.6726, 2.6803, 2.6812,
|
| 60 |
+
2.6899, 2.6916, 2.6931, 2.6998, 2.7062, 2.7262, 2.7222, 2.7158, 2.7041, 2.7485, 2.7491, 2.7451,
|
| 61 |
+
2.7485, 2.7233, 2.7297, 2.7233, 2.7145, 2.6958, 2.6788, 2.6439, 2.6007, 2.4786, 2.2469, 2.1877,
|
| 62 |
+
2.1392, 2.0717, 2.0107, 1.9676, 1.9140, 1.7102, 0.9101, 0.7164
|
| 63 |
+
]
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class VAE(nn.Module):
    """1-D VAE over mel spectrograms (80 bins for 16 kHz, 128 bins for 44 kHz).

    Inputs are standardized with precomputed per-bin statistics before
    encoding and de-standardized after decoding.
    """

    def __init__(
        self,
        *,
        data_dim: int,
        embed_dim: int,
        hidden_dim: int,
    ):
        """
        Args:
            data_dim: number of mel bins; must be 80 or 128 (selects the
                normalization statistics).
            embed_dim: latent channel count.
            hidden_dim: base channel count of the encoder/decoder.

        Raises:
            ValueError: if data_dim is neither 80 nor 128.
        """
        super().__init__()

        # Register statistics as buffers so they follow .to() and state_dict.
        if data_dim == 80:
            self.register_buffer('data_mean', torch.tensor(DATA_MEAN_80D, dtype=torch.float32))
            self.register_buffer('data_std', torch.tensor(DATA_STD_80D, dtype=torch.float32))
        elif data_dim == 128:
            self.register_buffer('data_mean', torch.tensor(DATA_MEAN_128D, dtype=torch.float32))
            self.register_buffer('data_std', torch.tensor(DATA_STD_128D, dtype=torch.float32))
        else:
            # Fail fast: previously an unsupported data_dim surfaced later as an
            # opaque AttributeError on self.data_mean.
            raise ValueError(f'Unsupported data_dim: {data_dim} (expected 80 or 128)')

        # Reshape to (1, C, 1) to broadcast over (batch, C, time).
        self.data_mean = self.data_mean.view(1, -1, 1)
        self.data_std = self.data_std.view(1, -1, 1)

        self.encoder = Encoder1D(
            dim=hidden_dim,
            ch_mult=(1, 2, 4),
            num_res_blocks=2,
            attn_layers=[3],
            down_layers=[0],
            in_dim=data_dim,
            embed_dim=embed_dim,
        )
        self.decoder = Decoder1D(
            dim=hidden_dim,
            ch_mult=(1, 2, 4),
            num_res_blocks=2,
            attn_layers=[3],
            down_layers=[0],
            in_dim=data_dim,
            out_dim=data_dim,
            embed_dim=embed_dim,
        )

        self.embed_dim = embed_dim

        self.initialize_weights()

    def initialize_weights(self):
        # No-op placeholder; weights are expected to come from load_weights().
        pass

    def encode(self, x: torch.Tensor, normalize: bool = True) -> DiagonalGaussianDistribution:
        """Encode x into a diagonal-Gaussian posterior (standardizing first by default)."""
        if normalize:
            x = self.normalize(x)
        moments = self.encoder(x)
        return DiagonalGaussianDistribution(moments)

    def decode(self, z: torch.Tensor, unnormalize: bool = True) -> torch.Tensor:
        """Decode latents z (de-standardizing the output by default)."""
        dec = self.decoder(z)
        if unnormalize:
            dec = self.unnormalize(dec)
        return dec

    def normalize(self, x: torch.Tensor) -> torch.Tensor:
        """Standardize x with the per-bin dataset statistics."""
        return (x - self.data_mean) / self.data_std

    def unnormalize(self, x: torch.Tensor) -> torch.Tensor:
        """Invert normalize()."""
        return x * self.data_std + self.data_mean

    def forward(
        self,
        x: torch.Tensor,
        sample_posterior: bool = True,
        rng: Optional[torch.Generator] = None,
        normalize: bool = True,
        unnormalize: bool = True,
    ) -> tuple[torch.Tensor, DiagonalGaussianDistribution]:
        """Full reconstruction pass.

        Returns:
            (reconstruction, posterior). The latent is sampled from the
            posterior when sample_posterior is True, otherwise its mode.
        """
        posterior = self.encode(x, normalize=normalize)
        if sample_posterior:
            z = posterior.sample(rng)
        else:
            z = posterior.mode()
        dec = self.decode(z, unnormalize=unnormalize)
        return dec, posterior

    def load_weights(self, src_dict) -> None:
        """Strictly load a state dict."""
        self.load_state_dict(src_dict, strict=True)

    @property
    def device(self) -> torch.device:
        """Device of the first parameter."""
        return next(self.parameters()).device

    def get_last_layer(self):
        """Weight tensor of the final decoder convolution."""
        return self.decoder.conv_out.weight

    def remove_weight_norm(self):
        """Bake forced weight normalization into every MPConv1D; returns self."""
        for name, m in self.named_modules():
            if isinstance(m, MPConv1D):
                m.remove_weight_norm()
                log.debug(f"Removed weight norm from {name}")
        return self
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
class Encoder1D(nn.Module):
    """1-D convolutional encoder: conv-in, resnet (+optional attention) stages
    with optional downsampling, a middle block, and a conv-out producing the
    latent moments (2*embed_dim channels when double_z)."""

    def __init__(self,
                 *,
                 dim: int,
                 ch_mult: tuple[int] = (1, 2, 4, 8),
                 num_res_blocks: int,
                 attn_layers: list[int] = [],
                 down_layers: list[int] = [],
                 resamp_with_conv: bool = True,
                 in_dim: int,
                 embed_dim: int,
                 double_z: bool = True,
                 kernel_size: int = 3,
                 clip_act: float = 256.0):
        """
        Args:
            dim: base channel count; per-level width is dim * ch_mult[level].
            ch_mult: channel multiplier per level (also sets the level count).
            num_res_blocks: resnet blocks per level.
            attn_layers: level indices that get attention blocks.
            down_layers: level indices followed by a 2x downsample.
            resamp_with_conv: use a conv (vs. plain resample) in Downsample1D.
            in_dim: input channel count (mel bins).
            embed_dim: latent channel count.
            double_z: output 2*embed_dim channels (mean+logvar moments).
            kernel_size: conv kernel size throughout.
            clip_act: activations are clamped to [-clip_act, clip_act].
        """
        super().__init__()
        self.dim = dim
        self.num_layers = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.in_channels = in_dim
        self.clip_act = clip_act
        self.down_layers = down_layers
        self.attn_layers = attn_layers
        self.conv_in = MPConv1D(in_dim, self.dim, kernel_size=kernel_size)

        # Input multiplier per level: level i goes in_ch_mult[i] -> ch_mult[i].
        in_ch_mult = (1, ) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        # downsampling
        self.down = nn.ModuleList()
        for i_level in range(self.num_layers):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = dim * in_ch_mult[i_level]
            block_out = dim * ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(
                    ResnetBlock1D(in_dim=block_in,
                                  out_dim=block_out,
                                  kernel_size=kernel_size,
                                  use_norm=True))
                block_in = block_out
                # One attention block per resnet block on the selected levels,
                # so forward can index attn[i_block].
                if i_level in attn_layers:
                    attn.append(AttnBlock1D(block_in))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level in down_layers:
                down.downsample = Downsample1D(block_in, resamp_with_conv)
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock1D(in_dim=block_in,
                                         out_dim=block_in,
                                         kernel_size=kernel_size,
                                         use_norm=True)
        self.mid.attn_1 = AttnBlock1D(block_in)
        self.mid.block_2 = ResnetBlock1D(in_dim=block_in,
                                         out_dim=block_in,
                                         kernel_size=kernel_size,
                                         use_norm=True)

        # end: project to the moment channels.
        self.conv_out = MPConv1D(block_in,
                                 2 * embed_dim if double_z else embed_dim,
                                 kernel_size=kernel_size)

        # Scalar gain applied (as 1 + gain) to the output conv.
        self.learnable_gain = nn.Parameter(torch.zeros([]))

    def forward(self, x):

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_layers):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1])
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                # Bound activation magnitudes to [-clip_act, clip_act].
                h = h.clamp(-self.clip_act, self.clip_act)
                hs.append(h)
            if i_level in self.down_layers:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)
        h = h.clamp(-self.clip_act, self.clip_act)

        # end
        h = nonlinearity(h)
        h = self.conv_out(h, gain=(self.learnable_gain + 1))
        return h
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
class Decoder1D(nn.Module):
    """1-D convolutional decoder mirroring Encoder1D: conv-in from the latent,
    a middle block, resnet (+optional attention) stages with optional 2x
    upsampling, and a conv-out back to the data dimension."""

    def __init__(self,
                 *,
                 dim: int,
                 out_dim: int,
                 ch_mult: tuple[int] = (1, 2, 4, 8),
                 num_res_blocks: int,
                 attn_layers: list[int] = [],
                 down_layers: list[int] = [],
                 kernel_size: int = 3,
                 resamp_with_conv: bool = True,
                 in_dim: int,
                 embed_dim: int,
                 clip_act: float = 256.0):
        """
        Args:
            dim: base channel count; per-level width is dim * ch_mult[level].
            out_dim: output channel count (mel bins).
            ch_mult: channel multiplier per level (also sets the level count).
            num_res_blocks: resnet blocks per level (decoder uses one extra).
            attn_layers: level indices that get attention blocks.
            down_layers: encoder downsample levels; shifted by +1 here to place
                the matching upsamples.
            kernel_size: conv kernel size throughout.
            resamp_with_conv: use a conv (vs. plain resample) in Upsample1D.
            in_dim: stored as in_channels (not otherwise used here).
            embed_dim: latent channel count fed to conv_in.
            clip_act: activations are clamped to [-clip_act, clip_act].
        """
        super().__init__()
        self.ch = dim
        self.num_layers = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.in_channels = in_dim
        self.clip_act = clip_act
        self.down_layers = [i + 1 for i in down_layers]  # each downlayer add one

        # compute in_ch_mult, block_in and curr_res at lowest res
        block_in = dim * ch_mult[self.num_layers - 1]

        # z to block_in
        self.conv_in = MPConv1D(embed_dim, block_in, kernel_size=kernel_size)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock1D(in_dim=block_in, out_dim=block_in, use_norm=True)
        self.mid.attn_1 = AttnBlock1D(block_in)
        self.mid.block_2 = ResnetBlock1D(in_dim=block_in, out_dim=block_in, use_norm=True)

        # upsampling: built from the deepest level outward.
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_layers)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = dim * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                block.append(ResnetBlock1D(in_dim=block_in, out_dim=block_out, use_norm=True))
                block_in = block_out
                # One attention block per resnet block on the selected levels.
                if i_level in attn_layers:
                    attn.append(AttnBlock1D(block_in))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level in self.down_layers:
                up.upsample = Upsample1D(block_in, resamp_with_conv)
            self.up.insert(0, up)  # prepend to get consistent order

        # end: project back to the data dimension.
        self.conv_out = MPConv1D(block_in, out_dim, kernel_size=kernel_size)
        # Scalar gain applied (as 1 + gain) to the output conv.
        self.learnable_gain = nn.Parameter(torch.zeros([]))

    def forward(self, z):
        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)
        h = h.clamp(-self.clip_act, self.clip_act)

        # upsampling
        for i_level in reversed(range(self.num_layers)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](h)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
                # Bound activation magnitudes to [-clip_act, clip_act].
                h = h.clamp(-self.clip_act, self.clip_act)
            if i_level in self.down_layers:
                h = self.up[i_level].upsample(h)

        h = nonlinearity(h)
        h = self.conv_out(h, gain=(self.learnable_gain + 1))
        return h
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def VAE_16k(**kwargs) -> VAE:
    """Build the 16 kHz VAE variant (80 mel bins, 20 latent channels)."""
    return VAE(data_dim=80, embed_dim=20, hidden_dim=384, **kwargs)
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def VAE_44k(**kwargs) -> VAE:
    """Build the 44 kHz VAE variant (128 mel bins, 40 latent channels)."""
    return VAE(data_dim=128, embed_dim=40, hidden_dim=512, **kwargs)
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
def get_my_vae(name: str, **kwargs) -> VAE:
    """Build a VAE by variant name ('16k' or '44k').

    Raises:
        ValueError: if *name* is not a known variant.
    """
    factories = {'16k': VAE_16k, '44k': VAE_44k}
    if name not in factories:
        raise ValueError(f'Unknown model: {name}')
    return factories[name](**kwargs)
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
if __name__ == '__main__':
    # Smoke test: build a VAE and report its size.
    # Bug fix: 'standard' is not a registered variant (get_my_vae accepts
    # only '16k' or '44k' and raised ValueError); use '16k'.
    network = get_my_vae('16k')

    # print the number of parameters in terms of millions
    num_params = sum(p.numel() for p in network.parameters()) / 1e6
    print(f'Number of parameters: {num_params:.2f}M')
|
mmaudio/ext/autoencoder/vae_modules.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from einops import rearrange
|
| 5 |
+
|
| 6 |
+
from mmaudio.ext.autoencoder.edm2_utils import (MPConv1D, mp_silu, mp_sum, normalize)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def nonlinearity(x):
    """Swish/SiLU activation; delegates to mp_silu (presumably the
    magnitude-preserving variant from edm2_utils -- see that module)."""
    return mp_silu(x)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ResnetBlock1D(nn.Module):
    """1D residual block built from MPConv1D convolutions.

    Two convolutions, each preceded by a swish nonlinearity; the input is
    blended back in with mp_sum(t=0.3). When in_dim != out_dim the skip path
    is projected, either with a full-width conv (conv_shortcut=True) or a
    1x1 conv.
    """

    def __init__(self, *, in_dim, out_dim=None, conv_shortcut=False, kernel_size=3, use_norm=True):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = in_dim if out_dim is None else out_dim
        self.use_conv_shortcut = conv_shortcut
        self.use_norm = use_norm

        self.conv1 = MPConv1D(self.in_dim, self.out_dim, kernel_size=kernel_size)
        self.conv2 = MPConv1D(self.out_dim, self.out_dim, kernel_size=kernel_size)
        # Projection for the skip path when channel counts differ.
        if self.in_dim != self.out_dim:
            if self.use_conv_shortcut:
                self.conv_shortcut = MPConv1D(self.in_dim, self.out_dim, kernel_size=kernel_size)
            else:
                self.nin_shortcut = MPConv1D(self.in_dim, self.out_dim, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pixel norm across the channel dimension before both branches.
        if self.use_norm:
            x = normalize(x, dim=1)

        residual = self.conv1(nonlinearity(x))
        residual = self.conv2(nonlinearity(residual))

        # Match channel count on the skip path if needed.
        if self.in_dim != self.out_dim:
            x = self.conv_shortcut(x) if self.use_conv_shortcut else self.nin_shortcut(x)

        return mp_sum(x, residual, t=0.3)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class AttnBlock1D(nn.Module):
    """Self-attention block over the temporal axis of a (B, C, T) tensor."""

    def __init__(self, in_channels, num_heads=1):
        super().__init__()
        self.in_channels = in_channels

        self.num_heads = num_heads
        # Fused q/k/v projection and output projection, both 1x1 convs.
        self.qkv = MPConv1D(in_channels, in_channels * 3, kernel_size=1)
        self.proj_out = MPConv1D(in_channels, in_channels, kernel_size=1)

    def forward(self, x):
        h = x
        y = self.qkv(h)
        # Split into heads and the q/k/v triple: (B, heads, C//heads, 3, T).
        y = y.reshape(y.shape[0], self.num_heads, -1, 3, y.shape[-1])
        # Normalize along the per-head channel dim, then unbind q, k, v.
        q, k, v = normalize(y, dim=2).unbind(3)

        # scaled_dot_product_attention expects (..., seq_len, head_dim).
        q = rearrange(q, 'b h c l -> b h l c')
        k = rearrange(k, 'b h c l -> b h l c')
        v = rearrange(v, 'b h c l -> b h l c')

        h = F.scaled_dot_product_attention(q, k, v)
        # Merge heads back into the channel dimension: (B, C, T).
        h = rearrange(h, 'b h l c -> b (h c) l')

        h = self.proj_out(h)

        # Residual blend of input and attention output.
        return mp_sum(x, h, t=0.3)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class Upsample1D(nn.Module):
    """2x nearest-neighbor temporal upsampling, optionally followed by a conv."""

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if with_conv:
            self.conv = MPConv1D(in_channels, in_channels, kernel_size=3)

    def forward(self, x):
        # 'nearest-exact' supports 3D (B, C, T) tensors.
        out = F.interpolate(x, scale_factor=2.0, mode='nearest-exact')
        return self.conv(out) if self.with_conv else out
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class Downsample1D(nn.Module):
    """2x temporal downsampling via average pooling.

    When ``with_conv`` is True, a 1x1 conv is applied both before and after
    the pooling step.
    """

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            # NOTE(review): these are 1x1 convs around the avg-pool; the
            # original "no asymmetric padding in torch conv, must do it
            # ourselves" comment looks like a leftover from a strided-conv
            # variant -- no padding is performed here.
            self.conv1 = MPConv1D(in_channels, in_channels, kernel_size=1)
            self.conv2 = MPConv1D(in_channels, in_channels, kernel_size=1)

    def forward(self, x):

        if self.with_conv:
            x = self.conv1(x)

        # The actual 2x downsampling step.
        x = F.avg_pool1d(x, kernel_size=2, stride=2)

        if self.with_conv:
            x = self.conv2(x)

        return x
|
mmaudio/ext/bigvgan/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2022 NVIDIA CORPORATION.
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
mmaudio/ext/bigvgan/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .bigvgan import BigVGAN
|
mmaudio/ext/bigvgan/activations.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch import nn, sin, pow
|
| 6 |
+
from torch.nn import Parameter
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Snake(nn.Module):
    """Sine-based periodic activation: Snake(x) := x + (1/alpha) * sin^2(alpha * x).

    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha: trainable per-channel frequency parameter
    Reference:
        Liu Ziyin, Tilman Hartwig, Masahito Ueda, https://arxiv.org/abs/2006.08195

    Example:
        >>> a1 = Snake(256)
        >>> x = torch.randn(4, 256, 10)
        >>> y = a1(x)
    """

    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        """Create the activation.

        Args:
            in_features: number of channels (one alpha per channel).
            alpha: initial frequency; higher values = higher frequency.
            alpha_trainable: whether alpha receives gradients.
            alpha_logscale: store alpha in log space (init 0, i.e. exp(0) == 1).
        """
        super().__init__()
        self.in_features = in_features

        self.alpha_logscale = alpha_logscale
        # Log-scale alphas start at zero (so exp gives 1); linear-scale start at `alpha`.
        if alpha_logscale:
            init = torch.zeros(in_features) * alpha
        else:
            init = torch.ones(in_features) * alpha
        self.alpha = Parameter(init)
        self.alpha.requires_grad = alpha_trainable

        # Small epsilon to avoid division by zero when alpha underflows.
        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        """Apply the activation elementwise; alpha broadcasts over (B, C, T)."""
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
        return x + pow(sin(x * alpha), 2) / (alpha + self.no_div_by_zero)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class SnakeBeta(nn.Module):
    """Snake variant with separate magnitude control: x + (1/beta) * sin^2(alpha * x).

    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha: trainable per-channel frequency parameter
        - beta: trainable per-channel magnitude parameter
    Reference:
        Modified from Liu Ziyin, Tilman Hartwig, Masahito Ueda,
        https://arxiv.org/abs/2006.08195

    Example:
        >>> a1 = SnakeBeta(256)
        >>> x = torch.randn(4, 256, 10)
        >>> y = a1(x)
    """

    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        """Create the activation.

        Args:
            in_features: number of channels (one alpha/beta per channel).
            alpha: initial value for both alpha and beta. NOTE(review): there
                is no separate beta argument; beta is also seeded from
                `alpha`, matching the original implementation.
            alpha_trainable: whether alpha and beta receive gradients.
            alpha_logscale: store both parameters in log space
                (init 0, i.e. exp(0) == 1).
        """
        super().__init__()
        self.in_features = in_features

        self.alpha_logscale = alpha_logscale
        # Log-scale parameters start at zero (exp gives 1); linear-scale at `alpha`.
        init = torch.zeros if alpha_logscale else torch.ones
        self.alpha = Parameter(init(in_features) * alpha)
        self.beta = Parameter(init(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable
        self.beta.requires_grad = alpha_trainable

        # Small epsilon to avoid division by zero when beta underflows.
        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        """Apply the activation elementwise; alpha/beta broadcast over (B, C, T)."""
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)
        beta = self.beta.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
            beta = torch.exp(beta)
        return x + pow(sin(x * alpha), 2) / (beta + self.no_div_by_zero)
|
mmaudio/ext/bigvgan/alias_free_torch/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
from .filter import *
|
| 5 |
+
from .resample import *
|
| 6 |
+
from .act import *
|
mmaudio/ext/bigvgan/alias_free_torch/act.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from .resample import UpSample1d, DownSample1d
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Activation1d(nn.Module):
    """Anti-aliased activation: upsample -> pointwise activation -> downsample.

    Applying the nonlinearity at a higher sample rate suppresses the aliasing
    a pointwise activation would otherwise introduce.
    """

    def __init__(self,
                 activation,
                 up_ratio: int = 2,
                 down_ratio: int = 2,
                 up_kernel_size: int = 12,
                 down_kernel_size: int = 12):
        super().__init__()
        self.up_ratio = up_ratio
        self.down_ratio = down_ratio
        self.act = activation
        self.upsample = UpSample1d(up_ratio, up_kernel_size)
        self.downsample = DownSample1d(down_ratio, down_kernel_size)

    def forward(self, x):
        # x: [B, C, T]
        return self.downsample(self.act(self.upsample(x)))
|
mmaudio/ext/bigvgan/alias_free_torch/filter.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
import math
|
| 8 |
+
|
| 9 |
+
# Use the native torch.sinc when the installed torch provides it; otherwise
# fall back to a pure-Python implementation.
if 'sinc' in dir(torch):
    sinc = torch.sinc
else:
    # This code is adopted from adefossez's julius.core.sinc under the MIT License
    # https://adefossez.github.io/julius/julius/core.html
    # LICENSE is in incl_licenses directory.
    def sinc(x: torch.Tensor):
        """
        Implementation of sinc, i.e. sin(pi * x) / (pi * x)
        __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
        """
        # torch.where handles the removable singularity at x == 0.
        return torch.where(x == 0,
                           torch.tensor(1., device=x.device, dtype=x.dtype),
                           torch.sin(math.pi * x) / math.pi / x)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License
|
| 26 |
+
# https://adefossez.github.io/julius/julius/lowpass.html
|
| 27 |
+
# LICENSE is in incl_licenses directory.
|
| 28 |
+
def kaiser_sinc_filter1d(cutoff, half_width, kernel_size):  # return filter [1,1,kernel_size]
    """Build a Kaiser-windowed sinc low-pass filter.

    Args:
        cutoff: normalized cutoff frequency, expected in [0, 0.5].
        half_width: transition-band half width, controls the Kaiser beta.
        kernel_size: number of taps; both even and odd sizes are supported.

    Returns:
        Tensor of shape [1, 1, kernel_size]. Taps sum to 1, except for the
        degenerate cutoff == 0 case, which returns an all-zero filter.
    """
    even = (kernel_size % 2 == 0)
    half_size = kernel_size // 2

    # Kaiser window design: estimate stopband attenuation A from the
    # transition width, then derive beta with the standard Kaiser formula.
    delta_f = 4 * half_width
    A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
    if A > 50.:
        beta = 0.1102 * (A - 8.7)
    elif A >= 21.:
        beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21.)
    else:
        beta = 0.
    window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)

    # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio
    # Sample instants; even kernels are offset by half a sample so the
    # filter stays symmetric. Cast the odd branch to float so both branches
    # produce a floating-point tensor (torch.arange alone yields int64).
    if even:
        time = (torch.arange(-half_size, half_size) + 0.5)
    else:
        time = (torch.arange(kernel_size) - half_size).float()
    if cutoff == 0:
        # Degenerate cutoff: return the zero filter directly. The previous
        # code normalized it by its (zero) sum, producing NaNs.
        filt = torch.zeros_like(time)
    else:
        filt = 2 * cutoff * window * sinc(2 * cutoff * time)
        # Normalize the taps to sum to 1; otherwise a small amount of the
        # constant component of the input signal leaks through.
        filt /= filt.sum()
    return filt.view(1, 1, kernel_size)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class LowPassFilter1d(nn.Module):
    """Strided 1D low-pass filtering with a fixed Kaiser-windowed sinc kernel.

    The kernel is precomputed in __init__ and registered as a buffer; forward
    applies it depthwise (groups=C) so every channel is filtered independently.
    With stride > 1 this doubles as an anti-aliased decimator.
    """

    def __init__(self,
                 cutoff=0.5,
                 half_width=0.6,
                 stride: int = 1,
                 padding: bool = True,
                 padding_mode: str = 'replicate',
                 kernel_size: int = 12):
        # kernel_size should be even number for stylegan3 setup,
        # in this implementation, odd number is also possible.
        super().__init__()
        if cutoff < -0.:
            raise ValueError("Minimum cutoff must be larger than zero.")
        if cutoff > 0.5:
            raise ValueError("A cutoff above 0.5 does not make sense.")
        self.kernel_size = kernel_size
        self.even = (kernel_size % 2 == 0)
        # Asymmetric padding (one fewer on the left) keeps even kernels aligned.
        self.pad_left = kernel_size // 2 - int(self.even)
        self.pad_right = kernel_size // 2
        self.stride = stride
        self.padding = padding
        self.padding_mode = padding_mode
        filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
        self.register_buffer("filter", filter)

    #input [B, C, T]
    def forward(self, x):
        _, C, _ = x.shape

        if self.padding:
            x = F.pad(x, (self.pad_left, self.pad_right),
                      mode=self.padding_mode)
        # Depthwise conv: the single [1, 1, K] kernel is expanded across all
        # C channels (expand creates a view, no copy).
        out = F.conv1d(x, self.filter.expand(C, -1, -1),
                       stride=self.stride, groups=C)

        return out
|
mmaudio/ext/bigvgan/alias_free_torch/resample.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
from .filter import LowPassFilter1d
|
| 7 |
+
from .filter import kaiser_sinc_filter1d
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class UpSample1d(nn.Module):
    """Anti-aliased temporal upsampling by `ratio` via transposed depthwise conv.

    A Kaiser-windowed sinc low-pass kernel (cutoff 0.5/ratio) is applied with
    conv_transpose1d so interpolation and anti-alias filtering happen in one
    step.
    """

    def __init__(self, ratio=2, kernel_size=None):
        super().__init__()
        self.ratio = ratio
        # Default kernel length scales with the ratio and is always even.
        self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
        self.stride = ratio
        # Edge padding before the transposed conv, and the trim amounts after
        # it -- chosen so the output aligns with ratio * T (TODO confirm for
        # odd kernel_size overrides).
        self.pad = self.kernel_size // ratio - 1
        self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
        self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
        filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio,
                                      half_width=0.6 / ratio,
                                      kernel_size=self.kernel_size)
        self.register_buffer("filter", filter)

    # x: [B, C, T]
    def forward(self, x):
        _, C, _ = x.shape

        # Replicate-pad, upsample with the depthwise transposed conv (scaled
        # by `ratio` to preserve amplitude), then trim the filter transients.
        x = F.pad(x, (self.pad, self.pad), mode='replicate')
        x = self.ratio * F.conv_transpose1d(
            x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
        x = x[..., self.pad_left:-self.pad_right]

        return x
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class DownSample1d(nn.Module):
    """Anti-aliased temporal downsampling by `ratio`: low-pass filter + stride."""

    def __init__(self, ratio=2, kernel_size=None):
        super().__init__()
        self.ratio = ratio
        # Default kernel length scales with the ratio and is always even.
        self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
        # The strided LowPassFilter1d performs filtering and decimation in one pass.
        self.lowpass = LowPassFilter1d(cutoff=0.5 / ratio,
                                       half_width=0.6 / ratio,
                                       stride=ratio,
                                       kernel_size=self.kernel_size)

    def forward(self, x):
        return self.lowpass(x)
|
mmaudio/ext/bigvgan/bigvgan.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from omegaconf import OmegaConf
|
| 6 |
+
|
| 7 |
+
from mmaudio.ext.bigvgan.models import BigVGANVocoder
|
| 8 |
+
|
| 9 |
+
_bigvgan_vocoder_path = Path(__file__).parent / 'bigvgan_vocoder.yml'
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class BigVGAN(nn.Module):
    """Thin inference wrapper around BigVGANVocoder.

    Loads the vocoder config from a YAML file and the generator weights from
    a checkpoint, then strips weight norm so the module is inference-ready.
    """

    def __init__(self, ckpt_path, config_path=_bigvgan_vocoder_path):
        super().__init__()
        vocoder_cfg = OmegaConf.load(config_path)
        self.vocoder = BigVGANVocoder(vocoder_cfg).eval()
        # The checkpoint stores the generator weights under the 'generator' key.
        vocoder_ckpt = torch.load(ckpt_path, map_location='cpu', weights_only=True)['generator']
        self.vocoder.load_state_dict(vocoder_ckpt)

        self.weight_norm_removed = False
        self.remove_weight_norm()

    @torch.inference_mode()
    def forward(self, x):
        # x: input features for the vocoder (presumably a mel spectrogram --
        # TODO confirm shape against BigVGANVocoder).
        assert self.weight_norm_removed, 'call remove_weight_norm() before inference'
        return self.vocoder(x)

    def remove_weight_norm(self):
        # Weight norm is a training-time reparameterization; remove it for inference.
        self.vocoder.remove_weight_norm()
        self.weight_norm_removed = True
        return self
|
mmaudio/ext/bigvgan/bigvgan_vocoder.yml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:03f207de4fade6869ad17bf6e1c22eb2234edc4b54686067a38bd267379b6683
|
| 3 |
+
size 745
|
mmaudio/ext/bigvgan/env.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import shutil
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class AttrDict(dict):
    """Dict whose items are also reachable as attributes (d.key == d['key'])."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Point the instance __dict__ at the mapping itself so attribute
        # access and item access share the same storage.
        self.__dict__ = self
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def build_env(config, config_name, path):
    """Copy the config file into ``path``/``config_name``, creating ``path`` if needed.

    Does nothing when ``config`` already names the destination path.
    """
    target = os.path.join(path, config_name)
    if config == target:
        return
    os.makedirs(path, exist_ok=True)
    shutil.copyfile(config, target)
|
mmaudio/ext/bigvgan/incl_licenses/LICENSE_1
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2020 Jungil Kong
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
mmaudio/ext/bigvgan/incl_licenses/LICENSE_2
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2020 Edward Dixon
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
mmaudio/ext/bigvgan/incl_licenses/LICENSE_3
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright [yyyy] [name of copyright owner]
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
mmaudio/ext/bigvgan/incl_licenses/LICENSE_4
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
BSD 3-Clause License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2019, Seungwon Park 박승원
|
| 4 |
+
All rights reserved.
|
| 5 |
+
|
| 6 |
+
Redistribution and use in source and binary forms, with or without
|
| 7 |
+
modification, are permitted provided that the following conditions are met:
|
| 8 |
+
|
| 9 |
+
1. Redistributions of source code must retain the above copyright notice, this
|
| 10 |
+
list of conditions and the following disclaimer.
|
| 11 |
+
|
| 12 |
+
2. Redistributions in binary form must reproduce the above copyright notice,
|
| 13 |
+
this list of conditions and the following disclaimer in the documentation
|
| 14 |
+
and/or other materials provided with the distribution.
|
| 15 |
+
|
| 16 |
+
3. Neither the name of the copyright holder nor the names of its
|
| 17 |
+
contributors may be used to endorse or promote products derived from
|
| 18 |
+
this software without specific prior written permission.
|
| 19 |
+
|
| 20 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 21 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 22 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 23 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 24 |
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 25 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 26 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 27 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 28 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 29 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
mmaudio/ext/bigvgan/incl_licenses/LICENSE_5
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright 2020 Alexandre Défossez
|
| 2 |
+
|
| 3 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
|
| 4 |
+
associated documentation files (the "Software"), to deal in the Software without restriction,
|
| 5 |
+
including without limitation the rights to use, copy, modify, merge, publish, distribute,
|
| 6 |
+
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
|
| 7 |
+
furnished to do so, subject to the following conditions:
|
| 8 |
+
|
| 9 |
+
The above copyright notice and this permission notice shall be included in all copies or
|
| 10 |
+
substantial portions of the Software.
|
| 11 |
+
|
| 12 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
|
| 13 |
+
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
| 14 |
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
| 15 |
+
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 16 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
mmaudio/ext/bigvgan/models.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2022 NVIDIA CORPORATION.
|
| 2 |
+
# Licensed under the MIT license.
|
| 3 |
+
|
| 4 |
+
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
|
| 5 |
+
# LICENSE is in incl_licenses directory.
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from torch.nn import Conv1d, ConvTranspose1d
|
| 10 |
+
from torch.nn.utils.parametrizations import weight_norm
|
| 11 |
+
from torch.nn.utils.parametrize import remove_parametrizations
|
| 12 |
+
|
| 13 |
+
from mmaudio.ext.bigvgan import activations
|
| 14 |
+
from mmaudio.ext.bigvgan.alias_free_torch import *
|
| 15 |
+
from mmaudio.ext.bigvgan.utils import get_padding, init_weights
|
| 16 |
+
|
| 17 |
+
LRELU_SLOPE = 0.1
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class AMPBlock1(torch.nn.Module):
    """Anti-aliased multi-periodicity (AMP) residual block, variant 1.

    Two stacks of weight-normalised 1-D convolutions are interleaved with
    anti-aliased periodic activations: ``convs1`` uses the given dilations,
    ``convs2`` uses dilation 1, and each (conv1, conv2) pair is wrapped in a
    residual connection.

    Args:
        h: config object; only ``h.snake_logscale`` is read here.
        channels: number of channels (input == output; length is preserved).
        kernel_size: convolution kernel size.
        dilation: dilations for the first conv of each residual pair.
            Generalized from three hard-coded entries to any length
            (backward-compatible with the default ``(1, 3, 5)``).
        activation: ``'snake'`` or ``'snakebeta'``; anything else raises
            NotImplementedError.
    """

    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None):
        super(AMPBlock1, self).__init__()
        self.h = h

        def _conv(d):
            # Weight-normalised, stride-1, length-preserving dilated conv.
            return weight_norm(
                Conv1d(channels,
                       channels,
                       kernel_size,
                       1,
                       dilation=d,
                       padding=get_padding(kernel_size, d)))

        # One dilated conv per entry in `dilation` (was three duplicated
        # literal constructions indexing dilation[0..2]).
        self.convs1 = nn.ModuleList([_conv(d) for d in dilation])
        self.convs1.apply(init_weights)

        # Matching dilation-1 convs, one per residual pair.
        self.convs2 = nn.ModuleList([_conv(1) for _ in dilation])
        self.convs2.apply(init_weights)

        self.num_layers = len(self.convs1) + len(self.convs2)  # total number of conv layers

        # One anti-aliased periodic activation per conv layer.
        if activation == 'snake':  # periodic nonlinearity with snake function and anti-aliasing
            make_act = lambda: activations.Snake(channels, alpha_logscale=h.snake_logscale)
        elif activation == 'snakebeta':  # periodic nonlinearity with snakebeta function and anti-aliasing
            make_act = lambda: activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)
        else:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )
        self.activations = nn.ModuleList(
            [Activation1d(activation=make_act()) for _ in range(self.num_layers)])

    def forward(self, x):
        """Apply the residual AMP block; input shape (B, C, T) is preserved."""
        # Even-indexed activations precede convs1; odd-indexed precede convs2.
        acts1, acts2 = self.activations[::2], self.activations[1::2]
        for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
            xt = a1(x)
            xt = c1(xt)
            xt = a2(xt)
            xt = c2(xt)
            x = xt + x

        return x

    def remove_weight_norm(self):
        """Strip weight-norm parametrizations (typically before inference/export)."""
        for l in self.convs1:
            remove_parametrizations(l, 'weight')
        for l in self.convs2:
            remove_parametrizations(l, 'weight')
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class AMPBlock2(torch.nn.Module):
    """Anti-aliased multi-periodicity (AMP) residual block, variant 2.

    Lighter than AMPBlock1: a single dilated conv per residual step, each
    preceded by an anti-aliased Snake/SnakeBeta activation.

    Args:
        h: config object; only ``h.snake_logscale`` is read here.
        channels: number of channels (input == output; length is preserved).
        kernel_size: convolution kernel size.
        dilation: one dilation per conv layer. Generalized from two
            hard-coded entries to any length (backward-compatible with the
            default ``(1, 3)``).
        activation: ``'snake'`` or ``'snakebeta'``; anything else raises
            NotImplementedError.
    """

    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None):
        super(AMPBlock2, self).__init__()
        self.h = h

        # One weight-normalised, length-preserving conv per dilation entry
        # (was two duplicated literal constructions indexing dilation[0..1]).
        self.convs = nn.ModuleList([
            weight_norm(
                Conv1d(channels,
                       channels,
                       kernel_size,
                       1,
                       dilation=d,
                       padding=get_padding(kernel_size, d))) for d in dilation
        ])
        self.convs.apply(init_weights)

        self.num_layers = len(self.convs)  # total number of conv layers

        # One anti-aliased periodic activation per conv layer.
        if activation == 'snake':  # periodic nonlinearity with snake function and anti-aliasing
            make_act = lambda: activations.Snake(channels, alpha_logscale=h.snake_logscale)
        elif activation == 'snakebeta':  # periodic nonlinearity with snakebeta function and anti-aliasing
            make_act = lambda: activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)
        else:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )
        self.activations = nn.ModuleList(
            [Activation1d(activation=make_act()) for _ in range(self.num_layers)])

    def forward(self, x):
        """Apply the residual AMP block; input shape (B, C, T) is preserved."""
        for c, a in zip(self.convs, self.activations):
            xt = a(x)
            xt = c(xt)
            x = xt + x

        return x

    def remove_weight_norm(self):
        """Strip weight-norm parametrizations (typically before inference/export)."""
        for l in self.convs:
            remove_parametrizations(l, 'weight')
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
class BigVGANVocoder(torch.nn.Module):
    # this is our main BigVGAN model. Applies anti-aliased periodic activation for resblocks.
    def __init__(self, h):
        """Build the vocoder from config *h*.

        Reads from *h*: num_mels, upsample_initial_channel, upsample_rates,
        upsample_kernel_sizes, resblock ('1' selects AMPBlock1, anything else
        AMPBlock2), resblock_kernel_sizes, resblock_dilation_sizes,
        activation ('snake'/'snakebeta'), snake_logscale.
        Raises NotImplementedError for any other h.activation value.
        """
        super().__init__()
        self.h = h

        self.num_kernels = len(h.resblock_kernel_sizes)
        self.num_upsamples = len(h.upsample_rates)

        # pre conv: mel channels -> upsample_initial_channel, length-preserving (k=7, pad=3)
        self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))

        # define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
        resblock = AMPBlock1 if h.resblock == '1' else AMPBlock2

        # transposed conv-based upsamplers. does not apply anti-aliasing
        # Channel count halves at every stage: initial_channel // 2**(i+1).
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
            self.ups.append(
                nn.ModuleList([
                    weight_norm(
                        ConvTranspose1d(h.upsample_initial_channel // (2**i),
                                        h.upsample_initial_channel // (2**(i + 1)),
                                        k,
                                        u,
                                        padding=(k - u) // 2))
                ]))

        # residual blocks using anti-aliased multi-periodicity composition modules (AMP)
        # num_kernels resblocks per upsampling stage, stored flat; forward()
        # indexes them as resblocks[i * num_kernels + j].
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = h.upsample_initial_channel // (2**(i + 1))
            for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
                self.resblocks.append(resblock(h, ch, k, d, activation=h.activation))

        # post conv
        # NOTE(review): `ch` leaks out of the loop above — it is the channel
        # count of the LAST upsampling stage; assumes h.upsample_rates is
        # non-empty, otherwise `ch` is undefined here.
        if h.activation == "snake":  # periodic nonlinearity with snake function and anti-aliasing
            activation_post = activations.Snake(ch, alpha_logscale=h.snake_logscale)
            self.activation_post = Activation1d(activation=activation_post)
        elif h.activation == "snakebeta":  # periodic nonlinearity with snakebeta function and anti-aliasing
            activation_post = activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale)
            self.activation_post = Activation1d(activation=activation_post)
        else:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )

        # Final projection down to a single waveform channel.
        self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))

        # weight initialization
        for i in range(len(self.ups)):
            self.ups[i].apply(init_weights)
        self.conv_post.apply(init_weights)

    def forward(self, x):
        """Mel spectrogram (B, num_mels, T) -> waveform in [-1, 1] via tanh.

        Each stage upsamples, then averages the outputs of its num_kernels
        parallel resblocks.
        """
        # pre conv
        x = self.conv_pre(x)

        for i in range(self.num_upsamples):
            # upsampling
            for i_up in range(len(self.ups[i])):
                x = self.ups[i][i_up](x)
            # AMP blocks
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            # Mean over the parallel resblock branches of this stage.
            x = xs / self.num_kernels

        # post conv
        x = self.activation_post(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        """Strip all weight-norm parametrizations, including inside resblocks."""
        print('Removing weight norm...')
        for l in self.ups:
            for l_i in l:
                remove_parametrizations(l_i, 'weight')
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_parametrizations(self.conv_pre, 'weight')
        remove_parametrizations(self.conv_post, 'weight')
|
mmaudio/ext/bigvgan/utils.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch.nn.utils.parametrizations import weight_norm
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def init_weights(m, mean=0.0, std=0.01):
    """Re-initialize a conv module's weights from N(mean, std), in place.

    Intended for use with ``Module.apply``; modules whose class name does
    not contain "Conv" are left untouched.
    """
    if "Conv" in type(m).__name__:
        m.weight.data.normal_(mean, std)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def apply_weight_norm(m):
|
| 17 |
+
classname = m.__class__.__name__
|
| 18 |
+
if classname.find("Conv") != -1:
|
| 19 |
+
weight_norm(m)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_padding(kernel_size, dilation=1):
    """Return the padding that keeps output length == input length for a
    stride-1 dilated 1-D convolution (i.e. "same" padding for odd kernels)."""
    return (kernel_size - 1) * dilation // 2
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def load_checkpoint(filepath, device):
    """Load a torch checkpoint from *filepath* onto *device*.

    Args:
        filepath: path to a file produced by ``torch.save``.
        device: ``map_location`` passed to ``torch.load`` (e.g. 'cpu').

    Returns:
        The deserialized checkpoint object (typically a dict).

    Raises:
        FileNotFoundError: if *filepath* does not exist or is not a file.
            (Was a bare ``assert``, which is silently stripped under
            ``python -O``.)
    """
    if not os.path.isfile(filepath):
        raise FileNotFoundError(f"Checkpoint file not found: '{filepath}'")
    print("Loading '{}'".format(filepath))
    # NOTE(review): torch.load without weights_only=True unpickles arbitrary
    # objects — only load checkpoints from trusted sources.
    checkpoint_dict = torch.load(filepath, map_location=device)
    print("Complete.")
    return checkpoint_dict
|
mmaudio/ext/bigvgan_v2/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2024 NVIDIA CORPORATION.
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
mmaudio/ext/bigvgan_v2/__init__.py
ADDED
|
File without changes
|
mmaudio/ext/bigvgan_v2/activations.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch import nn, sin, pow
|
| 6 |
+
from torch.nn import Parameter
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Snake(nn.Module):
    """Sine-based periodic activation: Snake(x) = x + (1/alpha) * sin^2(alpha * x).

    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha: trainable per-channel parameter controlling frequency
    References:
        - Liu Ziyin, Tilman Hartwig, Masahito Ueda:
          https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = Snake(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    """

    def __init__(
        self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False
    ):
        """Create the activation.

        in_features: number of channels (one alpha per channel).
        alpha: initial frequency; higher values = higher frequency. Trained
            with the rest of the model unless alpha_trainable is False.
        alpha_logscale: store alpha in log space (initialized to zeros, so
            the effective alpha starts at exp(0) = 1).
        """
        super(Snake, self).__init__()
        self.in_features = in_features

        # Log-scale alphas start at zero; linear-scale alphas start at one.
        self.alpha_logscale = alpha_logscale
        base = torch.zeros if alpha_logscale else torch.ones
        self.alpha = Parameter(base(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable

        # Small epsilon guarding the 1/alpha division.
        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        """Apply Snake elementwise; alpha is broadcast as (1, C, 1) over (B, C, T)."""
        a = self.alpha.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            a = torch.exp(a)
        x = x + (1.0 / (a + self.no_div_by_zero)) * pow(sin(x * a), 2)

        return x
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class SnakeBeta(nn.Module):
    """Snake variant with a separate magnitude parameter:
    SnakeBeta(x) = x + (1/beta) * sin^2(alpha * x).

    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha: trainable per-channel parameter controlling frequency
        - beta: trainable per-channel parameter controlling magnitude
    References:
        - Modified from Liu Ziyin, Tilman Hartwig, Masahito Ueda:
          https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = SnakeBeta(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    """

    def __init__(
        self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False
    ):
        """Create the activation.

        in_features: number of channels (one alpha/beta per channel).
        alpha: initial value; higher values = higher frequency/magnitude.
            Trained with the rest of the model unless alpha_trainable is
            False.
        alpha_logscale: store alpha/beta in log space (initialized to zeros,
            so the effective values start at exp(0) = 1).
        """
        super(SnakeBeta, self).__init__()
        self.in_features = in_features

        # Log-scale parameters start at zero; linear-scale at one.
        self.alpha_logscale = alpha_logscale
        base = torch.zeros if alpha_logscale else torch.ones
        # NOTE(review): beta is initialized from `alpha` (there is no beta
        # argument in the signature) — kept as-is to match original behavior.
        self.alpha = Parameter(base(in_features) * alpha)
        self.beta = Parameter(base(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable
        self.beta.requires_grad = alpha_trainable

        # Small epsilon guarding the 1/beta division.
        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        """Apply SnakeBeta elementwise; parameters broadcast as (1, C, 1) over (B, C, T)."""
        a = self.alpha.unsqueeze(0).unsqueeze(-1)
        b = self.beta.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            a = torch.exp(a)
            b = torch.exp(b)
        x = x + (1.0 / (b + self.no_div_by_zero)) * pow(sin(x * a), 2)

        return x
|
mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/__init__.py
ADDED
|
File without changes
|
mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2024 NVIDIA CORPORATION.
|
| 2 |
+
# Licensed under the MIT license.
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
from alias_free_activation.torch.resample import UpSample1d, DownSample1d
|
| 7 |
+
|
| 8 |
+
# load fused CUDA kernel: this enables importing anti_alias_activation_cuda
|
| 9 |
+
from alias_free_activation.cuda import load
|
| 10 |
+
|
| 11 |
+
# JIT-compile and load the fused CUDA extension at import time; importing this
# module therefore requires a working CUDA toolchain.
anti_alias_activation_cuda = load.load()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class FusedAntiAliasActivation(torch.autograd.Function):
    """
    Fused upsample -> snake activation -> downsample CUDA op.

    Assumes filter size 12, replication padding on upsampling/downsampling,
    and logscale alpha/beta parameters as inputs. The hyperparameters are
    hard-coded in the kernel to maximize speed.
    NOTE: The fused kernel is incorrect for Activation1d with different
    hyperparameters.
    """

    @staticmethod
    def forward(ctx, inputs, up_ftr, down_ftr, alpha, beta):
        # Nothing is saved on ctx: this op is inference-only (see backward).
        activation_results = anti_alias_activation_cuda.forward(
            inputs, up_ftr, down_ftr, alpha, beta
        )

        return activation_results

    @staticmethod
    def backward(ctx, output_grads):
        # The fused CUDA kernel has no backward implementation; use the
        # unfused torch path in Activation1d when gradients are required.
        # (Removed unreachable `return output_grads, None, None` that followed
        # the raise — it was dead code and had the wrong arity anyway: five
        # forward inputs would require five gradient outputs.)
        raise NotImplementedError
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class Activation1d(nn.Module):
    """Upsample -> activation -> downsample, optionally via the fused CUDA kernel."""

    def __init__(
        self,
        activation,
        up_ratio: int = 2,
        down_ratio: int = 2,
        up_kernel_size: int = 12,
        down_kernel_size: int = 12,
        fused: bool = True,
    ):
        super().__init__()
        self.up_ratio = up_ratio
        self.down_ratio = down_ratio
        self.act = activation
        self.upsample = UpSample1d(up_ratio, up_kernel_size)
        self.downsample = DownSample1d(down_ratio, down_kernel_size)

        # Whether to use the fused CUDA kernel instead of the torch fallback.
        self.fused = fused

    def forward(self, x):
        if self.fused:
            # Snake stores a single parameter used for both alpha and beta;
            # SnakeBeta keeps them separate.
            alpha = self.act.alpha.data
            if self.act.__class__.__name__ == "Snake":
                beta = self.act.alpha.data
            else:
                beta = self.act.beta.data
            if not self.act.alpha_logscale:
                # Exp is baked into the cuda kernel; cancel it out with a log.
                alpha = torch.log(alpha)
                beta = torch.log(beta)
            return FusedAntiAliasActivation.apply(
                x, self.upsample.filter, self.downsample.lowpass.filter, alpha, beta
            )
        # Pure-torch fallback path.
        out = self.upsample(x)
        out = self.act(out)
        return self.downsample(out)
|
mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* coding=utf-8
|
| 2 |
+
* Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
#include <torch/extension.h>
|
| 18 |
+
|
| 19 |
+
// Forward declaration of the CUDA implementation (defined in
// anti_alias_activation_cuda.cu).
extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta);

// Python binding: exposes the fused anti-alias activation forward pass only
// (there is no backward kernel).
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &fwd_cuda, "Anti-Alias Activation forward (CUDA)");
}
|
mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* coding=utf-8
|
| 2 |
+
* Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
#include <ATen/ATen.h>
|
| 18 |
+
#include <cuda.h>
|
| 19 |
+
#include <cuda_runtime.h>
|
| 20 |
+
#include <cuda_fp16.h>
|
| 21 |
+
#include <cuda_profiler_api.h>
|
| 22 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 23 |
+
#include <torch/extension.h>
|
| 24 |
+
#include "type_shim.h"
|
| 25 |
+
#include <assert.h>
|
| 26 |
+
#include <cfloat>
|
| 27 |
+
#include <limits>
|
| 28 |
+
#include <stdint.h>
|
| 29 |
+
#include <c10/macros/Macros.h>
|
| 30 |
+
|
| 31 |
+
namespace
{
    // Hard-coded hyperparameters
    // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
    constexpr int ELEMENTS_PER_LDG_STG = 1; //(WARP_ITERATIONS < 4) ? 1 : 4;
    constexpr int BUFFER_SIZE = 32;
    constexpr int FILTER_SIZE = 12;
    constexpr int HALF_FILTER_SIZE = 6;
    constexpr int UPSAMPLE_REPLICATION_PAD = 5; // 5 on each side, matching torch impl
    constexpr int DOWNSAMPLE_REPLICATION_PAD_LEFT = 5;  // matching torch impl
    constexpr int DOWNSAMPLE_REPLICATION_PAD_RIGHT = 6; // matching torch impl

    // Fused x2-upsample -> snake activation -> x2-downsample kernel.
    // Each thread processes BUFFER_SIZE output samples of one channel.
    template <typename input_t, typename output_t, typename acc_t>
    __global__ void anti_alias_activation_forward(
        output_t *dst,
        const input_t *src,
        const input_t *up_ftr,
        const input_t *down_ftr,
        const input_t *alpha,
        const input_t *beta,
        int batch_size,
        int channels,
        int seq_len)
    {
        // Up and downsample filters
        input_t up_filter[FILTER_SIZE];
        input_t down_filter[FILTER_SIZE];

        // Load data from global memory including extra indices reserved for replication paddings
        input_t elements[2 * FILTER_SIZE + 2 * BUFFER_SIZE + 2 * UPSAMPLE_REPLICATION_PAD] = {0};
        input_t intermediates[2 * FILTER_SIZE + 2 * BUFFER_SIZE + DOWNSAMPLE_REPLICATION_PAD_LEFT + DOWNSAMPLE_REPLICATION_PAD_RIGHT] = {0};

        // Output stores downsampled output before writing to dst
        output_t output[BUFFER_SIZE];

        // blockDim/threadIdx = (128, 1, 1)
        // gridDim/blockIdx = (seq_blocks, channels, batches)
        int block_offset = (blockIdx.x * 128 * BUFFER_SIZE + seq_len * (blockIdx.y + gridDim.y * blockIdx.z));
        int local_offset = threadIdx.x * BUFFER_SIZE;
        int seq_offset = blockIdx.x * 128 * BUFFER_SIZE + local_offset;

        // intermediate have double the seq_len
        int intermediate_local_offset = threadIdx.x * BUFFER_SIZE * 2;
        int intermediate_seq_offset = blockIdx.x * 128 * BUFFER_SIZE * 2 + intermediate_local_offset;

        // Get values needed for replication padding before moving pointer
        const input_t *right_most_pntr = src + (seq_len * (blockIdx.y + gridDim.y * blockIdx.z));
        input_t seq_left_most_value = right_most_pntr[0];
        input_t seq_right_most_value = right_most_pntr[seq_len - 1];

        // Move src and dst pointers
        src += block_offset + local_offset;
        dst += block_offset + local_offset;

        // Alpha and beta values for snake activatons. Applies exp by default
        // (parameters are expected in log scale; see the Python wrapper).
        alpha = alpha + blockIdx.y;
        input_t alpha_val = expf(alpha[0]);
        beta = beta + blockIdx.y;
        input_t beta_val = expf(beta[0]);

#pragma unroll
        for (int it = 0; it < FILTER_SIZE; it += 1)
        {
            up_filter[it] = up_ftr[it];
            down_filter[it] = down_ftr[it];
        }

        // Apply replication padding for upsampling, matching torch impl
#pragma unroll
        for (int it = -HALF_FILTER_SIZE; it < BUFFER_SIZE + HALF_FILTER_SIZE; it += 1)
        {
            int element_index = seq_offset + it; // index for element
            if ((element_index < 0) && (element_index >= -UPSAMPLE_REPLICATION_PAD))
            {
                elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_left_most_value;
            }
            if ((element_index >= seq_len) && (element_index < seq_len + UPSAMPLE_REPLICATION_PAD))
            {
                elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_right_most_value;
            }
            if ((element_index >= 0) && (element_index < seq_len))
            {
                elements[2 * (HALF_FILTER_SIZE + it)] = 2 * src[it];
            }
        }

        // Apply upsampling strided convolution and write to intermediates. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT for replication padding of the downsampilng conv later
#pragma unroll
        for (int it = 0; it < (2 * BUFFER_SIZE + 2 * FILTER_SIZE); it += 1)
        {
            input_t acc = 0.0;
            int element_index = intermediate_seq_offset + it; // index for intermediate
#pragma unroll
            for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1)
            {
                if ((element_index + f_idx) >= 0)
                {
                    acc += up_filter[f_idx] * elements[it + f_idx];
                }
            }
            intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] = acc;
        }

        // Apply activation function. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT and DOWNSAMPLE_REPLICATION_PAD_RIGHT for replication padding of the downsampilng conv later
        double no_div_by_zero = 0.000000001;
#pragma unroll
        for (int it = 0; it < 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it += 1)
        {
            // Snake: y = x + (1/beta) * sin^2(x * alpha), applied in place.
            intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] += (1.0 / (beta_val + no_div_by_zero)) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val);
        }

        // Apply replication padding before downsampling conv from intermediates
#pragma unroll
        for (int it = 0; it < DOWNSAMPLE_REPLICATION_PAD_LEFT; it += 1)
        {
            intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT];
        }
#pragma unroll
        for (int it = DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it < DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE + DOWNSAMPLE_REPLICATION_PAD_RIGHT; it += 1)
        {
            intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE - 1];
        }

        // Apply downsample strided convolution (assuming stride=2) from intermediates
#pragma unroll
        for (int it = 0; it < BUFFER_SIZE; it += 1)
        {
            input_t acc = 0.0;
#pragma unroll
            for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1)
            {
                // Add constant DOWNSAMPLE_REPLICATION_PAD_RIGHT to match torch implementation
                acc += down_filter[f_idx] * intermediates[it * 2 + f_idx + DOWNSAMPLE_REPLICATION_PAD_RIGHT];
            }
            output[it] = acc;
        }

        // Write output to dst, guarding the tail of the sequence
#pragma unroll
        for (int it = 0; it < BUFFER_SIZE; it += ELEMENTS_PER_LDG_STG)
        {
            int element_index = seq_offset + it;
            if (element_index < seq_len)
            {
                dst[it] = output[it];
            }
        }
    }

    // Host-side launcher: picks the grid/block shape and launches the kernel
    // on the current CUDA stream.
    template <typename input_t, typename output_t, typename acc_t>
    void dispatch_anti_alias_activation_forward(
        output_t *dst,
        const input_t *src,
        const input_t *up_ftr,
        const input_t *down_ftr,
        const input_t *alpha,
        const input_t *beta,
        int batch_size,
        int channels,
        int seq_len)
    {
        if (seq_len == 0)
        {
            return;
        }
        else
        {
            // Use 128 threads per block to maximize gpu utilization
            constexpr int threads_per_block = 128;
            constexpr int seq_len_per_block = 4096;
            int blocks_per_seq_len = (seq_len + seq_len_per_block - 1) / seq_len_per_block;
            dim3 blocks(blocks_per_seq_len, channels, batch_size);
            dim3 threads(threads_per_block, 1, 1);

            anti_alias_activation_forward<input_t, output_t, acc_t>
                <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, up_ftr, down_ftr, alpha, beta, batch_size, channels, seq_len);
        }
    }
}
|
| 211 |
+
|
| 212 |
+
// Entry point called from the pybind module: validates nothing beyond shape
// assumptions and dispatches on the input's scalar type.
extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta)
{
    // Input is a 3d tensor with dimensions [batches, channels, seq_len]
    const int batches = input.size(0);
    const int channels = input.size(1);
    const int seq_len = input.size(2);

    // Output is allocated without autograd tracking (inference-only op).
    auto act_options = input.options().requires_grad(false);

    torch::Tensor anti_alias_activation_results =
        torch::empty({batches, channels, seq_len}, act_options);

    void *input_ptr = static_cast<void *>(input.data_ptr());
    void *up_filter_ptr = static_cast<void *>(up_filter.data_ptr());
    void *down_filter_ptr = static_cast<void *>(down_filter.data_ptr());
    void *alpha_ptr = static_cast<void *>(alpha.data_ptr());
    void *beta_ptr = static_cast<void *>(beta.data_ptr());
    void *anti_alias_activation_results_ptr = static_cast<void *>(anti_alias_activation_results.data_ptr());

    // Instantiate the launcher for float / half / bfloat16 inputs; the
    // accumulator type is always float.
    DISPATCH_FLOAT_HALF_AND_BFLOAT(
        input.scalar_type(),
        "dispatch anti alias activation_forward",
        dispatch_anti_alias_activation_forward<scalar_t, scalar_t, float>(
            reinterpret_cast<scalar_t *>(anti_alias_activation_results_ptr),
            reinterpret_cast<const scalar_t *>(input_ptr),
            reinterpret_cast<const scalar_t *>(up_filter_ptr),
            reinterpret_cast<const scalar_t *>(down_filter_ptr),
            reinterpret_cast<const scalar_t *>(alpha_ptr),
            reinterpret_cast<const scalar_t *>(beta_ptr),
            batches,
            channels,
            seq_len););
    return anti_alias_activation_results;
}
|
mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* coding=utf-8
|
| 2 |
+
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*This code is copied fron NVIDIA apex:
|
| 18 |
+
* https://github.com/NVIDIA/apex
|
| 19 |
+
* with minor changes. */
|
| 20 |
+
|
| 21 |
+
// Fallback for older PyTorch headers where TORCH_CHECK is not defined and the
// assertion macro was still named AT_CHECK.
#ifndef TORCH_CHECK
#define TORCH_CHECK AT_CHECK
#endif

// The tensor accessor name differs across PyTorch versions; VERSION_GE_1_3 is
// presumably supplied by the build flags — confirm against the extension's
// compile options.
#ifdef VERSION_GE_1_3
#define DATA_PTR data_ptr
#else
#define DATA_PTR data
#endif
|
mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2024 NVIDIA CORPORATION.
|
| 2 |
+
# Licensed under the MIT license.
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import pathlib
|
| 6 |
+
import subprocess
|
| 7 |
+
|
| 8 |
+
from torch.utils import cpp_extension
|
| 9 |
+
|
| 10 |
+
"""
Setting this param to a list has a problem of generating different compilation
commands (with a different order of architectures) and leading to recompilation
of fused kernels. Set it to an empty string to avoid recompilation and assign
arch flags explicitly in extra_cuda_cflags below.
"""
# Cleared at import time so torch.utils.cpp_extension does not inject its own
# arch flags; the gencode flags are passed explicitly in load() below.
os.environ["TORCH_CUDA_ARCH_LIST"] = ""
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def load():
    """JIT-compile and return the fused anti-alias activation CUDA extension.

    Builds anti_alias_activation.cpp + anti_alias_activation_cuda.cu into a
    loadable module under a "build" directory next to this file. Requires
    nvcc to be available at ``cpp_extension.CUDA_HOME``.
    """
    # Check if cuda 11 is installed for compute capability 8.0
    cc_flag = []
    _, bare_metal_major, _ = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
    if int(bare_metal_major) >= 11:
        cc_flag.append("-gencode")
        cc_flag.append("arch=compute_80,code=sm_80")

    # Build path
    srcpath = pathlib.Path(__file__).parent.absolute()
    buildpath = srcpath / "build"
    _create_build_dir(buildpath)

    # Helper function to build the kernels.
    def _cpp_extention_load_helper(name, sources, extra_cuda_flags):
        return cpp_extension.load(
            name=name,
            sources=sources,
            build_directory=buildpath,
            extra_cflags=[
                "-O3",
            ],
            # Baseline arch is sm_70; sm_80 is appended via cc_flag when the
            # installed CUDA toolkit supports it.
            extra_cuda_cflags=[
                "-O3",
                "-gencode",
                "arch=compute_70,code=sm_70",
                "--use_fast_math",
            ]
            + extra_cuda_flags
            + cc_flag,
            verbose=True,
        )

    extra_cuda_flags = [
        "-U__CUDA_NO_HALF_OPERATORS__",
        "-U__CUDA_NO_HALF_CONVERSIONS__",
        "--expt-relaxed-constexpr",
        "--expt-extended-lambda",
    ]

    sources = [
        srcpath / "anti_alias_activation.cpp",
        srcpath / "anti_alias_activation_cuda.cu",
    ]
    anti_alias_activation_cuda = _cpp_extention_load_helper(
        "anti_alias_activation_cuda", sources, extra_cuda_flags
    )

    return anti_alias_activation_cuda
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _get_cuda_bare_metal_version(cuda_dir):
    """Return (raw ``nvcc -V`` output, major version str, minor version str).

    NOTE(review): only the first character of the minor version is kept
    (``release[1][0]``), so a hypothetical minor >= 10 would be truncated —
    this matches the upstream apex/BigVGAN behavior and is left unchanged.
    """
    raw_output = subprocess.check_output(
        [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
    )
    output = raw_output.split()
    # The token after "release" looks like e.g. "11.8,"; split on the dot.
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]

    return raw_output, bare_metal_major, bare_metal_minor
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def _create_build_dir(buildpath):
|
| 82 |
+
try:
|
| 83 |
+
os.mkdir(buildpath)
|
| 84 |
+
except OSError:
|
| 85 |
+
if not os.path.isdir(buildpath):
|
| 86 |
+
print(f"Creation of the build directory {buildpath} failed")
|
mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* coding=utf-8
|
| 2 |
+
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
#include <ATen/ATen.h>
|
| 18 |
+
#include "compat.h"
|
| 19 |
+
|
| 20 |
+
// Dispatch on a single scalar type: expands __VA_ARGS__ once per supported
// dtype with `scalar_t` bound to float / at::Half / at::BFloat16.
#define DISPATCH_FLOAT_HALF_AND_BFLOAT(TYPE, NAME, ...)                     \
    switch (TYPE)                                                           \
    {                                                                       \
    case at::ScalarType::Float:                                             \
    {                                                                       \
        using scalar_t = float;                                             \
        __VA_ARGS__;                                                        \
        break;                                                              \
    }                                                                       \
    case at::ScalarType::Half:                                              \
    {                                                                       \
        using scalar_t = at::Half;                                          \
        __VA_ARGS__;                                                        \
        break;                                                              \
    }                                                                       \
    case at::ScalarType::BFloat16:                                          \
    {                                                                       \
        using scalar_t = at::BFloat16;                                      \
        __VA_ARGS__;                                                        \
        break;                                                              \
    }                                                                       \
    default:                                                                \
        AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'");     \
    }

// Dispatch on separate input/output scalar types. Note that for Half and
// BFloat16 inputs the output type is forced to match the input type; only a
// float input allows an independent output type.
#define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) \
    switch (TYPEIN)                                                            \
    {                                                                          \
    case at::ScalarType::Float:                                                \
    {                                                                          \
        using scalar_t_in = float;                                             \
        switch (TYPEOUT)                                                       \
        {                                                                      \
        case at::ScalarType::Float:                                            \
        {                                                                      \
            using scalar_t_out = float;                                        \
            __VA_ARGS__;                                                       \
            break;                                                             \
        }                                                                      \
        case at::ScalarType::Half:                                             \
        {                                                                      \
            using scalar_t_out = at::Half;                                     \
            __VA_ARGS__;                                                       \
            break;                                                             \
        }                                                                      \
        case at::ScalarType::BFloat16:                                         \
        {                                                                      \
            using scalar_t_out = at::BFloat16;                                 \
            __VA_ARGS__;                                                       \
            break;                                                             \
        }                                                                      \
        default:                                                               \
            AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \
        }                                                                      \
        break;                                                                 \
    }                                                                          \
    case at::ScalarType::Half:                                                 \
    {                                                                          \
        using scalar_t_in = at::Half;                                          \
        using scalar_t_out = at::Half;                                         \
        __VA_ARGS__;                                                           \
        break;                                                                 \
    }                                                                          \
    case at::ScalarType::BFloat16:                                             \
    {                                                                          \
        using scalar_t_in = at::BFloat16;                                      \
        using scalar_t_out = at::BFloat16;                                     \
        __VA_ARGS__;                                                           \
        break;                                                                 \
    }                                                                          \
    default:                                                                   \
        AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'");      \
    }
|
mmaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
from .filter import *
|
| 5 |
+
from .resample import *
|
| 6 |
+
from .act import *
|
mmaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
from mmaudio.ext.bigvgan_v2.alias_free_activation.torch.resample import (DownSample1d, UpSample1d)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Activation1d(nn.Module):
    """Anti-aliased activation: upsample, apply the activation, downsample.

    Operates on tensors shaped [B, C, T].
    """

    def __init__(
        self,
        activation,
        up_ratio: int = 2,
        down_ratio: int = 2,
        up_kernel_size: int = 12,
        down_kernel_size: int = 12,
    ):
        super().__init__()
        self.up_ratio = up_ratio
        self.down_ratio = down_ratio
        self.act = activation
        self.upsample = UpSample1d(up_ratio, up_kernel_size)
        self.downsample = DownSample1d(down_ratio, down_kernel_size)

    def forward(self, x):
        # x: [B, C, T] -> upsample before the nonlinearity, downsample after,
        # to suppress aliasing introduced by the pointwise activation.
        return self.downsample(self.act(self.upsample(x)))
|
mmaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
import math
|
| 8 |
+
|
| 9 |
+
# Use the native implementation when available; older torch versions
# (presumably pre-1.8 — confirm) lack torch.sinc, so fall back to a pure
# torch definition.
if "sinc" in dir(torch):
    sinc = torch.sinc
else:
    # This code is adopted from adefossez's julius.core.sinc under the MIT License
    # https://adefossez.github.io/julius/julius/core.html
    # LICENSE is in incl_licenses directory.
    def sinc(x: torch.Tensor):
        """
        Implementation of sinc, i.e. sin(pi * x) / (pi * x)
        __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
        """
        return torch.where(
            x == 0,
            torch.tensor(1.0, device=x.device, dtype=x.dtype),
            torch.sin(math.pi * x) / math.pi / x,
        )
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License
|
| 28 |
+
# https://adefossez.github.io/julius/julius/lowpass.html
|
| 29 |
+
# LICENSE is in incl_licenses directory.
|
| 30 |
+
def kaiser_sinc_filter1d(cutoff, half_width, kernel_size):
    """Build a Kaiser-windowed sinc low-pass filter.

    Returns a tensor of shape [1, 1, kernel_size], normalized to unit sum.
    """
    half_size = kernel_size // 2
    is_even = kernel_size % 2 == 0

    # Kaiser window design: estimate the attenuation A from the transition
    # width, then derive the window's beta parameter (standard formulas).
    delta_f = 4 * half_width
    A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
    if A > 50.0:
        beta = 0.1102 * (A - 8.7)
    elif A >= 21.0:
        beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0)
    else:
        beta = 0.0
    window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)

    # Sample times; even kernels are offset by half a sample so the filter
    # stays symmetric. (ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio)
    if is_even:
        time = torch.arange(-half_size, half_size) + 0.5
    else:
        time = torch.arange(kernel_size) - half_size

    if cutoff == 0:
        filter_ = torch.zeros_like(time)
    else:
        filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
    # Normalize filter to have sum = 1, otherwise we will have a small leakage
    # of the constant component in the input signal.
    filter_ /= filter_.sum()
    return filter_.view(1, 1, kernel_size)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class LowPassFilter1d(nn.Module):
    """Depthwise 1D low-pass filter using a Kaiser-windowed sinc kernel."""

    def __init__(
        self,
        cutoff=0.5,
        half_width=0.6,
        stride: int = 1,
        padding: bool = True,
        padding_mode: str = "replicate",
        kernel_size: int = 12,
    ):
        """
        kernel_size should be even number for stylegan3 setup, in this
        implementation, odd number is also possible.
        """
        super().__init__()
        if cutoff < -0.0:
            raise ValueError("Minimum cutoff must be larger than zero.")
        if cutoff > 0.5:
            raise ValueError("A cutoff above 0.5 does not make sense.")
        self.kernel_size = kernel_size
        self.even = kernel_size % 2 == 0
        # Asymmetric padding keeps the output aligned for even kernels.
        self.pad_left = kernel_size // 2 - int(self.even)
        self.pad_right = kernel_size // 2
        self.stride = stride
        self.padding = padding
        self.padding_mode = padding_mode
        self.register_buffer("filter", kaiser_sinc_filter1d(cutoff, half_width, kernel_size))

    def forward(self, x):
        # x: [B, C, T]; the same kernel is applied to every channel (groups=C).
        _, channels, _ = x.shape

        if self.padding:
            x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode)
        return F.conv1d(x, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels)
|
mmaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
|
| 7 |
+
from mmaudio.ext.bigvgan_v2.alias_free_activation.torch.filter import (LowPassFilter1d,
|
| 8 |
+
kaiser_sinc_filter1d)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class UpSample1d(nn.Module):
    """Anti-aliased 1-D upsampling.

    Zero-insertion upsampling via a transposed convolution whose kernel is a
    Kaiser-windowed sinc interpolation filter (from ``kaiser_sinc_filter1d``).
    """

    def __init__(self, ratio=2, kernel_size=None):
        super().__init__()
        self.ratio = ratio
        # Default kernel length: roughly 6 taps per unit of ratio, forced even.
        self.kernel_size = (int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size)
        self.stride = ratio
        # Replicate-padding amount applied before the transposed conv.
        self.pad = self.kernel_size // ratio - 1
        # Trim amounts that remove the transposed-conv edge transients
        # (presumably chosen so the output length is exactly ratio * T —
        # TODO confirm against the filter derivation).
        self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
        self.pad_right = (self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2)
        filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio,
                                      half_width=0.6 / ratio,
                                      kernel_size=self.kernel_size)
        # Buffer: moves with the module and is saved, but is not trainable.
        self.register_buffer("filter", filter)

    # x: [B, C, T]
    def forward(self, x):
        _, C, _ = x.shape

        x = F.pad(x, (self.pad, self.pad), mode="replicate")
        # Multiply by `ratio` to compensate for the amplitude loss of
        # zero-insertion upsampling; groups=C filters each channel separately.
        x = self.ratio * F.conv_transpose1d(
            x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
        # Crop the padded/transient edges.
        x = x[..., self.pad_left:-self.pad_right]

        return x
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class DownSample1d(nn.Module):
    """Anti-aliased 1-D downsampling: low-pass filter plus strided decimation."""

    def __init__(self, ratio=2, kernel_size=None):
        super().__init__()
        self.ratio = ratio
        # Default kernel length: roughly 6 taps per unit of ratio, forced even.
        if kernel_size is None:
            kernel_size = int(6 * ratio // 2) * 2
        self.kernel_size = kernel_size
        # The strided low-pass filter performs smoothing and decimation in one pass.
        self.lowpass = LowPassFilter1d(cutoff=0.5 / ratio,
                                       half_width=0.6 / ratio,
                                       stride=ratio,
                                       kernel_size=self.kernel_size)

    def forward(self, x):
        """Downsample input of shape [B, C, T] by ``self.ratio``."""
        return self.lowpass(x)
|
mmaudio/ext/bigvgan_v2/bigvgan.py
ADDED
|
@@ -0,0 +1,439 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2024 NVIDIA CORPORATION.
|
| 2 |
+
# Licensed under the MIT license.
|
| 3 |
+
|
| 4 |
+
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
|
| 5 |
+
# LICENSE is in incl_licenses directory.
|
| 6 |
+
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Dict, Optional, Union
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
import torch.nn as nn
|
| 14 |
+
from huggingface_hub import PyTorchModelHubMixin, hf_hub_download
|
| 15 |
+
from torch.nn import Conv1d, ConvTranspose1d
|
| 16 |
+
from torch.nn.utils.parametrizations import weight_norm
|
| 17 |
+
from torch.nn.utils.parametrize import remove_parametrizations
|
| 18 |
+
|
| 19 |
+
from mmaudio.ext.bigvgan_v2 import activations
|
| 20 |
+
from mmaudio.ext.bigvgan_v2.alias_free_activation.torch.act import \
|
| 21 |
+
Activation1d as TorchActivation1d
|
| 22 |
+
from mmaudio.ext.bigvgan_v2.env import AttrDict
|
| 23 |
+
from mmaudio.ext.bigvgan_v2.utils import get_padding, init_weights
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def load_hparams_from_json(path) -> AttrDict:
    """Load BigVGAN hyperparameters from a JSON config file.

    Args:
        path: Path to a ``config.json`` file.

    Returns:
        AttrDict: Parsed hyperparameters with attribute-style access.
    """
    # json.load parses straight from the file handle — no intermediate string.
    # Explicit UTF-8 avoids depending on the platform's default encoding.
    with open(path, encoding="utf-8") as f:
        return AttrDict(json.load(f))
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class AMPBlock1(torch.nn.Module):
    """
    AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
    AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1

    Args:
        h (AttrDict): Hyperparameters.
        channels (int): Number of convolution channels.
        kernel_size (int): Size of the convolution kernel. Default is 3.
        dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
        activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None.
    """

    def __init__(
        self,
        h: AttrDict,
        channels: int,
        kernel_size: int = 3,
        dilation: tuple = (1, 3, 5),
        activation: Optional[str] = None,
    ):
        super().__init__()

        self.h = h

        # Dilated convolutions, one per entry in `dilation`; padding keeps T unchanged.
        self.convs1 = nn.ModuleList([
            weight_norm(
                Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    stride=1,
                    dilation=d,
                    padding=get_padding(kernel_size, d),
                )) for d in dilation
        ])
        self.convs1.apply(init_weights)

        # Companion convolutions with fixed dilation=1, one after each convs1 layer.
        self.convs2 = nn.ModuleList([
            weight_norm(
                Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    stride=1,
                    dilation=1,
                    padding=get_padding(kernel_size, 1),
                )) for _ in range(len(dilation))
        ])
        self.convs2.apply(init_weights)

        self.num_layers = len(self.convs1) + len(self.convs2)  # Total number of conv layers

        # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
        if self.h.get("use_cuda_kernel", False):
            # NOTE(review): this bare import path lacks the
            # `mmaudio.ext.bigvgan_v2.` package prefix — confirm it actually
            # resolves when use_cuda_kernel=True.
            from alias_free_activation.cuda.activation1d import \
                Activation1d as CudaActivation1d

            Activation1d = CudaActivation1d
        else:
            Activation1d = TorchActivation1d

        # One anti-aliased activation instance per conv layer.
        if activation == "snake":
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        elif activation == "snakebeta":
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        else:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )

    def forward(self, x):
        """Residual AMP block: per layer apply (act, dilated conv, act, dilation-1 conv) with a skip connection."""
        # Even-indexed activations pair with convs1, odd-indexed with convs2.
        acts1, acts2 = self.activations[::2], self.activations[1::2]
        for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
            xt = a1(x)
            xt = c1(xt)
            xt = a2(xt)
            xt = c2(xt)
            x = xt + x  # residual connection

        return x

    def remove_weight_norm(self):
        """Strip the weight-norm parametrization from every conv layer (inference-time)."""
        for l in self.convs1:
            remove_parametrizations(l, 'weight')
        for l in self.convs2:
            remove_parametrizations(l, 'weight')
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class AMPBlock2(torch.nn.Module):
    """
    AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
    Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1

    Args:
        h (AttrDict): Hyperparameters.
        channels (int): Number of convolution channels.
        kernel_size (int): Size of the convolution kernel. Default is 3.
        dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
        activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None.
    """

    def __init__(
        self,
        h: AttrDict,
        channels: int,
        kernel_size: int = 3,
        dilation: tuple = (1, 3, 5),
        activation: Optional[str] = None,
    ):
        super().__init__()

        self.h = h

        # Dilated convolutions, one per entry in `dilation`; padding keeps T unchanged.
        self.convs = nn.ModuleList([
            weight_norm(
                Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    stride=1,
                    dilation=d,
                    padding=get_padding(kernel_size, d),
                )) for d in dilation
        ])
        self.convs.apply(init_weights)

        self.num_layers = len(self.convs)  # Total number of conv layers

        # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
        if self.h.get("use_cuda_kernel", False):
            # NOTE(review): this bare import path lacks the
            # `mmaudio.ext.bigvgan_v2.` package prefix — confirm it actually
            # resolves when use_cuda_kernel=True.
            from alias_free_activation.cuda.activation1d import \
                Activation1d as CudaActivation1d

            Activation1d = CudaActivation1d
        else:
            Activation1d = TorchActivation1d

        # One anti-aliased activation instance per conv layer.
        if activation == "snake":
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        elif activation == "snakebeta":
            self.activations = nn.ModuleList([
                Activation1d(
                    activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
                for _ in range(self.num_layers)
            ])
        else:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )

    def forward(self, x):
        """Residual AMP block: per layer apply (activation, dilated conv) with a skip connection."""
        for c, a in zip(self.convs, self.activations):
            xt = a(x)
            xt = c(xt)
            x = xt + x  # residual connection
        return x

    def remove_weight_norm(self):
        """Strip the weight-norm parametrization from every conv layer (inference-time).

        Bug fix: the original body called ``remove_weight_norm(l)``, a name
        that is never imported in this module (only ``remove_parametrizations``
        is), so it raised NameError at runtime. Use ``remove_parametrizations``
        to match AMPBlock1.
        """
        for l in self.convs:
            remove_parametrizations(l, 'weight')
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
class BigVGAN(
        torch.nn.Module,
        PyTorchModelHubMixin,
        library_name="bigvgan",
        repo_url="https://github.com/NVIDIA/BigVGAN",
        docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md",
        pipeline_tag="audio-to-audio",
        license="mit",
        tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"],
):
    """
    BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks).
    New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks.

    Args:
        h (AttrDict): Hyperparameters.
        use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels.

    Note:
        - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported.
        - Ensure that the activation function is correctly specified in the hyperparameters (h.activation).
    """

    def __init__(self, h: AttrDict, use_cuda_kernel: bool = False):
        super().__init__()
        self.h = h
        # Record the kernel choice in h so sub-blocks (AMPBlock1/2) see it too.
        self.h["use_cuda_kernel"] = use_cuda_kernel

        # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
        if self.h.get("use_cuda_kernel", False):
            # NOTE(review): this bare import path lacks the
            # `mmaudio.ext.bigvgan_v2.` package prefix — confirm it actually
            # resolves when use_cuda_kernel=True.
            from alias_free_activation.cuda.activation1d import \
                Activation1d as CudaActivation1d

            Activation1d = CudaActivation1d
        else:
            Activation1d = TorchActivation1d

        self.num_kernels = len(h.resblock_kernel_sizes)
        self.num_upsamples = len(h.upsample_rates)

        # Pre-conv: mel channels -> initial upsample channel count.
        self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))

        # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
        if h.resblock == "1":
            resblock_class = AMPBlock1
        elif h.resblock == "2":
            resblock_class = AMPBlock2
        else:
            raise ValueError(
                f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}")

        # Transposed conv-based upsamplers. does not apply anti-aliasing
        # Each stage halves the channel count while upsampling time by `u`.
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
            self.ups.append(
                nn.ModuleList([
                    weight_norm(
                        ConvTranspose1d(
                            h.upsample_initial_channel // (2**i),
                            h.upsample_initial_channel // (2**(i + 1)),
                            k,
                            u,
                            padding=(k - u) // 2,
                        ))
                ]))

        # Residual blocks using anti-aliased multi-periodicity composition modules (AMP)
        # num_kernels resblocks per upsample stage, indexed i * num_kernels + j.
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = h.upsample_initial_channel // (2**(i + 1))
            for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
                self.resblocks.append(resblock_class(h, ch, k, d, activation=h.activation))

        # Post-conv
        # NOTE: `ch` deliberately reuses the loop variable above — it is the
        # channel count of the last upsample stage.
        activation_post = (activations.Snake(ch, alpha_logscale=h.snake_logscale)
                           if h.activation == "snake" else
                           (activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale)
                            if h.activation == "snakebeta" else None))
        if activation_post is None:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )

        self.activation_post = Activation1d(activation=activation_post)

        # Whether to use bias for the final conv_post. Default to True for backward compatibility
        self.use_bias_at_final = h.get("use_bias_at_final", True)
        self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final))

        # Weight initialization
        for i in range(len(self.ups)):
            self.ups[i].apply(init_weights)
        self.conv_post.apply(init_weights)

        # Final tanh activation. Defaults to True for backward compatibility
        self.use_tanh_at_final = h.get("use_tanh_at_final", True)

    def forward(self, x):
        """Generate a waveform from a mel spectrogram.

        Args:
            x: Mel-spectrogram tensor; presumably shaped [B, num_mels, T] —
               TODO confirm against callers.

        Returns:
            Waveform tensor with values bounded to [-1, 1].
        """
        # Pre-conv
        x = self.conv_pre(x)

        for i in range(self.num_upsamples):
            # Upsampling
            for i_up in range(len(self.ups[i])):
                x = self.ups[i][i_up](x)
            # AMP blocks: average the outputs of the num_kernels resblocks.
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels

        # Post-conv
        x = self.activation_post(x)
        x = self.conv_post(x)
        # Final tanh activation
        if self.use_tanh_at_final:
            x = torch.tanh(x)
        else:
            x = torch.clamp(x, min=-1.0, max=1.0)  # Bound the output to [-1, 1]

        return x

    def remove_weight_norm(self):
        """Strip weight-norm parametrizations from all conv layers (inference-time).

        A ValueError from remove_parametrizations signals weight norm was
        already removed; in that case the method is a no-op.
        """
        try:
            print("Removing weight norm...")
            for l in self.ups:
                for l_i in l:
                    remove_parametrizations(l_i, 'weight')
            for l in self.resblocks:
                l.remove_weight_norm()
            remove_parametrizations(self.conv_pre, 'weight')
            remove_parametrizations(self.conv_post, 'weight')
        except ValueError:
            print("[INFO] Model already removed weight norm. Skipping!")
            pass

    # Additional methods for huggingface_hub support
    def _save_pretrained(self, save_directory: Path) -> None:
        """Save weights and config.json from a Pytorch model to a local directory."""

        # Checkpoint format matches the original BigVGAN release:
        # a dict with the generator state under the "generator" key.
        model_path = save_directory / "bigvgan_generator.pt"
        torch.save({"generator": self.state_dict()}, model_path)

        config_path = save_directory / "config.json"
        with open(config_path, "w") as config_file:
            json.dump(self.h, config_file, indent=4)

    @classmethod
    def _from_pretrained(
        cls,
        *,
        model_id: str,
        revision: str,
        cache_dir: str,
        force_download: bool,
        proxies: Optional[Dict],
        resume_download: bool,
        local_files_only: bool,
        token: Union[str, bool, None],
        map_location: str = "cpu",  # Additional argument
        strict: bool = False,  # Additional argument
        use_cuda_kernel: bool = False,
        **model_kwargs,
    ):
        """Load Pytorch pretrained weights and return the loaded model."""

        # Download and load hyperparameters (h) used by BigVGAN
        if os.path.isdir(model_id):
            print("Loading config.json from local directory")
            config_file = os.path.join(model_id, "config.json")
        else:
            config_file = hf_hub_download(
                repo_id=model_id,
                filename="config.json",
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                token=token,
                local_files_only=local_files_only,
            )
        h = load_hparams_from_json(config_file)

        # instantiate BigVGAN using h
        if use_cuda_kernel:
            print(
                f"[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!"
            )
            print(
                f"[WARNING] You need nvcc and ninja installed in your system that matches your PyTorch build is using to build the kernel. If not, the model will fail to initialize or generate incorrect waveform!"
            )
            print(
                f"[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis"
            )
        model = cls(h, use_cuda_kernel=use_cuda_kernel)

        # Download and load pretrained generator weight
        if os.path.isdir(model_id):
            print("Loading weights from local directory")
            model_file = os.path.join(model_id, "bigvgan_generator.pt")
        else:
            print(f"Loading weights from {model_id}")
            model_file = hf_hub_download(
                repo_id=model_id,
                filename="bigvgan_generator.pt",
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                token=token,
                local_files_only=local_files_only,
            )

        checkpoint_dict = torch.load(model_file, map_location=map_location, weights_only=True)

        try:
            # Checkpoints saved with weight norm load directly.
            model.load_state_dict(checkpoint_dict["generator"])
        except RuntimeError:
            # Otherwise strip weight norm from the freshly built model so the
            # parameter names line up, then retry.
            print(
                f"[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!"
            )
            model.remove_weight_norm()
            model.load_state_dict(checkpoint_dict["generator"])

        return model
|
mmaudio/ext/bigvgan_v2/env.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
|
| 2 |
+
# LICENSE is in incl_licenses directory.
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import shutil
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class AttrDict(dict):
    """A dict whose items are also accessible as attributes.

    Pointing ``__dict__`` at the mapping itself makes ``d.key`` and
    ``d["key"]`` read and write the very same storage.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def build_env(config, config_name, path):
    """Copy the config file into ``path``/``config_name``.

    Creates ``path`` if it does not exist. No-op when ``config`` already
    is the destination path, so a run restarted from its own output
    directory does not copy a file onto itself.
    """
    destination = os.path.join(path, config_name)
    if config == destination:
        return
    os.makedirs(path, exist_ok=True)
    shutil.copyfile(config, destination)
|
mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2020 Jungil Kong
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2020 Edward Dixon
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright [yyyy] [name of copyright owner]
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|