Commit: make tf32

Browse files:
- app.py (+4 −1)
- requirements.txt (+1 −1)

app.py CHANGED
|
@@ -178,7 +178,8 @@ def run_edit(audio_file, caption, num_steps, guidance_scale, guidance_rescale, s
 from omegaconf import OmegaConf
 from safetensors.torch import load_file
 import diffusers.schedulers as noise_schedulers
-
+torch.backends.cuda.matmul.allow_tf32 = False
+torch.backends.cudnn.allow_tf32 = False



@@ -330,8 +331,10 @@ def build_demo():
     return demo

 if __name__ == "__main__":
+    print("[BOOT] entering main()", flush=True)
     demo = build_demo()
     port = int(os.environ.get("PORT", "7860"))
+    print(f"[BOOT] launching gradio on 0.0.0.0:{port}", flush=True)
     demo.queue().launch(
         server_name="0.0.0.0",
         server_port=port,
|
requirements.txt
CHANGED
|
@@ -28,4 +28,4 @@ wandb
 tensorboard
 swanlab

-spaces
+spaces