1inkusFace committed on
Commit
9ee1fa9
·
verified ·
1 Parent(s): 5fb4e8e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -19
app.py CHANGED
@@ -8,22 +8,35 @@
8
  import spaces
9
  import os
10
 
11
- os.putenv('PYTORCH_NVML_BASED_CUDA_CHECK','1')
12
- os.putenv('TORCH_LINALG_PREFER_CUSOLVER','1')
13
- alloc_conf_parts = [
14
- 'expandable_segments:True',
15
- 'pinned_use_background_threads:True' # Specific to pinned memory.
16
- ]
17
- os.environ['PYTORCH_CUDA_ALLOC_CONF'] = ','.join(alloc_conf_parts)
18
  os.environ["SAFETENSORS_FAST_GPU"] = "1"
19
- os.putenv('HF_HUB_ENABLE_HF_TRANSFER','1')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
  import random
22
  import uuid
23
  import gradio as gr
24
  import numpy as np
25
  from PIL import Image
26
- import torch
27
  from typing import Tuple
28
  import paramiko
29
  import datetime
@@ -32,16 +45,6 @@ from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipelin
32
  from diffusers.models.attention_processor import Attention, AttnProcessor2_0
33
  from transformers import CLIPTextModelWithProjection, CLIPTextModel, CLIPTokenizer
34
 
35
- torch.backends.cuda.matmul.allow_tf32 = False
36
- torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
37
- torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
38
- torch.backends.cudnn.allow_tf32 = False
39
- torch.backends.cudnn.deterministic = False
40
- torch.backends.cudnn.benchmark = False
41
- torch.backends.cuda.preferred_blas_library="cublas"
42
- torch.backends.cuda.preferred_linalg_library="cusolver"
43
- torch.set_float32_matmul_precision("highest")
44
-
45
  FTP_HOST = 'noahcohn.com'
46
  FTP_USER = 'ford442'
47
  FTP_PASS = os.getenv("FTP_PASS")
 
8
  import spaces
9
  import os
10
 
11
+ os.environ['PYTORCH_NVML_BASED_CUDA_CHECK'] = '1'
12
+ os.environ['TORCH_LINALG_PREFER_CUSOLVER'] = '1'
13
+ os.environ['PYTORCH_ALLOC_CONF'] = 'expandable_segments:True,pinned_use_background_threads:True'
 
 
 
 
14
  os.environ["SAFETENSORS_FAST_GPU"] = "1"
15
+ os.environ['HF_HUB_ENABLE_HF_TRANSFER'] = '1'
16
+
17
+ import torch
18
+
19
+ torch.backends.cuda.matmul.allow_tf32 = False # torch 2.8
20
+ torch.backends.cudnn.allow_tf32 = False # torch 2.8
21
+
22
+ torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
23
+ torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
24
+ #torch.backends.fp32_precision = "ieee" torch 2.9
25
+ #torch.backends.cuda.matmul.fp32_precision = "ieee" torch 2.9
26
+ #torch.backends.cudnn.fp32_precision = "ieee" torch 2.9
27
+ #torch.backends.cudnn.conv.fp32_precision = "ieee" torch 2.9
28
+ #torch.backends.cudnn.rnn.fp32_precision = "ieee" torch 2.9
29
+ torch.backends.cudnn.deterministic = False
30
+ torch.backends.cudnn.benchmark = False
31
+ torch.backends.cuda.preferred_blas_library="cublas"
32
+ torch.backends.cuda.preferred_linalg_library="cusolver"
33
+ torch.set_float32_matmul_precision("highest")
34
 
35
  import random
36
  import uuid
37
  import gradio as gr
38
  import numpy as np
39
  from PIL import Image
 
40
  from typing import Tuple
41
  import paramiko
42
  import datetime
 
45
  from diffusers.models.attention_processor import Attention, AttnProcessor2_0
46
  from transformers import CLIPTextModelWithProjection, CLIPTextModel, CLIPTokenizer
47
 
 
 
 
 
 
 
 
 
 
 
48
  FTP_HOST = 'noahcohn.com'
49
  FTP_USER = 'ford442'
50
  FTP_PASS = os.getenv("FTP_PASS")