LPX55 committed on
Commit
d3ae1b2
·
verified ·
1 Parent(s): 309881d

Update raw.py

Browse files
Files changed (1) hide show
  1. raw.py +0 -36
raw.py CHANGED
@@ -44,37 +44,6 @@ pipe = FluxControlNetPipeline.from_pretrained(
44
  # adapter_id3 = "enhanceaiteam/Flux-uncensored-v2"
45
 
46
  pipe.to("cuda")
47
- try:
48
- apply_group_offloading(
49
- pipe.transformer,
50
- offload_type="leaf_level",
51
- offload_device=torch.device("cpu"),
52
- onload_device=torch.device("cuda"),
53
- use_stream=True,
54
- )
55
- apply_group_offloading(
56
- pipe.text_encoder,
57
- offload_device=torch.device("cpu"),
58
- onload_device=torch.device("cuda"),
59
- offload_type="leaf_level",
60
- use_stream=True,
61
- )
62
- apply_group_offloading(
63
- pipe.text_encoder_2,
64
- offload_device=torch.device("cpu"),
65
- onload_device=torch.device("cuda"),
66
- offload_type="leaf_level",
67
- use_stream=True,
68
- )
69
- apply_group_offloading(
70
- pipe.vae,
71
- offload_device=torch.device("cpu"),
72
- onload_device=torch.device("cuda"),
73
- offload_type="leaf_level",
74
- use_stream=True,
75
- )
76
- except:
77
- print("debug-group")
78
 
79
  try:
80
  pipe.enable_sequential_cpu_offload()
@@ -88,11 +57,6 @@ try:
88
  pipe.vae.enable_tiling()
89
  except:
90
  print("debug-3")
91
- try:
92
- pipe.enable_xformers_memory_efficient_attention()
93
- except:
94
- print("debug-4")
95
-
96
 
97
  # pipe.load_lora_weights(adapter_id, adapter_name="turbo")
98
  # pipe.load_lora_weights(adapter_id2, adapter_name="real")
 
44
  # adapter_id3 = "enhanceaiteam/Flux-uncensored-v2"
45
 
46
  pipe.to("cuda")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
  try:
49
  pipe.enable_sequential_cpu_offload()
 
57
  pipe.vae.enable_tiling()
58
  except:
59
  print("debug-3")
 
 
 
 
 
60
 
61
  # pipe.load_lora_weights(adapter_id, adapter_name="turbo")
62
  # pipe.load_lora_weights(adapter_id2, adapter_name="real")