Mightys committed on
Commit
311c2d7
·
verified ·
1 Parent(s): f4ae472

Upload anina_fp16_patch.py

Browse files
Files changed (1) hide show
  1. scripts/anina_fp16_patch.py +56 -0
scripts/anina_fp16_patch.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A very basic ComfyUI plugin to patch anima, which is based on cosmos pt2,
2
+ # to use fp16 "safely" on old GPUs.
3
+ # Only tested for anima.
4
+ # Written by reakaakasky (https://civitai.com/user/reakaakasky).
5
+
6
+ # To clarify, this plugin isn't uploaded to GitHub because it's a very "dirty" plugin.
7
+ # "dirty" means it hot-patches/monkey patches the comfyui core code.
8
+ # This approach is terrible from a programming perspective.
9
+ # But this is the simplest approach I can think of.
10
+
11
+ # How to use:
12
+ # Put this file in the ComfyUI "custom_nodes" dir.
13
+ # Use "ModelComputeDtype" node and set dtype to "fp16".
14
+ # To disable the patch, remove the file, or rename the ".py" suffix, to something
15
+ # like ".disable", whatever.
16
+
17
+ # Version: v1 (2/2/2026)
18
+ # Version: v1.1 (2/2/2026) optimized for torch.compile
19
+
20
import torch
import logging

logger = logging.getLogger(__name__)
logger.info("[anima fp16 patch] patch loading")

# ComfyUI's plugin loader looks these mappings up in every custom_nodes
# module; this patch registers no nodes, so both stay empty.
NODE_CLASS_MAPPINGS = {}

NODE_DISPLAY_NAME_MAPPINGS = {}

import comfy.ldm.cosmos.predict2 as p2

# Autocast context managers reused as decorators: fp16 for the compute-heavy
# submodules, fp32 for the enclosing Block.forward.
ampf16 = torch.autocast("cuda", dtype=torch.float16)
ampf32 = torch.autocast("cuda", dtype=torch.float32)
37
def p2_Block_init_patch(self: p2.Block, *args, **kwargs):
    """Drop-in replacement for p2.Block.__init__.

    Runs the original initializer, then wraps the forward methods of the
    attention/MLP submodules so they execute under fp16 autocast, while the
    Block's own forward is wrapped to run under an fp32 autocast region.
    """
    self.__init_org(*args, **kwargs)

    # Submodules forced to fp16 (only validated for anima, per the header).
    for attr in (
        "adaln_modulation_self_attn",
        "adaln_modulation_cross_attn",
        "self_attn",
        "cross_attn",
        "mlp",
    ):
        sub = getattr(self, attr)
        sub.forward = ampf16(sub.forward)

    # Everything else inside the block's forward stays in an fp32 region.
    self.forward = ampf32(self.forward)
48
+
49
+
50
# Install the monkey patch. Guard against double installation: if this
# module is loaded twice (e.g. a custom_nodes reload), re-capturing
# Block.__init__ would save the already-patched initializer and make
# p2_Block_init_patch recurse forever.
if not hasattr(p2.Block, "__init_org"):
    p2.Block.__init_org = p2.Block.__init__
    p2.Block.__init__ = p2_Block_init_patch

# Trade a little matmul precision for speed: "high" permits TF32-style fp32
# matmuls, and allow_fp16_accumulation lets fp16 GEMMs accumulate in fp16.
torch.set_float32_matmul_precision("high")
torch.backends.cuda.matmul.allow_fp16_accumulation = True

logger.info("[anima fp16 patch] patch loaded")