| import torch
|
|
|
class ZImageConditioningContrast:
    """Apply non-linear contrast (power scaling) to Z-Image/Qwen embeddings.

    Why this works where plain multiplication fails: Z-Image applies layer
    normalization to its inputs, which mathematically cancels any linear
    scale (1.2 * x -> normalized -> x). A power function sign(x) * |x|^p
    instead changes the distribution shape (kurtosis) of the embedding,
    which normalization cannot revert — letting you effectively 'sharpen'
    (p > 1) or 'soften' (p < 1) the prompt.
    """

    # Dict entries that should always be power-scaled regardless of shape
    # (e.g. pooled_output is 2D and would miss the dim >= 3 heuristic below).
    _TARGET_KEYS = ("conditioning_llama3", "llama_embeds", "pooled_output")

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "conditioning": ("CONDITIONING", ),
                "contrast": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.05}),
            }
        }

    RETURN_TYPES = ("CONDITIONING",)
    RETURN_NAMES = ("conditioning",)
    FUNCTION = "apply_contrast"
    CATEGORY = "RES4LYF/conditioning"

    @staticmethod
    def _power_scale(tensor, exponent):
        """Sign-preserving power scaling: sign(x) * |x| ** exponent.

        sign() maps 0 -> 0, so zeros stay exactly zero for any exponent.
        """
        return (tensor.sign() * tensor.abs().pow(exponent)).contiguous()

    def apply_contrast(self, conditioning, contrast):
        """Power-scale every embedding tensor in a conditioning list.

        Args:
            conditioning: list of [tensor, dict] pairs (ComfyUI CONDITIONING).
            contrast: exponent p applied as sign(x) * |x|^p. 1.0 is identity.

        Returns:
            One-element tuple wrapping a new conditioning list; input is not
            mutated (dicts are shallow-copied, scaled tensors are new).
        """
        # p == 1.0 is an exact mathematical identity — skip the tensor copies.
        if contrast == 1.0:
            return (conditioning,)

        out = []
        for entry in conditioning:
            new_emb = self._power_scale(entry[0], contrast)

            # Shallow copy is enough: scaled tensors are re-assigned below,
            # untouched values may safely be shared with the original dict.
            new_extras = entry[1].copy()

            for key, value in new_extras.items():
                if not isinstance(value, torch.Tensor):
                    continue
                # Never distort masks, token ids, or size metadata — these
                # are structural, not semantic embeddings.
                if "mask" in key or "ids" in key or "size" in key:
                    continue
                # Scale known embedding keys, plus anything that looks like a
                # sequence embedding (>= 3 dims with a non-trivial channel dim).
                if key in self._TARGET_KEYS or (value.dim() >= 3 and value.shape[-1] > 64):
                    new_extras[key] = self._power_scale(value, contrast)

            out.append([new_emb, new_extras])

        return (out,)
|
|
|
|
|
# ComfyUI registration: map the node's internal id to its class, and to the
# human-readable name shown in the node menu.
NODE_CLASS_MAPPINGS = {"ZImageConditioningContrast": ZImageConditioningContrast}
NODE_DISPLAY_NAME_MAPPINGS = {"ZImageConditioningContrast": "Z-Image Conditioning Contrast"}