from .cosmos_modernbert import CosmosBert
from comfy.comfy_types import IO
import torch
from transformers import AutoTokenizer
from transformers.models.modernbert.configuration_modernbert import ModernBertConfig
class CosmosBertTextEncode:
    """ComfyUI node: encode a text prompt with a CosmosBert (ModernBERT) text encoder.

    The encoded cross-attention embedding is returned as a CONDITIONING value
    for downstream diffusion nodes.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "text": (IO.STRING, {"multiline": True, "dynamicPrompts": True, "tooltip": "The text to be encoded."}),
                "clip": (IO.CLIP, {"tooltip": "The CLIP model used for encoding the text."})
            }
        }

    RETURN_TYPES = (IO.CONDITIONING,)
    OUTPUT_TOOLTIPS = ("A conditioning containing the embedded text used to guide the diffusion model.",)
    FUNCTION = "encode"
    CATEGORY = "conditioning"
    DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
    SEARCH_ALIASES = ["text", "prompt", "text prompt", "positive prompt", "negative prompt", "encode text", "text encoder", "encode prompt"]

    # Lazily-initialized, class-level cache so the BERT weights and tokenizer
    # are loaded once per process instead of once per encode() call.
    _bert = None
    _tokenizer = None

    def encode(self, clip, text):
        """Encode *text* into a conditioning list.

        Args:
            clip: The CLIP input from the graph. Only validated for presence;
                  the actual encoding is done by the cached CosmosBert model.
                  NOTE(review): presumably kept in the signature for graph
                  wiring compatibility — confirm before removing.
            text: The prompt string to tokenize and embed.

        Returns:
            A 1-tuple holding the conditioning list ``[(crossattn_emb, keys)]``.

        Raises:
            RuntimeError: if ``clip`` is None (invalid upstream loader).
        """
        # Validate inputs first, before any expensive model/tokenizer loading.
        if clip is None:
            raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model.")
        cls = type(self)
        if cls._bert is None:
            # TODO(review): 'model.safetensors' is a hard-coded relative path —
            # confirm the expected working directory / make it configurable.
            bert = CosmosBert(ModernBertConfig())
            bert.load_safetensors('model.safetensors')
            cls._bert = bert
        if cls._tokenizer is None:
            cls._tokenizer = AutoTokenizer.from_pretrained('nightknocker/cosmos-bert')
        inputs = cls._tokenizer(text, return_tensors='pt')
        # Pure inference: disable autograd to avoid tracking gradients.
        with torch.no_grad():
            crossattn_emb, keys = cls._bert.encode(**inputs)
        # Explicit 1-tuple (the original relied on an easy-to-miss trailing comma).
        return ([(crossattn_emb, keys)],)
# Registry consumed by ComfyUI: maps the node's string identifier to its class.
NODE_CLASS_MAPPINGS = {"CosmosBertTextEncode": CosmosBertTextEncode}