import re
import os

from ..utils.io import load_image_from_assets, safe_path

# --- BEGIN: merged bucket logic (from Load_BoyGirlBucket_Salia) ----------------

# (gender, bucket) -> identity/age token injected into the prompts.
CUSTOM_TOKENS = {
    ("boy", 0): "toddlerboy shota",
    ("boy", 1): "child shota",
    ("boy", 2): "preteen shota",
    ("boy", 3): "preteen boy",
    ("boy", 4): "teenage boy",
    ("boy", 5): "man",
    ("girl", 0): "toddlergirl loli",
    ("girl", 1): "child loli",
    ("girl", 2): "preteen loli",
    ("girl", 3): "preteen girl",
    ("girl", 4): "teenage girl",
    ("girl", 5): "woman",
}

# Whitelist of bucket asset filenames (boy0.png..boy5.png, girl0.png..girl5.png).
_ALLOWED = {f"boy{i}.png" for i in range(6)} | {f"girl{i}.png" for i in range(6)}

_num2_re = re.compile(r"(\d{2,})")  # first 2+ digit number
# NOTE(review): the next two patterns were truncated in the source paste and are
# reconstructed from how they are used — TODO confirm against the original file.
_num1_re = re.compile(r"(?<!\d)(\d)(?!\d)")  # first standalone single digit
_boy_re = re.compile(r"\b(boy|male|man|men|shota)\b", re.IGNORECASE)


def _bucket_from_number(n) -> int:
    """Map an age (int or None) to an asset bucket 0-5; None defaults to 5 (adult)."""
    if n is None:
        return 5
    if n <= 5:
        return 0
    if n <= 8:
        return 1
    if n <= 11:
        return 2
    if n <= 14:
        return 3
    if n <= 19:
        return 4
    return 5


def _parse_gender(text: str) -> str:
    """Return "boy" if a boy-token matches, else "girl"."""
    # NOTE: keep original tolerant behavior: default to girl if not matched
    return "boy" if _boy_re.search(text or "") else "girl"


def _parse_number(text: str) -> int | None:
    """Return the first number in *text*: multi-digit first, then a lone digit."""
    if not text:
        return None
    for rx in (_num2_re, _num1_re):
        m = rx.search(text)
        if m:
            try:
                return int(m.group(1))
            except ValueError:  # was a bare except; int() on digits cannot raise else
                pass
    return None


# --- END: merged bucket logic --------------------------------------------------


class Salia_BAM_2:
    """
    Multi-view output node (Front/Side/Diagonal/Rear) + per-view prefixes/suffixes.

    Outputs:
        Frontview_pos, Sideview_pos, Diagonal_pos, Rearview_pos, image,
        Frontview_neg, Sideview_neg, Diagonal_neg, Rearview_neg, BAM_out, age (INT)

    BAM_in:
        - If BAM_in == "" (exactly empty), behave normally (use UI inputs).
        - If BAM_in is non-empty, parse and overwrite ALL inputs.

    BAM_out:
        - BAM format string with all effective inputs used (after overwrite).
        - Fields separated with '###', wrapped in 'START### ... ###END###'
        - Empty fields are emitted as '0'
        - Gender emitted as: 1 == boy, 2 == girl

    Special rules:
        - Frontview_pos gets extra prefix:
          (masterpiece:0.8), (symmetrical eyes:1.2),
          (left and right eye masterpiece same:1.5),
        - Frontview_neg gets extra suffix:
          (asymmetrical face:1.5), (asymmetrical tail-arched eyebrows:1.0),
          (heterochromia:1.5), monochrome, sketch, colorless,
          (terribly drawn eyes:1.2), watermark, text
        - Diagonal_pos gets same as front positive EXCEPT no symmetrical/same-eye
          stuff (it still gets (masterpiece:0.8))
        - Sideview_pos like diagonal_pos BUT "eyes" -> singular "eye"
        - Rearview_pos: no face/eyes/mouth/expression content at all
        - POV token per view:
            Front:    (straight-on:1.15)
            Side:     (from side POV:1.15)
            Diagonal: (POV from three-quarter view:1.15)
            Rear:     (from back POV:1.15)
        - Diagonal_neg suffix: monochrome, sketch, colorless,
          (terribly drawn eyes:1.2), watermark, text
        - Sideview_neg suffix: monochrome, sketch, colorless,
          (terribly drawn eye:1.2), watermark, text
          (and we also singularize any "... eyes" tokens to "... eye" in Sideview_neg)
        - Rearview_neg suffix: (straight-on:1.15), (from side POV:1.15),
          (POV from three-quarter view:1.15), visible face, (visible eyes:1.2),
          monochrome, sketch, colorless, watermark, text

    Placeholder expansion in pos_prefix/pos_suffix/neg_prefix/neg_suffix:
        {age}, {gender}/{boygirl}, {CUSTOM_TOKENS}/{identity_age}, {identity},
        {eyes}, {eyes_full}
    """

    CATEGORY = "text/salia"

    POV_FRONT = "(straight-on:1.15)"
    POV_SIDE = "(from side POV:1.15)"
    POV_DIAGONAL = "(POV from three-quarter view:1.15)"
    POV_REAR = "(from back POV:1.15)"

    # ------------------------ generic helpers ---------------------------------

    @staticmethod
    def _collapse_spaces(s: str) -> str:
        """Trim and collapse all internal whitespace runs to single spaces."""
        return re.sub(r"\s+", " ", (s or "").strip())

    @classmethod
    def _split_tags(cls, text: str):
        """Split comma/newline-separated text into clean, non-empty tag tokens."""
        if not text:
            return []
        s = str(text).replace("\n", ",")
        parts = [cls._collapse_spaces(p.strip(" ,")) for p in s.split(",")]
        return [p for p in parts if p]

    @staticmethod
    def _dedup_preserve(tokens):
        """Case-insensitive de-duplication that keeps first occurrence order."""
        seen, out = set(), []
        for t in tokens:
            t = (t or "").strip()
            if not t:
                continue
            k = t.lower()
            if k not in seen:
                seen.add(k)
                out.append(t)
        return out

    @classmethod
    def _ensure_suffix_word(cls, token: str, word: str) -> str:
        """Guarantee *token* ends up containing *word* exactly once (appended if absent)."""
        t = cls._collapse_spaces(token)
        if not t:
            return ""
        # collapse accidental repeats like "smooth skin skin" -> "smooth skin"
        t = re.sub(
            rf"\b({re.escape(word)})\b(?:\s+\1\b)+", r"\1", t, flags=re.IGNORECASE
        )
        if re.search(rf"\b{re.escape(word)}\b", t, re.IGNORECASE):
            return t
        return f"{t} {word}"

    # ------------------- BAM override + serialize ----------------------------

    _BAM_DELIM = "###"
    _BAM_START = "START"
    _BAM_END = "END"
    _BAM_FIELDS = [
        "gender", "age", "identity", "eyes", "hair", "equip_info",
        "aesthetic_tag1", "aesthetic_tag2", "aesthetic_tag3", "aesthetic_tag4",
        "aesthetic_tag5",
        "skin_tag1", "skin_tag2", "skin_tag3", "skin_tag4", "skin_tag5",
        "expression_tag1", "expression_tag2", "expression_tag3",
        "expression_tag4", "expression_tag5",
        "headwear_tag",
        "pos_prefix", "pos_suffix", "neg_prefix", "neg_suffix",
        "EXTRA_NEG",
    ]

    @classmethod
    def _bam_clean_field(cls, v: str) -> str:
        """
        Normalize for BAM output: single-line, trimmed. Empty -> "0".
        """
        s = "" if v is None else str(v)
        s = s.replace("\r\n", "\n").replace("\r", "\n").replace("\n", " ")
        s = s.strip()
        return "0" if s == "" else s

    @classmethod
    def _bam_gender_to_code(cls, gender_text: str) -> str:
        """
        BAM output code: 1=boy, 2=girl.
        Uses existing _parse_gender() logic (boy matches; else girl).
        """
        g = _parse_gender(str(gender_text or ""))
        return "1" if g == "boy" else "2"

    @classmethod
    def _bam_build_out(cls, fields: dict) -> str:
        """
        Build BAM string from effective inputs.

        Field order is exactly _BAM_FIELDS, with gender replaced by its 1/2 code.
        """
        values = [cls._bam_gender_to_code(fields.get("gender", ""))]
        # derive the remaining values from _BAM_FIELDS instead of a hand-kept list
        values += [fields.get(name, "") for name in cls._BAM_FIELDS[1:]]
        values = [cls._bam_clean_field(v) for v in values]
        return (
            f"{cls._BAM_START}{cls._BAM_DELIM}"
            + cls._BAM_DELIM.join(values)
            + f"{cls._BAM_DELIM}{cls._BAM_END}{cls._BAM_DELIM}"
        )

    @classmethod
    def _bam_parse_in(cls, bam_text: str) -> dict:
        """
        Parse BAM_in string and return dict of fields (strings).

        Rules:
            - Optional START/END wrapper supported
            - Token "0" means empty string
            - Gender token accepts:
                "1" => boy
                "2" => girl
              also accepts "boy"/"girl" text (and boy synonyms via _boy_re)
            - If fewer fields than expected: pads with empty
            - If more fields than expected: truncates extras
        """
        raw = "" if bam_text is None else str(bam_text)
        toks = [t.strip() for t in raw.split(cls._BAM_DELIM)]

        # trim empty tokens at ends (common if string ends with ###)
        while toks and toks[0] == "":
            toks.pop(0)
        while toks and toks[-1] == "":
            toks.pop()

        # extract between START and END if present
        start_idx = None
        for i, t in enumerate(toks):
            if t.upper() == cls._BAM_START:
                start_idx = i
                break
        end_idx = None
        for i in range(len(toks) - 1, -1, -1):
            if toks[i].upper() == cls._BAM_END:
                end_idx = i
                break
        if start_idx is not None and end_idx is not None and end_idx > start_idx:
            toks = toks[start_idx + 1 : end_idx]
        elif start_idx is not None:
            toks = toks[start_idx + 1 :]
        elif end_idx is not None:
            toks = toks[:end_idx]

        # "0" => ""
        toks = [("" if t == "0" else t) for t in toks]

        expected = len(cls._BAM_FIELDS)
        if len(toks) < expected:
            toks += [""] * (expected - len(toks))
        elif len(toks) > expected:
            toks = toks[:expected]

        out = dict(zip(cls._BAM_FIELDS, toks))

        # normalize gender token to "boy"/"girl" for the node
        g = (out.get("gender") or "").strip()
        g_low = g.lower()
        if g_low == "1":
            out["gender"] = "boy"
        elif g_low == "2":
            out["gender"] = "girl"
        else:
            out["gender"] = "boy" if _boy_re.search(g) else "girl"
        return out

    # ------------------- placeholder expansion -------------------------------

    _PH_RE = re.compile(r"\{([A-Za-z0-9_]+)\}")

    @classmethod
    def _render_placeholders(cls, text: str, mapping: dict) -> str:
        """Case-insensitively expand {placeholders}; unknown ones stay as-is."""
        if not text:
            return ""
        mp = {str(k).lower(): "" if v is None else str(v) for k, v in (mapping or {}).items()}

        def repl(m):
            key = (m.group(1) or "").strip().lower()
            if key in mp:
                return mp[key]
            return m.group(0)  # unknown placeholder stays as-is

        return cls._PH_RE.sub(repl, text)

    # ------------------- age/gender parsing helpers ---------------------------

    @classmethod
    def _find_first_int(cls, s: str):
        """Return the first integer found in *s*, or None."""
        # NOTE(review): the pattern was truncated in the source paste and is
        # reconstructed — TODO confirm against the original file.
        m = re.search(r"(?<!\d)(\d+)", s or "")
        return int(m.group(1)) if m else None

    @classmethod
    def _parse_gender_neg(cls, s: str) -> str:
        """Subject gender for negative-prompt logic as "male"/"female"."""
        # NOTE(review): the original definition was lost in the paste; this is
        # reconstructed from _negatives_for_gender(), which expects male/female.
        return "male" if _parse_gender(s) == "boy" else "female"

    @staticmethod
    def _lower_norm(s: str) -> str:
        # NOTE(review): reconstructed from a truncated fragment; original name
        # unknown — verify against the pre-mangled file.
        return (s or "").lower().strip()

    @classmethod
    def _parse_age_group(cls, inp: str):
        """
        Classify the first number in *inp* into child/teen/adult/elderly.

        Returns None when no age number is present.

        FIX(review): the source defined this method twice; the second (broken)
        definition shadowed this one and returned "adult" for every age < 50,
        never producing "teen"/"child". The duplicate has been removed.
        """
        age = cls._find_first_int(inp)
        if age is not None:
            if age <= 12:
                return "child"
            if 13 <= age <= 19:
                return "teen"
            if 20 <= age <= 49:
                return "adult"
            return "elderly"
        return None

    @staticmethod
    def _negatives_for_gender(subject_gender: str):
        """Negative token suppressing the opposite gender."""
        return ["female"] if subject_gender == "male" else ["male"]

    @staticmethod
    def _negatives_for_agegroup(age_group: str, subject_gender: str):
        """Negative tokens suppressing every OTHER age group."""
        if age_group == "elderly":
            return ["adult", "teen", "loli", "shota"]
        if age_group == "adult":
            return ["elderly", "teen", "loli", "shota"]
        if age_group == "teen":
            return ["elderly", "adult", "loli", "shota"]
        if age_group == "child":
            if subject_gender == "male":
                return ["elderly", "adult", "teen", "loli"]
            if subject_gender == "female":
                return ["elderly", "adult", "teen", "shota"]
            return ["elderly", "adult", "teen"]
        return []

    # ------------------- eyes formatting + negatives --------------------------

    _EYE_COLORS = ["brown", "blue", "grey", "red", "green", "teal"]
    _EYE_ALIASES = {"gray": "grey"}
    _EYE_WORD_RE = re.compile(r"\beyes?\b", re.IGNORECASE)

    @classmethod
    def _eye_norm(cls, token: str) -> str:
        """Canonicalize an eye-color word (gray -> grey)."""
        t = (token or "").lower().strip()
        return cls._EYE_ALIASES.get(t, t)

    @classmethod
    def _eye_base(cls, s: str):
        """Return the first known eye color in *s* (canonical form), else None."""
        pattern = (
            r"\b("
            + "|".join(sorted(set(cls._EYE_COLORS + list(cls._EYE_ALIASES.keys())), key=len, reverse=True))
            + r")\b"
        )
        m = re.search(pattern, s or "", re.IGNORECASE)
        if not m:
            return None
        base = cls._eye_norm(m.group(1))
        return base if base in cls._EYE_COLORS else None

    @classmethod
    def _eye_others(cls, base: str):
        """All other eye colors as "<color> eyes" negatives."""
        if base is None:
            return []
        return [f"{c} eyes" for c in cls._EYE_COLORS if c != base]

    @classmethod
    def _format_eyes_tokens(cls, eyes_text: str):
        """Normalize the eyes input into "<x> eyes" tokens; also return base color."""
        parts = cls._split_tags(eyes_text)
        out = []
        for p in parts:
            p = cls._collapse_spaces(p)
            p = re.sub(r"\beye\b$", "eyes", p, flags=re.IGNORECASE)
            if cls._EYE_WORD_RE.search(p):
                base = cls._eye_base(p)
                out.append(f"{base} eyes" if base is not None else p)
            else:
                base = cls._eye_base(p)
                if base is not None and p.strip().lower() in (base, *cls._EYE_ALIASES.keys()):
                    out.append(f"{base} eyes")
                else:
                    out.append(f"{p} eyes")
        out = cls._dedup_preserve(out)
        return out, cls._eye_base(eyes_text)

    @classmethod
    def _eyes_placeholder_value(cls, eyes_in: str, eye_base: str | None) -> str:
        """
        Used for {eyes} placeholder AND for Frontview symmetrical prefix.
        Guarantees it is NOT "blue eyes" (so we don't produce "eyes eyes").
        """
        if eye_base:
            return eye_base
        raw = cls._collapse_spaces(eyes_in)
        raw2 = re.sub(r"\beyes?\b", "", raw, flags=re.IGNORECASE)
        raw2 = cls._collapse_spaces(raw2)
        return raw2 if raw2 else raw

    @staticmethod
    def _eyes_to_singular(tokens):
        """
        Convert "blue eyes" -> "blue eye" etc.
        """
        out = []
        for t in tokens or []:
            if not t:
                continue
            out.append(re.sub(r"\beyes\b", "eye", t, flags=re.IGNORECASE))
        return out

    # ------------------- hair formatting + negatives --------------------------

    _HAIR_COLORS = [
        "black", "brown", "blonde", "auburn", "ginger", "blue", "green",
        "pink", "purple", "white", "gray", "silver", "cyan", "teal",
        "magenta", "violet",
    ]
    _HAIR_ALIASES = {
        "blond": "blonde",
        "grey": "gray",
        "red": "auburn",
        "yellow": "blonde",
        "orange": "ginger",
    }
    # per-color "confusable" colors used as extra negatives
    _HAIR_CONF2 = {
        "black": ["purple hair", "blue hair"],
        "brown": ["auburn hair", "black hair"],
        "blonde": ["ginger hair", "white hair"],
        "auburn": ["ginger hair", "brown hair"],
        "ginger": ["auburn hair", "blonde hair"],
        "pink": ["purple hair", "red hair"],
        "purple": ["pink hair", "blue hair"],
        "blue": ["purple hair", "teal hair"],
        "green": ["teal hair", "blue hair"],
        "white": ["gray hair", "silver hair"],
        "gray": ["white hair", "silver hair"],
        "silver": ["white hair", "gray hair"],
        "cyan": ["blue hair", "teal hair"],
        "teal": ["cyan hair", "green hair"],
        "magenta": ["pink hair", "purple hair"],
        "violet": ["purple hair", "pink hair"],
    }
    _HAIR_NORM = {**{c: c for c in _HAIR_COLORS}, **_HAIR_ALIASES}
    _HAIR_TOKENS = tuple(sorted(set(_HAIR_COLORS + list(_HAIR_ALIASES.keys())), key=len, reverse=True))
    _HAIR_TOKEN_RE = re.compile(
        r"\b(?:" + "|".join(map(re.escape, _HAIR_TOKENS)) + r")\b",
        re.IGNORECASE | re.ASCII,
    )
    _HAIR_WORD_RE = re.compile(r"\bhair\b", re.IGNORECASE)
    _HAIR_LENGTH_RE = re.compile(r"\b(very\s+long|long|medium|short|bald)\b", re.IGNORECASE)

    @classmethod
    def _hair_length_in_token(cls, token: str):
        """Return the hair-length keyword in *token* ("very long" normalized), else None."""
        if not token:
            return None
        m = cls._HAIR_LENGTH_RE.search(token)
        if not m:
            return None
        v = cls._collapse_spaces(m.group(1).lower())
        return "very long" if v == "very long" else v

    @classmethod
    def _canonicalize_hair_color_in_token(cls, token: str) -> str:
        """If *token* is just a color (optionally + "hair"), canonicalize to "<color> hair"."""
        t = cls._collapse_spaces(token)
        if not t:
            return ""
        low = t.lower()
        m = re.fullmatch(
            r"(" + "|".join(map(re.escape, cls._HAIR_TOKENS)) + r")(?:\s+hair)?",
            low,
        )
        if m:
            canon = cls._HAIR_NORM.get(m.group(1).lower(), m.group(1).lower())
            return f"{canon} hair"
        return t

    @classmethod
    def _normalize_hair_token(cls, token: str) -> str:
        """Normalize one hair token, appending "hair" (or "bald head") where sensible."""
        t = cls._canonicalize_hair_color_in_token(token)
        if not t:
            return ""
        t = re.sub(r"\b(hair)\b(?:\s+\1\b)+", r"\1", t, flags=re.IGNORECASE)
        if cls._HAIR_WORD_RE.search(t):
            return t
        if cls._hair_length_in_token(t) == "bald":
            return "bald head"
        if cls._HAIR_TOKEN_RE.search(t):
            return f"{t} hair"
        if cls._hair_length_in_token(t) in {"short", "medium", "long", "very long"}:
            return f"{t} hair"
        if " " not in t:
            return f"{t} hair"
        return t

    @classmethod
    def _format_hair_tokens(cls, hair_text: str):
        """Normalize hair input; returns (tokens ordered color,length,rest; base color; length)."""
        parts = cls._split_tags(hair_text)
        if not parts:
            return [], None, None
        # color first
        color_idx, base_color = None, None
        for i, p in enumerate(parts):
            m = cls._HAIR_TOKEN_RE.search(p)
            if m:
                color_idx = i
                base_color = cls._HAIR_NORM.get(m.group(0).lower())
                break
        # then first length (excluding color token)
        length_idx, hair_len = None, None
        for i, p in enumerate(parts):
            if i == color_idx:
                continue
            l = cls._hair_length_in_token(p)
            if l:
                length_idx = i
                hair_len = l
                break
        ordered = []
        if color_idx is not None:
            ordered.append(parts[color_idx])
        if length_idx is not None:
            ordered.append(parts[length_idx])
        for i, p in enumerate(parts):
            if i == color_idx or i == length_idx:
                continue
            ordered.append(p)
        norm = [cls._normalize_hair_token(t) for t in ordered]
        norm = [t for t in norm if t]
        norm = cls._dedup_preserve(norm)
        return norm, base_color, hair_len

    @classmethod
    def _hair_other_colors(cls, base: str):
        """All other canonical hair colors as "<color> hair" negatives."""
        if not base:
            return []
        canon_colors = list(cls._HAIR_CONF2.keys())
        return [f"{c} hair" for c in canon_colors if c != base]

    @classmethod
    def _hair_conf2(cls, text: str):
        """Up to two confusable-hair-color negatives for the first color in *text*."""
        if not text:
            return []
        m = cls._HAIR_TOKEN_RE.search(text)
        if not m:
            return []
        base = cls._HAIR_NORM.get(m.group(0).lower())
        return (cls._HAIR_CONF2.get(base, [])[:2]) if base else []

    # ------------------- Rearview positive filter -----------------------------

    @classmethod
    def _rear_pos_filter(cls, tokens):
        """
        Remove anything mentioning eyes/face/mouth/expression for Rearview_pos.
        (Also removes "eye..." substrings like "eyeshadow".)
        """
        out = []
        for t in tokens or []:
            low = (t or "").lower()
            if not low.strip():
                continue
            if "eye" in low:  # catches eyes/eye/eyeshadow/eyebrow etc.
                continue
            if re.search(r"\b(face|mouth|expression)\b", low, re.IGNORECASE):
                continue
            out.append(t)
        return out

    # ------------------------- ComfyUI interface ------------------------------

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                # NEW: BAM override input (empty string => behave normally)
                "BAM_in": ("STRING", {"multiline": True, "default": ""}),
                "gender": ("STRING", {"default": "{{gender}} (enter 'girl' or 'boy')"}),
                "age": ("STRING", {"default": "{{age}} (e.g., 33)"}),
                "identity": ("STRING", {"default": "{{identity}} (e.g., rogue)"}),
                "eyes": ("STRING", {"default": "{{eyes}} (e.g., blue OR 'blue eyes')"}),
                "hair": ("STRING", {
                    "multiline": True,
                    "default": "{{hair}} (e.g., brown, long, messy OR 'brown hair, long hair, messy hair')"
                }),
                "equip_info": ("STRING", {
                    "multiline": True,
                    "default": "{{EQUIP}}"
                }),
                "aesthetic_tag1": ("STRING", {"default": ""}),
                "aesthetic_tag2": ("STRING", {"default": ""}),
                "aesthetic_tag3": ("STRING", {"default": ""}),
                "aesthetic_tag4": ("STRING", {"default": ""}),
                "aesthetic_tag5": ("STRING", {"default": ""}),
                "skin_tag1": ("STRING", {"default": ""}),
                "skin_tag2": ("STRING", {"default": ""}),
                "skin_tag3": ("STRING", {"default": ""}),
                "skin_tag4": ("STRING", {"default": ""}),
                "skin_tag5": ("STRING", {"default": ""}),
                "expression_tag1": ("STRING", {"default": ""}),
                "expression_tag2": ("STRING", {"default": ""}),
                "expression_tag3": ("STRING", {"default": ""}),
                "expression_tag4": ("STRING", {"default": ""}),
                "expression_tag5": ("STRING", {"default": ""}),
                "headwear_tag": ("STRING", {"default": ""}),
                "pos_prefix": ("STRING", {"multiline": True, "default": ""}),
                "pos_suffix": ("STRING", {"multiline": True, "default": ""}),
                "neg_prefix": ("STRING", {"multiline": True, "default": ""}),
                "neg_suffix": ("STRING", {"multiline": True, "default": ""}),
                "EXTRA_NEG": ("STRING", {"multiline": True, "default": ""}),
            }
        }

    RETURN_TYPES = (
        "STRING", "STRING", "STRING", "STRING",
        "IMAGE",
        "STRING", "STRING", "STRING", "STRING",
        "STRING",  # BAM_out
        "INT",     # age
    )
    RETURN_NAMES = (
        "Frontview_pos", "Sideview_pos", "Diagonal_pos", "Rearview_pos",
        "image",
        "Frontview_neg", "Sideview_neg", "Diagonal_neg", "Rearview_neg",
        "BAM_out",
        "age",
    )
    FUNCTION = "run"

    # ------------------------------- main -------------------------------------

    def _compose_pos(self, *, prefix, extra, subject, pov, eyes, hair, head,
                     equip, identity_pair, aesthetic, skin, expr, suffix):
        """Assemble one positive-prompt token list in the canonical order."""
        tokens = [
            *prefix, *extra, subject, pov, *eyes, *hair, *head, *equip,
            *identity_pair, *aesthetic, *skin, *expr,
            "against grey background", *suffix,
        ]
        return [self._collapse_spaces(t) for t in tokens if self._collapse_spaces(t)]

    def run(
        self,
        BAM_in,
        gender, age, identity, eyes, hair, equip_info,
        aesthetic_tag1, aesthetic_tag2, aesthetic_tag3, aesthetic_tag4, aesthetic_tag5,
        skin_tag1, skin_tag2, skin_tag3, skin_tag4, skin_tag5,
        expression_tag1, expression_tag2, expression_tag3, expression_tag4, expression_tag5,
        headwear_tag,
        pos_prefix, pos_suffix, neg_prefix, neg_suffix,
        EXTRA_NEG,
    ):
        # ---------- BAM override (overwrite ALL inputs) ----------
        bam_override_in = str(BAM_in or "")
        if bam_override_in != "":
            parsed = self._bam_parse_in(bam_override_in)
            gender = parsed["gender"]
            age = parsed["age"]
            identity = parsed["identity"]
            eyes = parsed["eyes"]
            hair = parsed["hair"]
            equip_info = parsed["equip_info"]
            aesthetic_tag1 = parsed["aesthetic_tag1"]
            aesthetic_tag2 = parsed["aesthetic_tag2"]
            aesthetic_tag3 = parsed["aesthetic_tag3"]
            aesthetic_tag4 = parsed["aesthetic_tag4"]
            aesthetic_tag5 = parsed["aesthetic_tag5"]
            skin_tag1 = parsed["skin_tag1"]
            skin_tag2 = parsed["skin_tag2"]
            skin_tag3 = parsed["skin_tag3"]
            skin_tag4 = parsed["skin_tag4"]
            skin_tag5 = parsed["skin_tag5"]
            expression_tag1 = parsed["expression_tag1"]
            expression_tag2 = parsed["expression_tag2"]
            expression_tag3 = parsed["expression_tag3"]
            expression_tag4 = parsed["expression_tag4"]
            expression_tag5 = parsed["expression_tag5"]
            headwear_tag = parsed["headwear_tag"]
            pos_prefix = parsed["pos_prefix"]
            pos_suffix = parsed["pos_suffix"]
            neg_prefix = parsed["neg_prefix"]
            neg_suffix = parsed["neg_suffix"]
            EXTRA_NEG = parsed["EXTRA_NEG"]

        # ---------- normalize inputs to strings ----------
        gender_in = str(gender or "")
        age_in = str(age or "")
        identity_in = str(identity or "")
        eyes_in = str(eyes or "")
        hair_in = str(hair or "")
        equip_in = str(equip_info or "")
        headwear_in = str(headwear_tag or "")
        pos_prefix_in = str(pos_prefix or "")
        pos_suffix_in = str(pos_suffix or "")
        neg_prefix_in = str(neg_prefix or "")
        neg_suffix_in = str(neg_suffix or "")
        extra_neg_in = str(EXTRA_NEG or "")

        # ---------- BAM_out (serialize effective inputs) ----------
        BAM_out = self._bam_build_out({
            "gender": gender_in,
            "age": age_in,
            "identity": identity_in,
            "eyes": eyes_in,
            "hair": hair_in,
            "equip_info": equip_in,
            "aesthetic_tag1": str(aesthetic_tag1 or ""),
            "aesthetic_tag2": str(aesthetic_tag2 or ""),
            "aesthetic_tag3": str(aesthetic_tag3 or ""),
            "aesthetic_tag4": str(aesthetic_tag4 or ""),
            "aesthetic_tag5": str(aesthetic_tag5 or ""),
            "skin_tag1": str(skin_tag1 or ""),
            "skin_tag2": str(skin_tag2 or ""),
            "skin_tag3": str(skin_tag3 or ""),
            "skin_tag4": str(skin_tag4 or ""),
            "skin_tag5": str(skin_tag5 or ""),
            "expression_tag1": str(expression_tag1 or ""),
            "expression_tag2": str(expression_tag2 or ""),
            "expression_tag3": str(expression_tag3 or ""),
            "expression_tag4": str(expression_tag4 or ""),
            "expression_tag5": str(expression_tag5 or ""),
            "headwear_tag": headwear_in,
            "pos_prefix": pos_prefix_in,
            "pos_suffix": pos_suffix_in,
            "neg_prefix": neg_prefix_in,
            "neg_suffix": neg_suffix_in,
            "EXTRA_NEG": extra_neg_in,
        })

        # ---------- Bucket selection from age+gender ----------
        gender_norm = _parse_gender(gender_in)
        n = _parse_number(age_in)
        age_out_int = 0 if n is None else int(n)
        bucket = _bucket_from_number(n)
        fname = f"{gender_norm}{bucket}.png"
        if fname not in _ALLOWED:
            raise FileNotFoundError(f"Unexpected filename: {fname}")
        path = safe_path(fname)
        if not os.path.isfile(path):
            raise FileNotFoundError(f"Required asset not found: {fname} (place it in assets/images/)")
        img, _ = load_image_from_assets(fname)
        identity_age_token = CUSTOM_TOKENS.get((gender_norm, bucket), fname)
        one_girl_boy = "1" + gender_norm  # "1girl" / "1boy"

        # ---------- Format EYES + HAIR ----------
        eyes_tokens, eye_base = self._format_eyes_tokens(eyes_in)  # e.g. ["blue eyes"]
        hair_tokens, hair_base, hair_len = self._format_hair_tokens(hair_in)

        # ---------- Placeholder mapping (prefix/suffix expansion) ----------
        age_for_text = self._find_first_int(age_in)
        if age_for_text is None:
            age_for_text = self._collapse_spaces(age_in)
        eyes_ph = self._eyes_placeholder_value(eyes_in, eye_base)
        ph = {
            "age": age_for_text,
            "gender": gender_norm,
            "boygirl": gender_norm,
            "custom_tokens": identity_age_token,
            "custom_token": identity_age_token,
            "identity_age": identity_age_token,
            "identity": identity_in,
            # {eyes} is ALWAYS a color/descriptor WITHOUT "eyes"
            "eyes": eyes_ph,
            # {eyes_full} is the formatted first token ("blue eyes")
            "eyes_full": (eyes_tokens[0] if eyes_tokens else self._collapse_spaces(eyes_in)),
        }
        pos_prefix_in = self._render_placeholders(pos_prefix_in, ph)
        pos_suffix_in = self._render_placeholders(pos_suffix_in, ph)
        neg_prefix_in = self._render_placeholders(neg_prefix_in, ph)
        neg_suffix_in = self._render_placeholders(neg_suffix_in, ph)

        # ---------- Headwear logic ----------
        headwear_tokens = self._split_tags(headwear_in)
        has_headwear = bool(headwear_tokens)
        pos_head_tokens = []
        neg_head_tokens = []
        if not has_headwear:
            if hair_len == "bald":
                pos_head_tokens.append("bald head fully visible")
            else:
                pos_head_tokens.append("uncovered head")
                if hair_len in {"short", "medium", "long", "very long"}:
                    pos_head_tokens.append(f"{hair_len} hair fully visible")
                else:
                    pos_head_tokens.append("hair fully visible")
            neg_head_tokens += ["headwear", "hat"]
        else:
            pos_head_tokens.append(headwear_tokens[0])
            neg_head_tokens += ["uncovered head"]

        # ---------- Tags (force suffixes) ----------
        aesthetic_tokens = self._split_tags(",".join(
            [aesthetic_tag1, aesthetic_tag2, aesthetic_tag3, aesthetic_tag4, aesthetic_tag5]))
        skin_tokens = self._split_tags(",".join(
            [skin_tag1, skin_tag2, skin_tag3, skin_tag4, skin_tag5]))
        expr_tokens = self._split_tags(",".join(
            [expression_tag1, expression_tag2, expression_tag3, expression_tag4, expression_tag5]))
        aesthetic_tokens = [self._ensure_suffix_word(t, "aesthetic") for t in aesthetic_tokens if t]
        skin_tokens = [self._ensure_suffix_word(t, "skin") for t in skin_tokens if t]
        expr_tokens = [self._ensure_suffix_word(t, "expression") for t in expr_tokens if t]
        equip_tokens = self._split_tags(equip_in)

        # ---------- View-specific fixed pieces ----------
        # Front positive extra prefix
        FRONT_POS_EXTRA = [
            "(masterpiece:0.8)",
            f"(symmetrical {eyes_ph} eyes:1.2)",
            "(left and right eye masterpiece same:1.5)",
        ]
        # Diagonal/Side positive extra prefix (no symmetrical/same-eye)
        DIAG_SIDE_POS_EXTRA = [
            "(masterpiece:0.8)",
        ]
        FRONT_NEG_SUFFIX = [
            "(asymmetrical face:1.5)",
            "(asymmetrical tail-arched eyebrows:1.0)",
            "(heterochromia:1.5)",
            "monochrome", "sketch", "colorless",
            "(terribly drawn eyes:1.2)",
            "watermark", "text",
        ]
        DIAGONAL_NEG_SUFFIX = [
            "monochrome", "sketch", "colorless",
            "(terribly drawn eyes:1.2)",
            "watermark", "text",
        ]
        SIDE_NEG_SUFFIX = [
            "monochrome", "sketch", "colorless",
            "(terribly drawn eye:1.2)",
            "watermark", "text",
        ]
        REAR_NEG_SUFFIX = [
            "(straight-on:1.15)",
            "(from side POV:1.15)",
            "(POV from three-quarter view:1.15)",
            "visible face",
            "(visible eyes:1.2)",
            "monochrome", "sketch", "colorless",
            "watermark", "text",
        ]

        # ---------- Build POSITIVE prompts ----------
        base_pos_prefix_tokens = self._split_tags(pos_prefix_in)
        base_pos_suffix_tokens = self._split_tags(pos_suffix_in)
        identity_pair = [identity_age_token, identity_in]

        # Front
        Frontview_pos = ", ".join(self._compose_pos(
            prefix=base_pos_prefix_tokens, extra=FRONT_POS_EXTRA,
            subject=one_girl_boy, pov=self.POV_FRONT,
            eyes=eyes_tokens, hair=hair_tokens, head=pos_head_tokens,
            equip=equip_tokens, identity_pair=identity_pair,
            aesthetic=aesthetic_tokens, skin=skin_tokens, expr=expr_tokens,
            suffix=base_pos_suffix_tokens,
        ))

        # Diagonal (same as front but no symmetrical/same-eye prefix; diagonal POV)
        Diagonal_pos = ", ".join(self._compose_pos(
            prefix=base_pos_prefix_tokens, extra=DIAG_SIDE_POS_EXTRA,
            subject=one_girl_boy, pov=self.POV_DIAGONAL,
            eyes=eyes_tokens, hair=hair_tokens, head=pos_head_tokens,
            equip=equip_tokens, identity_pair=identity_pair,
            aesthetic=aesthetic_tokens, skin=skin_tokens, expr=expr_tokens,
            suffix=base_pos_suffix_tokens,
        ))

        # Side (like diagonal but eyes -> eye singular; side POV)
        Sideview_pos = ", ".join(self._compose_pos(
            prefix=base_pos_prefix_tokens, extra=DIAG_SIDE_POS_EXTRA,
            subject=one_girl_boy, pov=self.POV_SIDE,
            eyes=self._eyes_to_singular(eyes_tokens), hair=hair_tokens,
            head=pos_head_tokens, equip=equip_tokens,
            identity_pair=identity_pair, aesthetic=aesthetic_tokens,
            skin=skin_tokens, expr=expr_tokens, suffix=base_pos_suffix_tokens,
        ))

        # Rear (no eyes/face/mouth/expression anywhere in positive; rear POV)
        rear_pos_tokens = self._compose_pos(
            prefix=base_pos_prefix_tokens, extra=[],
            subject=one_girl_boy, pov=self.POV_REAR,
            eyes=[],  # NO eyes tokens in rear
            hair=hair_tokens, head=pos_head_tokens, equip=equip_tokens,
            identity_pair=identity_pair, aesthetic=aesthetic_tokens,
            skin=skin_tokens,
            expr=[],  # NO expression tokens in rear
            suffix=base_pos_suffix_tokens,
        )
        rear_pos_tokens = self._rear_pos_filter(rear_pos_tokens)
        Rearview_pos = ", ".join(rear_pos_tokens)

        # ---------- Build NEGATIVE prompts (base + view suffix) ----------
        def build_base_neg_tokens():
            neg_tokens = []
            neg_tokens += self._split_tags(neg_prefix_in)
            parse_seed = f"{gender_in} {age_in}"
            subj_gender = self._parse_gender_neg(parse_seed)
            age_group = self._parse_age_group(parse_seed)
            neg_tokens += self._negatives_for_gender(subj_gender)
            neg_tokens += self._negatives_for_agegroup(age_group, subj_gender)
            neg_tokens += self._eye_others(eye_base)
            neg_tokens += self._hair_other_colors(hair_base)
            neg_tokens += self._hair_conf2(hair_in)
            neg_tokens += neg_head_tokens
            neg_tokens += self._split_tags(extra_neg_in)
            neg_tokens += self._split_tags(neg_suffix_in)
            neg_tokens = [self._collapse_spaces(t) for t in neg_tokens if self._collapse_spaces(t)]
            return neg_tokens

        base_neg = build_base_neg_tokens()

        # Front neg
        front_neg_tokens = self._dedup_preserve(list(base_neg) + FRONT_NEG_SUFFIX)
        Frontview_neg = ", ".join(front_neg_tokens)

        # Diagonal neg
        diag_neg_tokens = self._dedup_preserve(list(base_neg) + DIAGONAL_NEG_SUFFIX)
        Diagonal_neg = ", ".join(diag_neg_tokens)

        # Side neg (also convert any "eyes" -> "eye")
        side_neg_tokens = list(base_neg) + SIDE_NEG_SUFFIX
        side_neg_tokens = [re.sub(r"\beyes\b", "eye", t, flags=re.IGNORECASE) for t in side_neg_tokens]
        side_neg_tokens = self._dedup_preserve(side_neg_tokens)
        Sideview_neg = ", ".join(side_neg_tokens)

        # Rear neg
        rear_neg_tokens = self._dedup_preserve(list(base_neg) + REAR_NEG_SUFFIX)
        Rearview_neg = ", ".join(rear_neg_tokens)

        return (
            Frontview_pos, Sideview_pos, Diagonal_pos, Rearview_pos,
            img,
            Frontview_neg, Sideview_neg, Diagonal_neg, Rearview_neg,
            BAM_out,
            age_out_int,
        )


NODE_CLASS_MAPPINGS = {"Salia_BAM_2": Salia_BAM_2}
NODE_DISPLAY_NAME_MAPPINGS = {"Salia_BAM_2": "Salia_BAM_2"}