import gradio as gr
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import csr_matrix
import numpy as np
import joblib
import h5py
from io import BytesIO
import csv
import re
import random
import compress_fasttext
from collections import OrderedDict
from lark import Lark, Tree, Token
from lark.exceptions import ParseError
import json
import zipfile
from PIL import Image
import io
import os
import glob
import itertools
from itertools import islice
from pathlib import Path
import logging
import hnswlib
import pathlib
from collections import Counter
# Set up logging
# Minimal prod logging: warnings+ to stderr, no file by default
LOG_LEVEL = os.environ.get("PSQ_LOG_LEVEL", "WARNING").upper()
logging.basicConfig(
level=getattr(logging, LOG_LEVEL, logging.WARNING),
format="%(asctime)s %(levelname)s:%(message)s",
handlers=[logging.StreamHandler()] # no file -> avoids huge logs on Spaces
)
# Quiet down common noisy libs (optional)
for _name in ("gensim", "gradio", "hnswlib", "httpx", "uvicorn"):
logging.getLogger(_name).setLevel(logging.ERROR)
# Turn off Gradio analytics phone-home to avoid those background thread errors (optional)
os.environ["GRADIO_ANALYTICS_ENABLED"] = "0"
MASCOT_DIR = Path(__file__).parent / "mascotimages"
MASCOT_FILE = MASCOT_DIR / "transparentsquirrel.png"
faq_content="""
# Quick Start
Write your prompt as a simple comma‑separated list of things you want to see in your image, then press the Run button. Prompt Squirrel will:
* highlight any unknown or misspelled tags or syntax errors,
* suggest corrected tags,
* recommend additional tags based on context, and
* list artists who produce topically similar content.
You do not need to structure the prompt in any special way; just describe what you want in short phrases separated by commas.
# System Overview
Prompt Squirrel uses NLP and vector‑space methods to map a free‑form prompt to the structured tag vocabulary expected by tag‑based Stable Diffusion models. Internally, we use a grammar parser, FastText embeddings, TF‑IDF and SVD for context scoring, and an approximate‑nearest‑neighbor index for artist and suggested tag retrieval. Our goal is to help users write prompts that align with the tag distributions the model was trained on.
See the Technical Details heading below for more information about how these components are used.
# Prompting Guidance and Common Questions
## What text to image models does this tool work for?
This instance of Prompt Squirrel works for tag-based Stable Diffusion models fine-tuned on the popular e621 dataset. The tags it returns and especially the artist names will only be recognized by models in this category, which includes popular models such as Fluffyrock and NoobAI.
## Does input order matter?
No.
## Should I use underscores or spaces in the input tags?
As a rule, models trained on the dataset replace underscores with spaces, so spaces are preferred.
## Can I use parentheses or weights as in the Stable Diffusion Automatic1111 WebUI?
Yes, but only '(' and ')' and numerical weights. These are ignored in the underlying calculations but allowed so that prompts can be copied between tools with minimal editing. An acceptable example is:
((sunset over the mountains)), (clear sky:1.5), ((eagle flying high:2.0)), river, (fish swimming in the river:1.2), (campfire, (marshmallows:2.1):1.3), stars in the sky, ((full moon:1.8)), (wolf howling:1.7)
## Why are some valid tags marked as 'unknown', and why don't some artists ever get returned?
Some tags or artists are too infrequent in the dataset sample for us to make reliable predictions. Prompt Squirrel merges data from several sources, which may differ slightly in tag names or counts. Low‑frequency items or inconsistent entries may therefore not appear in results.
## Why do some suggested tags not have summaries or wiki links, and why do some look truncated?
Summaries and wiki links are extracted from dataset wiki pages. Some tags do not have pages, and summaries are heuristically extracted from the page beginnings, which can introduce small errors.
## Are there any special tags?
Yes. We normalized favorite counts to a range of 0–9. You may include 'score:0' through 'score:9'. These bias the output toward suggestions associated with images with higher or lower scores.
## Are there any other special tricks?
Yes. Repeating a tag gives it more weight in the artist‑similarity calculation. For example: 'red fox, red fox, red fox, score:7' will bias more strongly toward artists and suggested tags associated with 'red fox' than: 'red fox, score:7'.
## Why is this space tagged 'not‑for‑all‑audience'?
The dataset used by many tag‑based models contains both general‑audience and adult material. To avoid surprising users, mature tags are hidden unless the user explicitly enables them. This tool processes only text and metadata; no images from the dataset are displayed.
# Technical Details
## How is the artist list calculated?
Each artist is represented as a pseudo‑document containing the bag of all tags from their images. Your prompt is treated as another pseudo‑document. We compute similarity between the recognized tags in your prompt and each artist using TF‑IDF and truncated SVD, then retrieve the nearest artists using an approximate‑nearest‑neighbor index.
## How do the Suggested Tags work?
Each tag is represented as a pseudo-document containing the bag of all tags it co-occurs with in the dataset. We then employ exactly the same method on them as we did with artists to suggest tags similar to your prompt.
## How does the tag corrector work?
We treat each image as a document containing the set of its tags and randomly replace about 10% of the tags with aliases from the dataset's alias lists. We then train a FastText model on these documents so that tags and their aliases map to nearby vectors. This makes the system robust to spelling variations and rephrasings.
To incorporate the prompt's context, we again treat tags as pseudo-documents containing the bag of all tags they co-occur with, then compute TF‑IDF scores for the top candidate tags selected from the FastText model, combining this context score with the FastText similarity. The Context Similarity Weight slider controls how much influence the TF‑IDF context score has relative to the FastText embedding similarity.
## How do the sample images work?
In each gallery row, we choose an illustrative prompt and generate one image for each artist using the popular model Fluffyrock Unleashed, which was trained on this dataset. The 'No Artist' image serves as a baseline, using the same prompt without an artist name. Each subsequent row repeats this process with a different prompt. The first prompt was chosen to illustrate foreground style, the second to illustrate background style, and the third to illustrate character design. See SamplePrompts.csv in the Files section for the list of prompts used.
"""
TOOLTIP_NOTE_HTML = '<div class="hover-hint">Hover over underlined items for more info.</div>'
HOVER_HINT_CSS = """
/* Solid, visible underline for tagged items */
.gradio-container .hover-underline{
text-decoration-line: underline !important;
text-decoration-thickness: 2px;
text-underline-offset: 2px;
}
/* Small, subtle hint text */
.hover-hint{
font-size: 12px;
opacity: .85;
line-height: 1.2;
}
/* Wrapper to position the hint in the bottom-right of the annotated box */
.annotated-wrap{ position: relative; }
.annotated-wrap .hover-hint{
position: absolute;
right: 6px;
bottom: 6px;
text-align: right;
}
/* Extra padding around commas */
.badpad{
display:inline-block; /* make padding take effect */
padding: 0 0.60em; /* ~two extra spaces total on each side of ',' -> ~3-ch width overall */
border-radius: 3px; /* a tiny rounding looks nicer on a blocky background */
}
"""
try:
from gradio_client import utils as _gc_utils
_orig_get_type = _gc_utils.get_type
_orig_j2p = _gc_utils._json_schema_to_python_type
_orig_pub = _gc_utils.json_schema_to_python_type
def _get_type_safe(schema):
# Sometimes schema is a bare True/False (JSON Schema boolean form)
if not isinstance(schema, dict):
return "any"
return _orig_get_type(schema)
def _j2p_safe(schema, defs=None):
# Accept non-dict schemas (True/False/None) and treat as "any"
if not isinstance(schema, dict):
return "any"
return _orig_j2p(schema, defs or schema.get("$defs"))
def _pub_safe(schema):
# Public wrapper used by Gradio; keep it resilient too
if not isinstance(schema, dict):
return "any"
return _j2p_safe(schema, schema.get("$defs"))
_gc_utils.get_type = _get_type_safe
_gc_utils._json_schema_to_python_type = _j2p_safe
_gc_utils.json_schema_to_python_type = _pub_safe
except Exception as e:
print("gradio_client hotfix not applied:", e)
# -------------------------------------------------------------------------------
nsfw_threshold = 0.95  # Tags whose NSFW probability sum meets this value are hidden unless mature tags are enabled
css = HOVER_HINT_CSS + """
.scrollable-content{
max-height: 420px;
overflow-y: scroll; /* always show scrollbar */
overflow-x: hidden;
padding-right: 8px;
padding-bottom: 14px; /* keep the last row clear of the bottom edge */
scrollbar-gutter: stable; /* prevent layout shift as it fills */
/* Firefox */
scrollbar-width: auto;
scrollbar-color: rgba(180,180,180,.9) rgba(0,0,0,.15);
}
/* WebKit/Chromium (Chrome/Edge/Safari) */
.scrollable-content::-webkit-scrollbar{ width: 10px; }
.scrollable-content::-webkit-scrollbar-thumb{ background: rgba(180,180,180,.9); border-radius: 8px; }
.scrollable-content::-webkit-scrollbar-track{ background: rgba(0,0,0,.15); }
/* (Optional) make both scroll panes taller so they fill more of the column */
.pane-left .scrollable-content,
.pane-right .scrollable-content {
max-height: 610px; /* was 420px; tweak to taste */
}
"""
#Parser
grammar=r"""
!start: (prompt | /[][():]/+)*
prompt: (emphasized | plain | comma | WHITESPACE)*
!emphasized: "(" prompt ")"
| "(" prompt ":" [WHITESPACE] NUMBER [WHITESPACE] ")"
comma: ","
WHITESPACE: /\s+/
plain: /([^,\\\[\]():|]|\\.)+/
%import common.SIGNED_NUMBER -> NUMBER
"""
# Initialize the parser
parser = Lark(grammar, start='start')
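# Illustrative usage (a sketch, assuming the grammar above): a weighted A1111-style
# prompt parses, and extract_tags() below recovers the plain tag tokens with offsets.
#   tree = parser.parse("red fox, (detailed background:1.2)")
#   extract_tags(tree)  # e.g. [('red fox', 0, 'tag'), ('detailed background', 10, 'tag')]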
# ---------- Two HNSW indexes: artists and non-artist tags ----------
_HNSW_ART = None
_HNSW_TAG = None
_HNSW_DIM = None
_HNSW_N_ART = None
_HNSW_N_TAG = None
_HNSW_ART_PATH = pathlib.Path("tfidf_hnsw_artists.bin")
_HNSW_TAG_PATH = pathlib.Path("tfidf_hnsw_tags.bin")
def _l2_normalize_rows(mat: np.ndarray) -> np.ndarray:
mat = np.asarray(mat, dtype=np.float32)
norms = np.linalg.norm(mat, axis=1, keepdims=True)
norms[norms == 0.0] = 1.0
return mat / norms
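# Quick check (pure function, safe to verify in isolation):
#   _l2_normalize_rows(np.array([[3.0, 4.0]]))  # -> array([[0.6, 0.8]], dtype=float32)
# After row normalization, cosine similarity reduces to a plain dot product, which is
# why the 'cosine' HNSW space below agrees with unit-norm queries.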
def _ensure_dual_hnsw_indexes():
"""
Build/load two HNSW indexes over the SVD-reduced TF-IDF matrix:
• _HNSW_ART — rows whose tag (with optional 'by_' stripped) is in the artist_set
• _HNSW_TAG — only rows that are NOT artist tags
Index item IDs are the ORIGINAL row indices in reduced_matrix.
"""
global _HNSW_ART, _HNSW_TAG, _HNSW_DIM, _HNSW_N_ART, _HNSW_N_TAG
if _HNSW_ART is not None and _HNSW_TAG is not None:
return
reduced_matrix = tf_idf_components['reduced_matrix'] # (N, D)
row_to_tag = tf_idf_components['row_to_tag'] # {row:int -> "tag_with_underscores"}
rm = _l2_normalize_rows(reduced_matrix).astype(np.float32)
n_items, dim = rm.shape
# Partition rows
artist_rows = []
tag_rows = []
for i in range(n_items):
tag = row_to_tag.get(i, "")
# Strip leading "by_" if present in the TF-IDF vocabulary, but don't rely on it.
base = tag[3:] if tag.startswith("by_") else tag
# Some corpora contain buckets you don't want shown as artists:
if tag in {"by_unknown_artist", "by_conditional_dnp"}:
tag_rows.append(i)
continue
if is_artist(base):
artist_rows.append(i)
else:
tag_rows.append(i)
logging.debug(f"HNSW partition: artists={len(artist_rows)} non_artists={len(tag_rows)}")
# Helper: build or load an index for a subset of rows
def _build_or_load(path: pathlib.Path, rows: list[int]) -> hnswlib.Index:
idx = hnswlib.Index(space='cosine', dim=dim)
need_build = True
if path.exists():
try:
idx.load_index(str(path), max_elements=max(1, len(rows)))
# Rebuild if the saved index count doesn’t match our rows
if getattr(idx, "get_current_count", None) and idx.get_current_count() == len(rows) and len(rows) > 0:
need_build = False
else:
logging.debug(f"Rebuilding {path.name}: saved_count!=rows_len ({idx.get_current_count()} vs {len(rows)})")
except Exception as e:
logging.debug(f"Reload {path.name} failed, rebuilding: {e}")
if need_build:
try:
if path.exists():
path.unlink()
except Exception:
pass
idx.init_index(max_elements=max(1, len(rows)), ef_construction=200, M=16)
if rows:
idx.add_items(rm[rows], ids=np.asarray(rows, dtype=np.int32))
idx.save_index(str(path))
idx.set_ef(200)
return idx
_HNSW_ART = _build_or_load(_HNSW_ART_PATH, artist_rows)
_HNSW_TAG = _build_or_load(_HNSW_TAG_PATH, tag_rows)
_HNSW_DIM = dim
_HNSW_N_ART = len(artist_rows)
_HNSW_N_TAG = len(tag_rows)
def _hnsw_query(idx: hnswlib.Index, vec: np.ndarray, k: int):
"""
Query a given HNSW index with a (1, D) or (D,) vector in SVD space.
Returns (indices, sims) with cosine similarity scores.
"""
_ensure_dual_hnsw_indexes()
q = np.asarray(vec, dtype=np.float32).reshape(-1)
q_norm = np.linalg.norm(q)
if q_norm > 0:
q = q / q_norm
labels, dists = idx.knn_query(q, k=k)
inds = labels[0]
sims = 1.0 - dists[0] # cosine distance -> similarity
return inds, sims
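# Sketch of a direct query (assumes the model components are loaded and indexes built):
#   vec = tf_idf_components['svd_model'].transform(sparse_tfidf_row)  # shape (1, D)
#   inds, sims = _ann_tags_topk(vec, k=50)      # nearest non-artist tags
#   inds, sims = _ann_artists_topk(vec, k=50)   # nearest artist rows
# Returned ids are ORIGINAL row indices into reduced_matrix; map back via row_to_tag.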
def _ann_tags_topk(vec: np.ndarray, k: int):
_ensure_dual_hnsw_indexes()
k = min(k, _HNSW_N_TAG if _HNSW_N_TAG else 0)
return _hnsw_query(_HNSW_TAG, vec, k) if k else (np.array([], dtype=int), np.array([], dtype=float))
def _ann_artists_topk(vec: np.ndarray, k: int):
_ensure_dual_hnsw_indexes()
k = min(k, _HNSW_N_ART if _HNSW_N_ART else 0)
return _hnsw_query(_HNSW_ART, vec, k) if k else (np.array([], dtype=int), np.array([], dtype=float))
# ------------------------------------------------------------------
def _norm_tag_for_lookup(s: str) -> str:
# convert "name with spaces" -> "name_with_spaces" and unescape parens
return s.replace(' ', '_').replace('\\(', '(').replace('\\)', ')')
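# e.g. _norm_tag_for_lookup('wolf \\(canine\\)') -> 'wolf_(canine)'
# (display form with spaces/escaped parens -> underscore vocabulary form)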
# Function to extract tags
def extract_tags(tree):
tags_with_positions = []
def _traverse(node):
if isinstance(node, Token) and node.type == '__ANON_1':
tag_position = node.start_pos
tag_text = node.value
tags_with_positions.append((tag_text, tag_position, "tag"))
elif not isinstance(node, Token):
for child in node.children:
_traverse(child)
_traverse(tree)
return tags_with_positions
special_tags = ["score:0", "score:1", "score:2", "score:3", "score:4", "score:5", "score:6", "score:7", "score:8", "score:9", "rating:s", "rating:q", "rating:e"]
def remove_special_tags(original_string):
tags = [tag.strip() for tag in original_string.split(",")]
remaining_tags = [tag for tag in tags if tag not in special_tags]
removed_tags = [tag for tag in tags if tag in special_tags]
return ", ".join(remaining_tags), removed_tags
#Model specific tags
MODEL_SPECIFIC_TAGS = {
"masterpiece",
"best quality",
"good quality",
"normal quality",
"newest",
"absurdres",
"highres",
"safe",
"worst quality",
"early",
"low quality",
"lowres",
"explict content",
"very awa",
"worst aesthetic",
"score_9",
"score_8_up",
"score_7_up",
"score_6_up",
"score_5_up",
"score_4_up",
"source_pony",
"source_furry",
"source_cartoon",
"source_anime",
"rating_safe",
"rating_questionable",
"rating_explicit"
}
# Define a function to load all necessary components
def load_model_components(file_path):
# Ensure the file path is a Path object for robust path handling
file_path = Path(file_path)
# Check if the file exists
if not file_path.is_file():
raise FileNotFoundError(f"The specified joblib file was not found: {file_path}")
# Load all the model components from the joblib file
model_components = joblib.load(file_path)
# Create a reverse mapping from row index to tag
if 'tag_to_row_index' in model_components:
model_components['row_to_tag'] = {idx: tag for tag, idx in model_components['tag_to_row_index'].items()}
return model_components
# Load all components at the start
tf_idf_components = load_model_components('tf_idf_files_420.joblib')
idf = tf_idf_components['idf']
if isinstance(idf, dict):
# idf is term -> idf_value; build a column-aligned vector
t2c = tf_idf_components['tag_to_column_index']
n_cols = max(t2c.values()) + 1
idf_by_col = np.ones(n_cols, dtype=np.float32)
for term, col in t2c.items():
idf_by_col[col] = float(idf.get(term, 1.0))
tf_idf_components['idf'] = idf_by_col
nsfw_tags = set() # Initialize an empty set to store words meeting the threshold
# Open and read the CSV file
with open("word_rating_probabilities.csv", 'r', newline='', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
next(reader, None) # Skip the header row
for row in reader:
word = row[0] # The word is in the first column
probability_sum = float(row[1]) # The sum of probabilities is in the second column, convert to float for comparison
# Check if the probability sum meets the threshold and add the word to the set if it does
if probability_sum >= nsfw_threshold:
nsfw_tags.add(word)
# Read the set of valid artists into memory.
artist_set = set()
with open("fluffyrock_3m.csv", 'r', newline='', encoding='utf-8') as csvfile:
"""
Load artist names from a CSV file and store them in the global set.
Artist tags start with 'by_' and the prefix will be removed.
"""
reader = csv.reader(csvfile)
for row in reader:
tag_name = row[0] # Assuming the first column contains the tag names
if tag_name.startswith('by_'):
# Strip 'by_' from the start of the tag name and add to the set
artist_name = tag_name[3:] # Remove the first three characters 'by_'
artist_set.add(artist_name)
def is_artist(name):
return name in artist_set
sample_images_directory_path = 'sampleimages'
def generate_artist_image_tuples(top_artists, image_directory):
json_files = glob.glob(f'{image_directory}/*.json')
if not json_files:
return [], [] # no mapping present; return empty galleries safely
json_file_path = json_files[0]
with open(json_file_path, 'r') as json_file:
artist_to_file_map = json.load(json_file)
# DEBUG: mapping + baseline info
logging.debug("Gallery %s: loaded %d entries (map file=%s)",
image_directory, len(artist_to_file_map), json_file_path)
_base = artist_to_file_map.get("")
logging.debug(
"Gallery %s: baseline '' -> %r (exists=%s)",
image_directory,
_base,
os.path.exists(os.path.join(image_directory, _base)) if _base else None,
)
baseline_tuple = []
filename = artist_to_file_map.get("")
if filename:
image_path = os.path.join(image_directory, filename)
if os.path.exists(image_path):
baseline_tuple = [(image_path, "No Artist")]
artist_image_tuples = []
for artist in top_artists:
filename = artist_to_file_map.get(artist)
# DEBUG: per-artist resolution
logging.debug(
"Gallery %s: %s -> %r (exists=%s)",
image_directory,
artist,
filename,
os.path.exists(os.path.join(image_directory, filename)) if filename else None,
)
if filename:
image_path = os.path.join(image_directory, filename)
if os.path.exists(image_path):
artist_image_tuples.append((image_path, artist if artist else "No Artist"))
return baseline_tuple, artist_image_tuples
def clean_tag(tag):
return ''.join(char for char in tag if ord(char) < 128)
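# e.g. clean_tag("pokémon") -> "pokmon"  (non-ASCII characters are dropped, not transliterated)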
#Normally returns tag to aliases, but when reverse=True, returns alias to tags
def build_aliases_dict(filename, reverse=False):
aliases_dict = {}
with open(filename, 'r', newline='', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
tag = clean_tag(row[0])
alias_list = [] if row[3] == "null" else [clean_tag(alias) for alias in row[3].split(',')]
if reverse:
for alias in alias_list:
aliases_dict.setdefault(alias, []).append(tag)
else:
aliases_dict[tag] = alias_list
return aliases_dict
def build_tag_count_dict(filename):
with open(filename, 'r', newline='', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
result_dict = {}
for row in reader:
key = row[0]
value = int(row[2]) if row[2].isdigit() else None
if value is not None:
result_dict[key] = value
return result_dict
def build_tag_id_wiki_dict(filename='wiki_pages-2023-08-08.csv'):
"""
Reads a CSV file and returns a dictionary mapping tag names to tuples of
(number, most relevant line from the wiki entry). Rows with a non-integer in the first column are ignored.
The most relevant line is the first line that does not start with "thumb" and is not blank.
Parameters:
- filename: The path to the CSV file.
Returns:
- A dictionary where each key is a tag name and each value is a tuple (number, most relevant wiki entry line).
"""
tag_data = {}
with open(filename, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
# Skip the header row
next(reader)
for row in reader:
try:
# Attempt to convert the first column to an integer
number = int(row[0])
except ValueError:
# If conversion fails, skip this row
continue
tag = row[3]
wiki_entry_full = row[4]
# Process the wiki_entry to find the most relevant line
relevant_line = ''
for line in wiki_entry_full.split('\n'):
if line.strip() and not line.startswith("thumb"):
relevant_line = line
break
# Map the tag to a tuple of (number, relevant_line)
tag_data[tag] = (number, relevant_line)
return tag_data
def create_html_tables_for_tags(subtable_heading, item_heading, word_similarity_tuples, tag2count, tag2idwiki):
    # Wrap the subtable heading in a styled element (bold, larger font)
    html_str = (
        f"<h3 style='font-weight:bold; font-size:1.1em;'>{subtable_heading}</h3>"
        f"<table><thead><tr><th>{item_heading}</th><th>Similarity</th><th>Count</th></tr></thead><tbody>"
    )
    # Loop through the results and add table rows for each
    for word, sim in word_similarity_tuples:
        word_with_underscores = word.replace(' ', '_')
        word_with_escaped_parentheses = escape_parens_for_display(word)
        count = tag2count.get(word_with_underscores.replace("\\(", "(").replace("\\)", ")"), 0)  # Get the count if available, otherwise default to 0
        tag_id, wiki_entry = tag2idwiki.get(word_with_underscores, (None, ''))
        # Check if tag_id and wiki_entry are valid
        if tag_id is not None and wiki_entry:
            # Construct the URL for the tag's wiki page
            wiki_url = f"https://e621.net/wiki_pages/{tag_id}"
            # Make the tag a hyperlink with a tooltip
            tag_element = (
                f'<a href="{wiki_url}" target="_blank" class="hover-underline" '
                f'title="{escape_html(wiki_entry)}">{word_with_escaped_parentheses}</a>'
            )
        else:
            # Display the word without any hyperlink or tooltip
            tag_element = word_with_escaped_parentheses
        # Include the tag element in the table row
        html_str += f"<tr><td>{tag_element}</td><td>{round(sim, 3)}</td><td>{count}</td></tr>"
    html_str += "</tbody></table>"
    return html_str
def create_top_artists_table(top_artists):
    html_str = "<div class='scrollable-content'>"
    html_str += "<h3 style='font-weight:bold; font-size:1.1em;'>Top Artists</h3>"
    html_str += "<table><thead><tr><th>Artist</th><th>Similarity</th></tr></thead><tbody>"
    for artist, score in top_artists:
        artist_disp = escape_html(escape_parens_for_display(artist))
        similarity_percentage = "{:.1f}%".format(score * 100)
        html_str += (
            f"<tr><td>{artist_disp}</td>"
            f"<td>{similarity_percentage}</td></tr>"
        )
    html_str += "</tbody></table></div>"
    return html_str
def construct_pseudo_vector(pseudo_doc_terms, idf, term_to_column_index):
cols, data = [], []
for term, w in pseudo_doc_terms.items():
j = term_to_column_index.get(term)
if j is None:
continue
cols.append(j)
data.append(w * idf[j])
n_cols = len(idf)
indptr = [0, len(cols)]
return csr_matrix((data, cols, indptr), shape=(1, n_cols), dtype=np.float32)
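# Toy example (hypothetical idf vector and vocabulary, for illustration only):
#   idf = np.array([1.0, 2.0], dtype=np.float32)
#   t2c = {"fox": 0, "red": 1}
#   construct_pseudo_vector({"fox": 2, "red": 1}, idf, t2c).toarray()
#   -> array([[2., 2.]], dtype=float32)   # term count x idf per column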
def get_top_indices(reduced_pseudo_vector, reduced_matrix):
# Compute cosine similarities
similarities = cosine_similarity(reduced_pseudo_vector, reduced_matrix).flatten()
# Get sorted tag indices based on similarities, in descending order
sorted_indices = np.argsort(-similarities)
# Return the top N indices
return sorted_indices
def get_tfidf_reduced_similar_tags(pseudo_doc_terms, allow_nsfw_tags):
idf = tf_idf_components['idf']
term_to_column_index = tf_idf_components['tag_to_column_index']
row_to_tag = tf_idf_components['row_to_tag']
svd = tf_idf_components['svd_model']
# 1) Build the pseudo TF-IDF, reduce to SVD space (unchanged)
pseudo_tfidf_vector = construct_pseudo_vector(pseudo_doc_terms, idf, term_to_column_index)
reduced_pseudo_vector = svd.transform(pseudo_tfidf_vector) # shape (1, D)
# 2) ANN: only fetch nearest non-artist candidates (no full-matrix cosine)
K = 2000 # tune for speed/recall
top_inds, top_sims = _ann_tags_topk(reduced_pseudo_vector, k=K)
# 3) Build similarity dict from those candidates
tag_similarity_dict = {}
for i, sim in zip(top_inds, top_sims):
tag = row_to_tag.get(int(i))
if tag is not None:
tag_similarity_dict[tag] = float(sim)
if not allow_nsfw_tags:
tag_similarity_dict = {t: s for t, s in tag_similarity_dict.items() if t not in nsfw_tags}
# 4) Sort & escape like before
sorted_tag_similarity_dict = OrderedDict(sorted(tag_similarity_dict.items(), key=lambda x: x[1], reverse=True))
transformed_sorted_tag_similarity_dict = OrderedDict(
(key.replace('_', ' ').replace('(', '\\(').replace(')', '\\)'), val)
for key, val in sorted_tag_similarity_dict.items()
)
return transformed_sorted_tag_similarity_dict
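# The result is an OrderedDict of display-form tags -> cosine similarity, best first,
# e.g. OrderedDict([('red fox', 0.93), ('canine', 0.88), ...]); the values depend on
# the loaded TF-IDF/SVD components, so treat these numbers as illustrative.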
def create_html_placeholder(title="", content="", placeholder_height=400, placeholder_width="100%"):
    # Include a title in the same style as the top artists table heading
    html_placeholder = f"<h3 style='font-weight:bold; font-size:1.1em;'>{title}</h3>"
    # Conditionally add content if present
    if content:
        html_placeholder += f"<p>{content}</p>"
    # Add the placeholder div with specified height and width
    html_placeholder += f"<div style='height:{placeholder_height}px; width:{placeholder_width};'></div>"
    return html_placeholder
def find_similar_tags(test_tags, tag_to_context_similarity, context_similarity_weight, allow_nsfw_tags):
#Initialize stuff
if not hasattr(find_similar_tags, "fasttext_small_model"):
find_similar_tags.fasttext_small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load('e621FastTextModel010Replacement_small.bin')
tag_aliases_file = 'fluffyrock_3m.csv'
if not hasattr(find_similar_tags, "tag2aliases"):
find_similar_tags.tag2aliases = build_aliases_dict(tag_aliases_file)
if not hasattr(find_similar_tags, "alias2tags"):
find_similar_tags.alias2tags = build_aliases_dict(tag_aliases_file, reverse=True)
if not hasattr(find_similar_tags, "tag2count"):
find_similar_tags.tag2count = build_tag_count_dict(tag_aliases_file)
if not hasattr(find_similar_tags, "tag2idwiki"):
find_similar_tags.tag2idwiki = build_tag_id_wiki_dict()
modified_tags = [tag_info['modified_tag'] for tag_info in test_tags]
transformed_tags = [tag.replace(' ', '_') for tag in modified_tags]
# Find similar tags and prepare data for tables
html_content = ""
html_content += "
Unknown Tags
" # Heading for the table
tags_added = False
bad_entities = []
known_entities_in_prompt = []
encountered_modified_tags = set()
for tag_info in test_tags:
original_tag = tag_info['original_tag']
modified_tag = tag_info['modified_tag']
start_pos = tag_info['start_pos']
end_pos = tag_info['end_pos']
node_type = tag_info['node_type']
# Build the underscore form up-front
modified_tag_for_search = modified_tag.replace(' ', '_')
        # Special score/rating tags (kept as-is)
        if modified_tag in special_tags:
            bad_entities.append({"entity": "Special", "start": start_pos, "end": end_pos})
            continue
        # Model-specific tokens (e.g. "score_9") must match the user's input *exactly*
        # (no pre-normalization). Use the original token as typed in the prompt, lowercased.
        original_raw = tag_info["original_tag"].strip().lower()
        if original_raw in MODEL_SPECIFIC_TAGS:
            bad_entities.append({"entity": "Model Specific", "start": start_pos, "end": end_pos})
            continue
if modified_tag in encountered_modified_tags:
bad_entities.append({"entity":"Duplicate", "start":start_pos, "end":end_pos})
continue
encountered_modified_tags.add(modified_tag)
norm_artist = (
modified_tag_for_search
.lower()
.removeprefix('by_') # tolerate users typing "by something" or not
)
if is_artist(norm_artist):
by_key = f"by_{norm_artist}"
# try by_* first, then raw form as fallback
count = (find_similar_tags.tag2count.get(by_key) or
find_similar_tags.tag2count.get(modified_tag_for_search, 0))
tag_id, wiki_entry = (
find_similar_tags.tag2idwiki.get(by_key) or
find_similar_tags.tag2idwiki.get(modified_tag_for_search, (None, ''))
)
wiki_url = f"https://e621.net/wiki_pages/{tag_id}" if tag_id is not None and wiki_entry else ""
known_entities_in_prompt.append({
"entity": "Known Tag",
"start": start_pos,
"end": end_pos,
"count": count,
"wiki_url": wiki_url,
"wiki_entry": wiki_entry
})
continue
similar_words = find_similar_tags.fasttext_small_model.most_similar(modified_tag_for_search, topn = 100)
result, seen = [], set(transformed_tags)
        if modified_tag_for_search in find_similar_tags.tag2aliases:
            if "_" in original_tag:  # Implicitly tell the user that they should get rid of the underscores
                result.append((modified_tag_for_search.replace('_', ' '), 1))
                seen.add(modified_tag)
else: #The user correctly did not put underscores in their tag
count = find_similar_tags.tag2count.get(modified_tag_for_search, 0) # Get the count if available, otherwise default to 0
tag_id, wiki_entry = find_similar_tags.tag2idwiki.get(modified_tag_for_search, (None, ''))
# Check if tag_id and wiki_entry are valid
wiki_url = ""
if tag_id is not None and wiki_entry:
# Construct the URL for the tag's wiki page
wiki_url = f"https://e621.net/wiki_pages/{tag_id}"
known_entities_in_prompt.append({"entity":"Known Tag", "start":start_pos, "end":end_pos, "count":count, "wiki_url":wiki_url, "wiki_entry":wiki_entry})
continue
else:
for item in similar_words:
similar_word, similarity = item
if similar_word not in seen:
if similar_word in find_similar_tags.tag2aliases:
result.append((similar_word.replace('_', ' '), round(similarity, 3)))
seen.add(similar_word)
else:
for similar_tag in find_similar_tags.alias2tags.get(similar_word, []):
if similar_tag not in seen:
result.append((similar_tag.replace('_', ' '), round(similarity, 3)))
seen.add(similar_tag)
# Remove NSFW tags if appropriate.
if not allow_nsfw_tags:
result = [(w, s) for (w, s) in result if w.replace(' ', '_') not in nsfw_tags]
# --- Context re-scoring (keys match how get_tfidf_reduced_similar_tags formats them) ---
def _ctx_score(name: str) -> float:
v = tag_to_context_similarity.get(name)
if v is None:
# TF-IDF dict escapes parentheses; candidates from FT do not.
v = tag_to_context_similarity.get(name.replace('(', '\\(').replace(')', '\\)'))
return float(v) if v is not None else 0.0
# If the slider is at 1.0, only keep candidates that exist in the TF-IDF context list.
if context_similarity_weight >= 0.999:
ctx_keys = set(tag_to_context_similarity.keys())
result = [
(w, s) for (w, s) in result
if (w in ctx_keys) or (w.replace('(', '\\(').replace(')', '\\)') in ctx_keys)
]
# Linear blend: final = (1-λ)*fasttext + λ*context (no extra 0.5 scaling)
result = [
(w, (1.0 - context_similarity_weight) * s + context_similarity_weight * _ctx_score(w))
for (w, s) in result
]
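        # e.g. with context_similarity_weight = 0.5, a candidate with FastText
        # similarity 0.8 and context score 0.4 lands at 0.5*0.8 + 0.5*0.4 = 0.6.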
result = sorted(result, key=lambda x: x[1], reverse=True)[:20]
html_content += create_html_tables_for_tags(modified_tag, "Corrected Tag", result, find_similar_tags.tag2count, find_similar_tags.tag2idwiki)
bad_entities.append({"entity":"Unknown Tag", "start":start_pos, "end":end_pos})
tags_added=True
# If no tags were processed, add a message; otherwise close the wrapper div
if not tags_added:
html_content = create_html_placeholder(title="Unknown Tags", content="No Unknown Tags Found")
else:
html_content += ""
return html_content, bad_entities, known_entities_in_prompt
def build_tag_offsets_dicts(new_image_tags_with_positions):
# Structure the data for HighlightedText
tag_data = []
for tag_text, start_pos, nodetype in new_image_tags_with_positions:
# Modify the tag
modified_tag = tag_text.replace('_', ' ').replace('\\(', '(').replace('\\)', ')').strip()
        artist_matrix_tag = tag_text.replace('_', ' ').strip()  # escaped parentheses are left as typed
tf_idf_matrix_tag = re.sub(r'\\([()])', r'\1', re.sub(r' ', '_', tag_text.strip().removeprefix('by ').removeprefix('by_')))
# Calculate the end position based on the original tag length
end_pos = start_pos + len(tag_text)
# Append the structured data for each tag
tag_data.append({
"original_tag": tag_text,
"start_pos": start_pos,
"end_pos": end_pos,
"modified_tag": modified_tag,
"artist_matrix_tag": artist_matrix_tag,
"tf_idf_matrix_tag": tf_idf_matrix_tag,
"node_type": nodetype
})
return tag_data
def augment_bad_entities_with_regex(text):
bad_entities = []
#comma at end
match = re.search(r',(?=\s*$)', text)
if match:
index = match.start()
bad_entities.append({"entity":"Remove Final Comma", "start":index, "end":index+1})
match = re.search(r'\([^()]*(,)\s*\)\s*$', text)
if match:
index = match.start(1)
bad_entities.append({"entity":"Remove Final Comma", "start":index, "end":index+1})
match = re.search(r'\([^()]*(,)\s*:\s*\d+(\.\d+)?\s*\)\s*$', text)
if match:
index = match.start(1)
bad_entities.append({"entity":"Remove Final Comma", "start":index, "end":index+1})
    # Comma immediately after a closing parenthesis, multiple occurrences
    for match in re.finditer(r'(?<=\))(,)', text):
        index = match.start(1)
        bad_entities.append({"entity": "Move Comma Inside Parentheses", "start": index, "end": index + 1})
    # Double commas between tags, multiple occurrences
    for match in re.finditer(r',\s*(,)', text):
        index = match.start(1)
        bad_entities.append({"entity": "Double Comma", "start": index, "end": index + 1})
    return bad_entities
def escape_html(s: str) -> str:
    return (
        s.replace("&", "&amp;")
         .replace("<", "&lt;")
         .replace(">", "&gt;")
         .replace('"', "&quot;")
         .replace("'", "&#39;")
    )
def escape_parens_for_display(s: str) -> str:
# ensure single backslash before any literal parens in display text
return (
s.replace("\\(", "(")
.replace("\\)", ")")
.replace("(", "\\(")
.replace(")", "\\)")
)
def format_annotated_html(bad_entities, known_entities, text):
tooltip_map = {
"Unknown Tag": "This may not be a valid e621 tag. Consider removing or replacing it with tag(s) from the \"Unknown Tags\" section.",
"Duplicate": "This tag has appeared multiple times in your prompt. Consider removing the copies.",
"Remove Final Comma": "There should be no comma at the end of your prompt. Consider removing it.",
"Move Comma Inside Parentheses": "In most e621-based models, the comma following a tag functions as an "attention anchor", carrying most of the tag's information. It should therefore be assigned the same weight as the rest of the tag. So instead of "(lineless:1.1),", consider "(lineless,:1.1)" or "(lineless,)"",
"Double Comma": "One comma between tags is considered ample.",
"Model Specific": "This is not an e621 tag, but may still be valid with the right model. Check your model's documentation. If the tag is not mentioned in the documentation, do not use it."
}
color_map = {
"Double Comma": ("black", "#E69F00"), # Orange — black text
"Duplicate": ("black", "#F0E442"), # Yellow — black text
"Model Specific": ("black", "#56B4E9"), # Sky blue — black text
"Move Comma Inside Parentheses": ("white", "#009E73"), # Bluish green
"Remove Final Comma": ("white", "#0072B2"), # Blue
"Unknown Tag": ("white", "#D55E00"), # Vermilion — danger
}
# Splice from the original text so indexes stay valid.
combined = sorted(bad_entities + known_entities, key=lambda x: x["start"], reverse=True)
html_text = text
for entity in combined:
start = entity["start"]
end = entity["end"]
label = entity["entity"]
# Escape only the replaced segment (keeps indices correct).
segment = text[start:end]
disp = escape_html(escape_parens_for_display(segment))
if label == "Known Tag":
wiki_url = entity.get("wiki_url", "")
count = entity.get("count", 0)
wiki_entry = entity.get("wiki_entry", "")
            sanitized_wiki_entry = escape_html(wiki_entry) if wiki_entry else "Unavailable"
            title_attr = f"Count: {count} | Wiki: {sanitized_wiki_entry}"
            if wiki_url:
                html_part = (
                    f'<a href="{wiki_url}" target="_blank" class="hover-underline" '
                    f'title="{title_attr}">{disp}</a>'
                )
            else:
                html_part = (
                    f'<span class="hover-underline" title="{title_attr}">{disp}</span>'
                )
        else:
            fg, bg = color_map.get(label, ("black", "white"))
            tooltip = tooltip_map.get(label, "")
            # Make single-character issues (commas) easier to see by padding the span
            pad_labels = ("Remove Final Comma", "Move Comma Inside Parentheses", "Double Comma")
            pad_class = "badpad" if label in pad_labels else ""
            class_attr = f' class="{pad_class}"' if pad_class else ""
            html_part = (
                f'<span{class_attr} style="color:{fg}; background-color:{bg};" '
                f'title="{tooltip}">{disp}</span>'
            )
html_text = html_text[:start] + html_part + html_text[end:]
# Color key (only for labels that actually appeared)
color_key_html = "Key:"
used_labels = {e["entity"] for e in bad_entities}
for label, (fg, bg) in color_map.items():
if label in used_labels:
tooltip = tooltip_map.get(label, "")
color_key_html += (
f" {label}"
)
color_key_html += "
"
# Wrap the whole annotated area so we can place the hint inside it
annotated_box = (
""
f"{html_text}"
f"{TOOLTIP_NOTE_HTML}"
"
"
)
return annotated_box + color_key_html
def find_similar_artists(original_tags_string, top_n, context_similarity_weight, allow_nsfw_tags):
try:
new_tags_string = original_tags_string.lower()
new_tags_string, removed_tags = remove_special_tags(new_tags_string)
# Parse the prompt
parsed = parser.parse(new_tags_string)
# Extract tags from the parsed tree
new_image_tags = extract_tags(parsed)
tag_data = build_tag_offsets_dicts(new_image_tags)
        # Suggested tags
        suggested_tags_html_content = "<div class='scrollable-content'>"
        suggested_tags_html_content += "<h3 style='font-weight:bold; font-size:1.1em;'>Suggested Tags</h3>"  # Heading for the table
terms = [item["tf_idf_matrix_tag"] for item in tag_data] + removed_tags
suggested_tags = get_tfidf_reduced_similar_tags(dict(Counter(terms)), allow_nsfw_tags)
unseen_tags_data, bad_entities, known_entities = find_similar_tags(tag_data, suggested_tags, context_similarity_weight, allow_nsfw_tags)
#Bad tags stuff
bad_entities.extend(augment_bad_entities_with_regex(new_tags_string))
bad_entities.sort(key=lambda x: x['start'])
#bad_tags_illustrated_string = {"text":new_tags_string, "entities":bad_entities}
bad_tags_illustrated_html = format_annotated_html(bad_entities, known_entities, new_tags_string)
# Create a set of tags that should be filtered out
filter_tags = {entry["original_tag"].strip() for entry in tag_data}
filter_tags_norm = { _norm_tag_for_lookup(t.lower().removeprefix('by ').removeprefix('by_')) for t in filter_tags }
suggested_tags_filtered = OrderedDict(
(k, v) for k, v in suggested_tags.items()
if k not in filter_tags and _norm_tag_for_lookup(k.lower()) not in filter_tags_norm
)
# Splitting the dictionary into two based on the condition
def _norm_no_by(s: str) -> str:
n = _norm_tag_for_lookup(s)
return n[3:] if n.startswith("by_") else n
suggested_artist_tags_filtered = OrderedDict(
(k, v) for k, v in suggested_tags_filtered.items()
if is_artist(_norm_no_by(k))
)
suggested_non_artist_tags_filtered = OrderedDict(
(k, v) for k, v in suggested_tags_filtered.items()
if not is_artist(_norm_no_by(k)) and k not in special_tags
)
topnsuggestions = list(islice(suggested_non_artist_tags_filtered.items(), 100))
suggested_tags_html_content += create_html_tables_for_tags("-", "Suggested Tag", topnsuggestions, find_similar_tags.tag2count, find_similar_tags.tag2idwiki)
suggested_tags_html_content += ""
# --- Artist stuff: query artist-only index directly ---
idf_vec = tf_idf_components['idf']
t2c = tf_idf_components['tag_to_column_index']
svd = tf_idf_components['svd_model']
pseudo_terms = dict(Counter(terms))
pseudo_vec = construct_pseudo_vector(pseudo_terms, idf_vec, t2c)
reduced_q = svd.transform(pseudo_vec)
K_art = max(100, top_n * 10) # widen search to stabilize ranks
art_inds, art_sims = _ann_artists_topk(reduced_q, k=K_art)
row_to_tag = tf_idf_components['row_to_tag']
bad_labels = {"by_unknown_artist", "by_conditional_dnp", "unknown_artist", "conditional_dnp"}
top_artists_raw = []
for idx_i, sim in zip(art_inds, art_sims):
tag = row_to_tag.get(int(idx_i), "")
if not tag:
continue
# Normalize spaces to underscores for reliable checks
norm = tag.replace(" ", "_")
# Drop known non-artist placeholders
if norm in bad_labels:
continue
# Accept either "by_foo" or plain "foo"
base = norm[3:] if norm.startswith("by_") else norm
# Guard: only keep if this *really* is an artist we know
if not is_artist(base):
continue
name_disp = base.replace("_", " ")
top_artists_raw.append((name_disp, float(sim)))
if not top_artists_raw:
logging.debug("No artist hits. First few neighbor labels: %s",
[row_to_tag.get(int(i), "") for i in art_inds[:10]])
# take the best unique names, in order
seen = set()
deduped = []
for n, s in top_artists_raw:
if n not in seen:
deduped.append((n, s))
seen.add(n)
if len(deduped) >= top_n:
break
top_artists = deduped
logging.debug("Top artists (n=%d): %s", len(top_artists), top_artists)
top_artists_str = create_top_artists_table(top_artists)
        dynamic_prompts_formatted_artists = "{" + "|".join(
            [escape_parens_for_display(artist) for artist, _ in top_artists]
        ) + "}"
        image_galleries = []
        for root, dirs, files in os.walk(sample_images_directory_path):
            for name in dirs:
                dir_path = os.path.join(root, name)
                baseline, artists = generate_artist_image_tuples([n for n, _ in top_artists], dir_path)
                logging.debug("Gallery built for %s -> baseline=%d, artists_found=%d", dir_path, len(baseline), len(artists))
                image_galleries.append(baseline)  # Baseline ("No Artist") gallery for this prompt
                image_galleries.append(artists)   # Per-artist gallery for this prompt
return (unseen_tags_data, bad_tags_illustrated_html, suggested_tags_html_content, top_artists_str, dynamic_prompts_formatted_artists, *image_galleries)
except ParseError:
# Build empty galleries so the tuple length matches the declared outputs
empty_galleries = []
for _root, _dirs, _files in os.walk(sample_images_directory_path):
for _ in _dirs:
empty_galleries.extend([[], []]) # one empty list per Gallery component
return (
create_html_placeholder(title="Unknown Tags", content="Parse Error"),
"Parse Error: Check for mismatched parentheses or something",
create_html_placeholder(title="Suggested Tags"),
"", # top_artists
"", # dynamic_prompts
*empty_galleries,
)
with gr.Blocks(css=css) as app:
with gr.Group():
with gr.Row():
with gr.Column(scale=3, elem_classes=["prompt-col"]):
image_tags = gr.Textbox(
label="Enter Prompt",
placeholder="e.g. fox, outside, detailed background, ...",
lines=1 # Enter submits (see .submit() below)
)
bad_tags_illustrated_string = gr.HTML()
with gr.Column(scale=1):
# Load once so Gradio serves it directly (no /file= URL needed)
_mascot_pil = Image.open(MASCOT_FILE).convert("RGBA")
mascot_img = gr.Image(
value=_mascot_pil, # pass the in-memory image
show_label=False,
interactive=False,
height=220, # keep the same visual size
elem_id="mascot"
)
submit_button = gr.Button(variant="primary")
with gr.Row():
with gr.Column(scale=3):
with gr.Group():
with gr.Row():
context_similarity_weight = gr.Slider(minimum=0, maximum=1, value=0.5, step=0.1, label="Context Similarity Weight")
allow_nsfw = gr.Checkbox(label="Include Mature Tags", value=False)
with gr.Row():
with gr.Column(scale=2, elem_classes=["pane-left"]):
unseen_tags = gr.HTML(
label="Unknown Tags",
value=create_html_placeholder(title="Unknown Tags"),
elem_id="unseen_html",
elem_classes=["scroll-fade"],
)
with gr.Column(scale=1, elem_classes=["pane-right"]):
suggested_tags = gr.HTML(
label="Suggested Tags",
value=create_html_placeholder(title="Suggested Tags"),
elem_id="suggested_html",
elem_classes=["scroll-fade"],
)
with gr.Column(scale=1):
with gr.Group():
num_artists = gr.Slider(minimum=1, maximum=100, value=10, step=1, label="Number of artists")
top_artists = gr.HTML(
label="Top Artists",
value=create_html_placeholder(title="Top Artists"),
elem_id="artists_html",
elem_classes=["scroll-fade"],
)
gr.HTML("""
""", visible=False)
    dynamic_prompts = gr.Textbox(label="Dynamic Prompts Format", info="For the Automatic1111 WebUI (https://github.com/AUTOMATIC1111/stable-diffusion-webui) with the Dynamic Prompts extension (https://github.com/adieyal/sd-dynamic-prompts), if you want to try each artist individually.")
galleries = []
for root, dirs, files in os.walk(sample_images_directory_path):
for name in dirs:
with gr.Row():
baseline = gr.Gallery(allow_preview=False, rows=1, columns=1, height=420, scale=3)
styles = gr.Gallery(allow_preview=False, rows=2, columns=5, height=420, scale=8)
galleries.extend([baseline, styles])
submit_button.click(
find_similar_artists,
inputs=[image_tags, num_artists, context_similarity_weight, allow_nsfw],
outputs=[unseen_tags, bad_tags_illustrated_string, suggested_tags, top_artists, dynamic_prompts] + galleries
)
# Also run when pressing Enter in the prompt box
image_tags.submit(
find_similar_artists,
inputs=[image_tags, num_artists, context_similarity_weight, allow_nsfw],
outputs=[unseen_tags, bad_tags_illustrated_string, suggested_tags, top_artists, dynamic_prompts] + galleries
)
gr.Markdown(faq_content)
app.queue().launch(allowed_paths=[str(MASCOT_DIR)])