# modules/tagger_refinement.py

import os
import re
import csv
import joblib
import numpy as np
import pathlib
import logging
from collections import Counter, OrderedDict
from lark import Lark, Token
from lark.exceptions import ParseError
import hnswlib
import compress_fasttext
from scipy.sparse import csr_matrix

# --- Logging setup (silence noisy third-party loggers) ---
logging.basicConfig(level=logging.WARNING)
for _name in ("gensim", "hnswlib"):
    logging.getLogger(_name).setLevel(logging.ERROR)

# --- File path constants (data and models) ---
PATH_TF_IDF = "models/binaries/tf_idf_files_420.joblib"
PATH_HNSW_TAGS = "models/binaries/tfidf_hnsw_tags.bin"
PATH_FASTTEXT = "models/binaries/e621FastTextModel010Replacement_small.bin"
PATH_DB_RATING = "data/databases/word_rating_probabilities.csv"
PATH_DB_FLUFFYROCK = "data/databases/fluffyrock_3m.csv"

# --- GLOBAL MODEL VARIABLES (lazy-loaded caches) ---
_HNSW_TAG = None
_HNSW_DIM = None
_HNSW_N_TAG = None
_TF_IDF_COMPONENTS = None
_FASTTEXT_MODEL = None
_TAG_DATA_CACHE = None 
_NSFW_TAGS = None
_ARTIST_SET = None

# --- PARSER (Lark Grammar) ---
grammar = r"""
!start: (prompt | /[][():]/+)*
prompt: (emphasized | plain | comma | WHITESPACE)*
!emphasized: "(" prompt ")"
           | "(" prompt ":" [WHITESPACE] NUMBER [WHITESPACE] ")"
comma: ","
WHITESPACE: /\s+/
plain: /([^,\\\[\]():|]|\\.)+/
%import common.SIGNED_NUMBER -> NUMBER
"""
parser = Lark(grammar, start='start')

def load_tf_idf_components():
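    """Lazily load the TF-IDF components, the high-NSFW-probability tag set,
    and the artist name set into module-level caches. Safe to call repeatedly;
    missing files leave the corresponding cache empty."""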
    global _TF_IDF_COMPONENTS, _NSFW_TAGS, _ARTIST_SET
    
    if _TF_IDF_COMPONENTS is None:
        fname = PATH_TF_IDF
        if os.path.exists(fname):
            try:
                _TF_IDF_COMPONENTS = joblib.load(fname)
                if 'tag_to_row_index' in _TF_IDF_COMPONENTS:
                    _TF_IDF_COMPONENTS['row_to_tag'] = {idx: tag for tag, idx in _TF_IDF_COMPONENTS['tag_to_row_index'].items()}
                idf = _TF_IDF_COMPONENTS['idf']
                if isinstance(idf, dict):
                    t2c = _TF_IDF_COMPONENTS['tag_to_column_index']
                    n_cols = max(t2c.values()) + 1
                    idf_by_col = np.ones(n_cols, dtype=np.float32)
                    for term, col in t2c.items(): idf_by_col[col] = float(idf.get(term, 1.0))
                    _TF_IDF_COMPONENTS['idf'] = idf_by_col
            except Exception:
                _TF_IDF_COMPONENTS = {}
        else:
            _TF_IDF_COMPONENTS = {}

    if _NSFW_TAGS is None:
        _NSFW_TAGS = set()
        if os.path.exists(PATH_DB_RATING):
            with open(PATH_DB_RATING, 'r', newline='', encoding='utf-8') as csvfile:
                reader = csv.reader(csvfile); next(reader, None)
                for row in reader:
                    if float(row[1]) >= 0.95: _NSFW_TAGS.add(row[0])

    if _ARTIST_SET is None:
        _ARTIST_SET = set()
        if os.path.exists(PATH_DB_FLUFFYROCK):
            with open(PATH_DB_FLUFFYROCK, 'r', newline='', encoding='utf-8') as csvfile:
                reader = csv.reader(csvfile)
                for row in reader:
                    if row[0].startswith('by_'): _ARTIST_SET.add(row[0][3:])

def get_tag_data_cache():
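    """Build (once) lookup tables from the fluffyrock CSV: tag -> aliases,
    alias -> canonical tags, and tag -> post count. Non-ASCII characters are
    stripped from tags and aliases. Returns None if the CSV is missing."""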
    global _TAG_DATA_CACHE
    if _TAG_DATA_CACHE is None:
        if not os.path.exists(PATH_DB_FLUFFYROCK): return None
        def build_aliases(rev=False):
            d = {}
            with open(PATH_DB_FLUFFYROCK, 'r', newline='', encoding='utf-8') as f:
                r = csv.reader(f)
                for row in r:
                    tag = ''.join(c for c in row[0] if ord(c)<128)
                    alist = [] if row[3] == "null" else [''.join(c for c in alias if ord(c)<128) for alias in row[3].split(',')]
                    if rev:
                        for a in alist: d.setdefault(a, []).append(tag)
                    else: d[tag] = alist
            return d
        def build_counts():
            d = {}
            with open(PATH_DB_FLUFFYROCK, 'r', newline='', encoding='utf-8') as f:
                r = csv.reader(f)
                for row in r:
                    if row[2].isdigit(): d[row[0]] = int(row[2])
            return d
        _TAG_DATA_CACHE = {'tag2aliases': build_aliases(), 'alias2tags': build_aliases(rev=True), 'tag2count': build_counts()}
    return _TAG_DATA_CACHE

def is_artist(name):
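    # Artist names are stored in _ARTIST_SET without their "by_" prefix.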
    load_tf_idf_components()
    return name in _ARTIST_SET

def _l2_normalize_rows(mat):
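    # Row-wise L2 normalization; zero rows are left unchanged (norm forced to 1).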
    mat = np.asarray(mat, dtype=np.float32)
    norms = np.linalg.norm(mat, axis=1, keepdims=True)
    norms[norms == 0.0] = 1.0
    return mat / norms

def _ensure_hnsw_indexes():
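    """Lazily load the prebuilt HNSW index over the reduced TF-IDF rows.
    Artist rows (other than two special placeholders) are excluded from the
    element count; the index file itself is assumed to have been built with
    the same filtering and row ordering."""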
    global _HNSW_TAG, _HNSW_DIM, _HNSW_N_TAG
    if _HNSW_TAG is not None: return
    load_tf_idf_components()
    if not _TF_IDF_COMPONENTS: return
    reduced_matrix = _TF_IDF_COMPONENTS.get('reduced_matrix')
    row_to_tag = _TF_IDF_COMPONENTS.get('row_to_tag')
    if reduced_matrix is None or row_to_tag is None: return
    rm = _l2_normalize_rows(reduced_matrix).astype(np.float32)
    n_items, dim = rm.shape
    _HNSW_DIM = dim
    tag_rows = []
    for i in range(n_items):
        tag = row_to_tag.get(i, "")
        base = tag[3:] if tag.startswith("by_") else tag
        if tag in {"by_unknown_artist", "by_conditional_dnp"}: tag_rows.append(i); continue
        if not is_artist(base): tag_rows.append(i)
    
    tag_path = pathlib.Path(PATH_HNSW_TAGS)
    idx = hnswlib.Index(space='cosine', dim=dim)
    if tag_path.exists():
        try:
            idx.load_index(str(tag_path), max_elements=max(1, len(tag_rows)))
            idx.set_ef(200)
            _HNSW_TAG = idx
            _HNSW_N_TAG = len(tag_rows)
        except Exception: pass

def _hnsw_query(vec, k=2000):
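    """k-NN query against the tag index. Returns (labels, similarities), where
    similarity = 1 - cosine distance; empty lists if the index is unavailable."""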
    _ensure_hnsw_indexes()
    if _HNSW_TAG is None: return [], []
    q = np.asarray(vec, dtype=np.float32).reshape(-1)
    q_norm = np.linalg.norm(q)
    if q_norm > 0: q = q / q_norm
    labels, dists = _HNSW_TAG.knn_query(q, k=min(k, _HNSW_N_TAG))
    return labels[0], 1.0 - dists[0]

# Control tags: e621-style score/rating metatags and model-specific quality
# tags. Both groups are dropped from the prompt rather than refined.
special_tags = ["score:0", "score:1", "score:2", "score:3", "score:4", "score:5", "score:6", "score:7", "score:8", "score:9", "rating:s", "rating:q", "rating:e"]
MODEL_SPECIFIC_TAGS = {"masterpiece", "best quality", "good quality", "normal quality", "low quality", "worst quality", "highres", "lowres", "absurdres", "source_pony", "source_furry", "rating_safe", "rating_explicit", "rating_questionable"}

def remove_special_tags(original_string):
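    # Strip score:N / rating:X metatags before the prompt is parsed.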
    tags = [tag.strip() for tag in original_string.split(",")]
    remaining = [t for t in tags if t not in special_tags]
    return ", ".join(remaining)

def extract_tags(tree):
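    # '__ANON_1' is the name Lark assigns to the anonymous terminal created by
    # the `plain` rule's regex. This is fragile: it must be revisited whenever
    # the grammar gains or reorders anonymous terminals.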
    tags_with_positions = []
    def _traverse(node):
        if isinstance(node, Token) and node.type == '__ANON_1': tags_with_positions.append((node.value, node.start_pos))
        elif not isinstance(node, Token):
            for child in node.children: _traverse(child)
    _traverse(tree)
    return tags_with_positions

def construct_pseudo_vector(pseudo_doc_terms):
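    """Build a 1 x vocab sparse TF-IDF row from {term: weight}, applying the
    stored IDF values. Terms missing from the vocabulary are skipped."""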
    load_tf_idf_components()
    if not _TF_IDF_COMPONENTS: return None
    idf = _TF_IDF_COMPONENTS['idf']
    t2c = _TF_IDF_COMPONENTS['tag_to_column_index']
    cols, data = [], []
    for term, w in pseudo_doc_terms.items():
        j = t2c.get(term)
        if j is None: continue
        cols.append(j); data.append(w * idf[j])
    return csr_matrix((data, cols, [0, len(cols)]), shape=(1, len(idf)), dtype=np.float32)

def get_similar_tags_tfidf(pseudo_doc_terms):
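    """Project the pseudo-document into SVD space, query the HNSW index, and
    return an OrderedDict of prompt-escaped tags -> similarity (NSFW tags
    filtered out, sorted by descending similarity)."""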
    load_tf_idf_components()
    if not _TF_IDF_COMPONENTS: return {}
    pseudo = construct_pseudo_vector(pseudo_doc_terms)
    if pseudo is None: return {}
    svd = _TF_IDF_COMPONENTS['svd_model']
    reduced = svd.transform(pseudo)
    top_inds, top_sims = _hnsw_query(reduced)
    row_to_tag = _TF_IDF_COMPONENTS['row_to_tag']
    sim_dict = {}
    for i, sim in zip(top_inds, top_sims):
        tag = row_to_tag.get(int(i))
        if tag and tag not in _NSFW_TAGS: sim_dict[tag] = float(sim)
    sorted_sim = OrderedDict(sorted(sim_dict.items(), key=lambda x: x[1], reverse=True))
    return OrderedDict((k.replace('_', ' ').replace('(', '\\(').replace(')', '\\)'), v) for k, v in sorted_sim.items())

def rafine_etiketler(tags_string, context_weight=0.5):
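    """Refine a comma-separated tag string ("rafine etiketler" is Turkish for
    "refine tags"). Known tags, aliases, and artist names pass through
    unchanged; unknown tags are replaced by their best FastText candidate,
    re-ranked by TF-IDF context when context_weight > 0. Falls back to the
    input string on any parsing or model failure."""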
    global _FASTTEXT_MODEL
    if not tags_string or not tags_string.strip(): return ""
    load_tf_idf_components()
    data_cache = get_tag_data_cache()
    if _FASTTEXT_MODEL is None and os.path.exists(PATH_FASTTEXT):
        try: _FASTTEXT_MODEL = compress_fasttext.models.CompressedFastTextKeyedVectors.load(PATH_FASTTEXT)
        except Exception: pass
    if (not _FASTTEXT_MODEL or not data_cache or not _TF_IDF_COMPONENTS) and context_weight < 0.01: return tags_string 

    try:
        input_str = remove_special_tags(tags_string.lower())
        try: parsed = parser.parse(input_str)
        except ParseError: return tags_string
        raw_tags = extract_tags(parsed)
        tag_data = []
        for tag_text, start_pos in raw_tags:
            mod_tag = tag_text.replace('_', ' ').replace('\\(', '(').replace('\\)', ')').strip()
            tf_idf_tag = re.sub(r'\\([()])', r'\1', re.sub(r' ', '_', tag_text.strip().removeprefix('by ').removeprefix('by_')))
            tag_data.append({"original_tag": tag_text, "modified_tag": mod_tag, "tf_idf_matrix_tag": tf_idf_tag})
        
        terms = [t["tf_idf_matrix_tag"] for t in tag_data]
        # Make sure the HNSW index is loaded before deciding whether context
        # suggestions are available; queries are skipped when it is not.
        _ensure_hnsw_indexes()
        suggested_context = {}
        if _TF_IDF_COMPONENTS and _HNSW_TAG is not None:
            suggested_context = get_similar_tags_tfidf(dict(Counter(terms)))
        
        valid_tags = []
        for item in tag_data:
            orig, mod = item["original_tag"], item["modified_tag"]
            search = mod.replace(' ', '_')
            if mod in special_tags or orig in MODEL_SPECIFIC_TAGS: continue 
            if not data_cache:
                valid_tags.append(orig); continue
            if is_artist(search.lower().removeprefix('by_')):
                valid_tags.append(orig); continue
            if search in data_cache['tag2count'] or search in data_cache['tag2aliases']:
                valid_tags.append(orig); continue
            if not _FASTTEXT_MODEL:
                if context_weight < 0.5: valid_tags.append(orig)
                continue

            similar_words = _FASTTEXT_MODEL.most_similar(search, topn=20)
            candidates = []
            seen_cand = set()
            # Map each FastText neighbour to a canonical tag, either directly
            # or via the alias table, de-duplicating as we go.
            for sim_word, sim in similar_words:
                if sim_word in seen_cand: continue
                if sim_word in data_cache['tag2aliases']:
                    candidates.append((sim_word.replace('_', ' '), sim)); seen_cand.add(sim_word)
                else:
                    for t in data_cache['alias2tags'].get(sim_word, []):
                        if t not in seen_cand: candidates.append((t.replace('_', ' '), sim)); seen_cand.add(t)

            scored_candidates = []
            for word, sim in candidates:
                ctx_score = float(suggested_context.get(word) or suggested_context.get(word.replace('(', '\\(').replace(')', '\\)')) or 0.0)
                final_score = (1.0 - context_weight) * sim + (context_weight * ctx_score)
                scored_candidates.append((word, final_score))
            scored_candidates.sort(key=lambda x: x[1], reverse=True)
            if scored_candidates:
                best_candidate, best_score = scored_candidates[0]
                if best_score > 0.6: valid_tags.append(best_candidate)
        return ", ".join(valid_tags)
    except Exception: return tags_string
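

# --- Minimal usage sketch ---
# A hedged example, not part of the module's public API: it assumes the model
# binaries and CSV databases referenced by the PATH_* constants exist on disk.
# The prompt below is illustrative; "taill" is a deliberate typo that the
# FastText nearest-neighbour step should snap to a known tag when the model is
# available (otherwise the tag is dropped or kept, depending on context_weight).
if __name__ == "__main__":
    example = "masterpiece, score:9, by someartist, fluffy taill, canine"
    print(rafine_etiketler(example, context_weight=0.5))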