LoloSemper committed
Commit 41297dc · verified · 1 Parent(s): fb1b4eb

Update app.py

Files changed (1):
  1. app.py +73 -777
app.py CHANGED
@@ -1,805 +1,101 @@
- # app.py — Universal Conlang Translator (simplified: semi-lossless by default)
- # Required files in the repo root:
- # - lexicon_minimax.json
- # - lexicon_komin.json
- # - lexicon_master.json
- #
- # requirements.txt (for HF Spaces):
- # gradio>=4.36.0
- # spacy>=3.7.4
- # es_core_news_sm @ https://github.com/explosion/spacy-models/releases/download/es_core_news_sm-3.7.0/es_core_news_sm-3.7.0-py3-none-any.whl
- # en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl
 
- import os
- import re
- import json
- import base64
- import zlib
- import hashlib
- from typing import Dict, Tuple, Optional
- import gradio as gr

- # ------------ Expected files ------------
- LEX_MINI = "lexicon_minimax.json"
- LEX_KOMI = "lexicon_komin.json"
- LEX_MASTER = "lexicon_master.json"

- # ------------ Normalization ------------
- WORD_RE = re.compile(r"[A-Za-zÁÉÍÓÚÜÑáéíóúüñ]+", re.UNICODE)
- STRIP = str.maketrans("ÁÉÍÓÚÜÑáéíóúüñ", "AEIOUUNaeiouun")

- def norm_es(w: str) -> str:
-     return re.sub(r"[^a-záéíóúüñ]", "", (w or "").lower()).translate(STRIP)

- def norm_en(w: str) -> str:
-     return re.sub(r"[^a-z]", "", (w or "").lower())

- # ------------ Lexicon loading ------------
- def load_json(path: str):
-     if not os.path.exists(path): return None
-     with open(path, "r", encoding="utf-8") as f:
-         return json.load(f)

- def load_lexicons():
-     mm = load_json(LEX_MINI) or {}
-     kk = load_json(LEX_KOMI) or {}
-     master = load_json(LEX_MASTER) or {}
-
-     es2mini = mm.get("mapping", {})
-     es2komi = kk.get("mapping", {})
-     mini2es = {v: k for k, v in es2mini.items()}
-     komi2es = {v: k for k, v in es2komi.items()}
-
-     es2en_lemma: Dict[str, str] = {}
-     en2es_lemma: Dict[str, str] = {}
-     en2mini, en2komi = {}, {}
-     mini2en, komi2en = {}, {}
-
-     if isinstance(master, dict) and "entries" in master:
-         for e in master["entries"]:
-             es = norm_es(str(e.get("lemma_es", "")))
-             en = norm_en(str(e.get("lemma_en", "")))
-             mi = str(e.get("minimax", ""))
-             ko = str(e.get("komin", ""))
-             if es and en:
-                 es2en_lemma.setdefault(es, en)
-                 en2es_lemma.setdefault(en, es)
-             if en and mi: en2mini.setdefault(en, mi)
-             if en and ko: en2komi.setdefault(en, ko)
-
-     mini2en = {v: k for k, v in en2mini.items()}
-     komi2en = {v: k for k, v in en2komi.items()}
-
-     return (es2mini, es2komi, mini2es, komi2es,
-             en2mini, en2komi, mini2en, komi2en,
-             es2en_lemma, en2es_lemma)

- (ES2MINI, ES2KOMI, MINI2ES, KOMI2ES,
-  EN2MINI, EN2KOMI, MINI2EN, KOMI2EN,
-  ES2EN_LEMMA, EN2ES_LEMMA) = load_lexicons()
-
- # ------------ Reversible OOV encoding (semi-lossless mode) ------------
- # Both alphabets must hold 64 *unique* characters, or the str.maketrans
- # round-trip is not reversible.
- ALPHA_MINI64 = "@ptkmnslraeiouy0123456789><=:/!?.+-_*#bcdfghjvqwxzACEGHIJKLMNOPRS"[:64]
- CJK_BASE = (
-     "天地人日月山川雨風星火水木土金石光影花草鳥犬猫魚"
-     "東西南北中外上下午夜明暗手口目耳心言書家道路門"
-     "大小長短早晚高低新古青紅白黒鉄銀銅玉米茶酒麦"
-     "文学楽音画体気電海空森林雪雲砂島橋城村国自由静"
- )
- ALPHA_CJK64 = (CJK_BASE * 2)[:64]
-
- def to_custom_b64(b: bytes, alphabet: str) -> str:
-     std = base64.b64encode(b).decode("ascii")
-     trans = str.maketrans(
-         "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
-         alphabet
-     )
-     return std.translate(trans).rstrip("=")
-
- def from_custom_b64(s: str, alphabet: str) -> bytes:
-     trans = str.maketrans(
-         alphabet,
-         "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
-     )
-     std = s.translate(trans)
-     pad = "=" * ((4 - len(std) % 4) % 4)
-     return base64.b64decode(std + pad)
-
- def enc_oov_minimax(token: str) -> str:
-     return "~" + to_custom_b64(token.encode("utf-8"), ALPHA_MINI64)
- def dec_oov_minimax(code: str) -> str:
-     try: return from_custom_b64(code[1:], ALPHA_MINI64).decode("utf-8")
-     except Exception: return code
-
- def enc_oov_komin(token: str) -> str:
-     return "「" + to_custom_b64(token.encode("utf-8"), ALPHA_CJK64) + "」"
- def dec_oov_komin(code: str) -> str:
-     try: return from_custom_b64(code[1:-1], ALPHA_CJK64).decode("utf-8")
-     except Exception: return code
-
- def is_oov_minimax(code: str) -> bool:
-     return code.startswith("~") and len(code) > 1
- def is_oov_komin(code: str) -> bool:
-     return len(code) >= 2 and code.startswith("「") and code.endswith("」")
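
A quick round-trip check for the OOV codec (an editor's sketch, not part of the commit):

    # Any unknown word should survive encode → decode unchanged,
    # which is what makes the "~"-prefixed and 「…」-wrapped tokens reversible.
    word = "algoritmo"
    code = enc_oov_minimax(word)
    assert code.startswith("~")
    assert dec_oov_minimax(code) == word
    assert dec_oov_komin(enc_oov_komin(word)) == word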
-
- # ------------ Optional spaCy ------------
- USE_SPACY = False
- try:
-     import spacy
-     try:
-         nlp_es = spacy.load("es_core_news_sm")
-         nlp_en = spacy.load("en_core_web_sm")
-         USE_SPACY = True
-     except Exception:
-         nlp_es = nlp_en = None
- except Exception:
-     nlp_es = nlp_en = None
-
- def lemma_of(tok, src_lang: str) -> str:
-     if src_lang == "Español":
-         return norm_es(tok.lemma_ if tok.lemma_ else tok.text)
-     else:
-         return norm_en(tok.lemma_ if tok.lemma_ else tok.text)
-
- # ------------ Choosing the predicative sentence ------------
- def pick_predicative_sentence(doc):
-     sents = list(doc.sents) if doc.has_annotation("SENT_START") else [doc]
-     candidates = []
-     for s in sents:
-         roots = [t for t in s if t.dep_ == "ROOT" and t.pos_ in ("VERB", "AUX")]
-         if not roots:
-             continue
-         root = roots[0]
-         has_q = "?" in s.text
-         has_subj = any(t.dep_.startswith("nsubj") for t in root.children)
-         score = (1 if has_q else 0) + (1 if has_subj else 0) + (len(s) / 1000.0)
-         candidates.append((score, s))
-     if not candidates:
-         return doc
-     return sorted(candidates, key=lambda x: x[0], reverse=True)[0][1].as_doc()
  def is_content_token(t) -> bool:
-     if t.pos_ in ("INTJ", "DET", "ADP", "SCONJ", "CCONJ", "PART", "SYM", "PUNCT"):
-         return False
-     if t.dep_ in ("discourse", "intj", "vocative", "dep"):
-         return False
-     low = t.lower_.strip("¿?¡!.,;:()[]{}\"'").lower()
-     # Allow wh-words in questions (advmod/obl)
-     is_wh = t.tag_.startswith("W") or low in {
-         "como", "cómo", "que", "qué", "quien", "quién", "donde", "dónde", "cuando", "cuándo",
-         "porqué", "por", "por qué", "cuanto", "cuánto",
-         "which", "what", "who", "where", "when", "why", "how",
-     }
-     if is_wh and t.dep_ not in ("advmod", "obl") and "?" not in t.doc.text:
-         return False
-     if low in {"hola", "hello", "hi", "hey", "adios", "adiós", "ciao"}:
-         return False
-     return True
-
- # ------------ Lemma → code mapping ------------
- def code_es(lemma: str, target: str) -> str:
-     lemma = norm_es(lemma)
-     if target == "Minimax-ASCII":
-         return ES2MINI.get(lemma) or enc_oov_minimax(lemma)
-     else:
-         return ES2KOMI.get(lemma) or enc_oov_komin(lemma)
-
- def code_en(lemma: str, target: str) -> str:
-     lemma = norm_en(lemma)
-     if target == "Minimax-ASCII":
-         if EN2MINI: return EN2MINI.get(lemma) or enc_oov_minimax(lemma)
-         return enc_oov_minimax(lemma)
-     else:
-         if EN2KOMI: return EN2KOMI.get(lemma) or enc_oov_komin(lemma)
-         return enc_oov_komin(lemma)
-
196
- # ------------ Fraseador compacto ------------
197
- TAM_MINI = {"Pres":"P", "Past":"T", "Fut":"F", "UNK":"P"}
198
- TAM_KOMI = {"Pres":"Ⓟ", "Past":"Ⓣ", "Fut":"Ⓕ", "UNK":"Ⓟ"}
199
-
200
- def detect_polarity(doc) -> bool:
201
- return "?" in doc.text
202
-
203
- def detect_neg(doc) -> bool:
204
- for t in doc:
205
- if t.dep_ == "neg" or t.lower_ in ("no","not","n't"):
206
- return True
207
- return False
208
-
209
- def detect_tense(root):
210
- m = str(root.morph)
211
- if "Tense=Past" in m: return "Past"
212
- if "Tense=Fut" in m: return "Fut"
213
- if "Tense=Pres" in m: return "Pres"
214
- for c in root.children:
215
- if c.pos_ == "AUX":
216
- cm = str(c.morph)
217
- if "Tense=Past" in cm: return "Past"
218
- if c.lower_ == "will": return "Fut"
219
- return "Pres"
220
-
221
- def detect_person(root, src_lang: str) -> Optional[str]:
222
- m = str(root.morph)
223
- person_str = "3"
224
- number_str = "s"
225
- if "Person=" in m:
226
- for feat in m.split("|"):
227
- if feat.startswith("Person="):
228
- person_str = feat.split("=")[1]
229
- elif feat.startswith("Number="):
230
- number_str = "p" if feat.split("=")[1] == "Plur" else "s"
231
- return person_str + number_str
232
- return _person_of_doc(root.doc, src_lang)
-
- def extract_core(doc):
-     root = next((t for t in doc if t.dep_ == "ROOT" and t.pos_ in ("VERB", "AUX")), doc[0])
-     subs, objs, obls, advs = [], [], [], []
-     for t in root.children:
-         if t.dep_ in ("nsubj", "nsubj:pass", "csubj"):
-             subs.append(t)
-         elif t.dep_ in ("obj", "dobj", "iobj"):
-             objs.append(t)
-         elif t.dep_ in ("obl", "pobj"):
-             obls.append(t)
-         elif t.dep_ in ("advmod", "advcl") and t.pos_ == "ADV":
-             advs.append(t)
-     subs.sort(key=lambda x: x.i); objs.sort(key=lambda x: x.i)
-     obls.sort(key=lambda x: x.i); advs.sort(key=lambda x: x.i)
-     return root, subs, objs, obls, advs
-
- def _person_of_doc(doc, src_lang: str) -> Optional[str]:
-     try:
-         root = next((t for t in doc if t.dep_ == "ROOT"), doc[0])
-         subj = next((t for t in root.children if t.dep_.startswith("nsubj")), None)
-         if subj is None: return None
-         plur = ("Number=Plur" in str(subj.morph)) if src_lang == "Español" else (subj.tag_ in ("NNS", "NNPS"))
-         low = subj.lower_
-         if src_lang == "Español":
-             if low in ("yo",): return "1p" if plur else "1s"
-             if low in ("tú", "vos"): return "2p" if plur else "2s"
-             if low in ("usted", "él", "ella"): return "3p" if plur else "3s"
-             lem = lemma_of(subj, "Español")
-             if lem in ("yo", "nosotros"): return "1p" if plur else "1s"
-             if lem in ("tú", "vosotros"): return "2p" if plur else "2s"
-             return "3p" if plur else "3s"
-         else:
-             if low in ("i",): return "1p" if plur else "1s"
-             if low in ("you",): return "2p" if plur else "2s"
-             if low in ("he", "she", "it"): return "3p" if plur else "3s"
-             return "3p" if plur else "3s"
-     except Exception:
-         return None

  def realize_minimax(doc, src_lang: str, drop_articles=True, zero_copula=True, semi_lossless=False, person_hint="2s"):
-     root, subs, objs, obls, advs = extract_core(doc)
-     tense = detect_tense(root)
-     is_q, is_neg = detect_polarity(doc), detect_neg(doc)
-
-     vlem = lemma_of(root, src_lang) if USE_SPACY else ("ser" if "?" in doc.text else "estar")
-     vcode = code_es(vlem, "Minimax-ASCII") if src_lang == "Español" else code_en(vlem, "Minimax-ASCII")
-
-     tail = TAM_MINI.get(tense, "P")
-     if semi_lossless:
-         pi = detect_person(root, src_lang) or person_hint
-         tail += pi
-     if is_neg: tail += "N"
-     if is_q: tail += "Q"
-     if tail:
-         vcode = f"{vcode}·{tail}"
-
-     def realize_np(tokens):
-         outs = []
-         for t in tokens:
-             if not USE_SPACY or is_content_token(t):
-                 lem = lemma_of(t, src_lang) if USE_SPACY else t.text
-                 code = code_es(lem, "Minimax-ASCII") if src_lang == "Español" else code_en(lem, "Minimax-ASCII")
-                 if semi_lossless and USE_SPACY and (t.tag_ in ("NNS", "NNPS") or "Number=Plur" in str(t.morph)):
-                     code = f"{code}[PL]"
-                 outs.append(code)
-         return outs
-
-     S = realize_np(subs)
-     O = realize_np(objs) + realize_np(obls)
-     ADV = []
-     wh_adv = []  # wh-words, fronted in questions
-     for a in advs:
-         if not USE_SPACY or is_content_token(a):
-             lem = lemma_of(a, src_lang) if USE_SPACY else a.text
-             code = code_es(lem, "Minimax-ASCII") if src_lang == "Español" else code_en(lem, "Minimax-ASCII")
-             if is_q and a.dep_ == "advmod" and a.tag_.startswith("W"):
-                 wh_adv.append(code)
-             else:
-                 ADV.append(code)
-
-     if zero_copula and not semi_lossless and vlem in ("ser", "estar", "be") and tense == "Pres" and not is_neg and not is_q:
-         parts = S + O + ADV
-     else:
-         parts = [vcode] + S + O + ADV
-     full_parts = wh_adv + parts  # wh-word goes first in questions
-     return " ".join(p for p in full_parts if p)
-
- def realize_komin(doc, src_lang: str, drop_articles=True, zero_copula=True, semi_lossless=False, person_hint="2s"):
-     root, subs, objs, obls, advs = extract_core(doc)
-     tense, is_q, is_neg = detect_tense(root), detect_polarity(doc), detect_neg(doc)
-
-     vlem = lemma_of(root, src_lang) if USE_SPACY else ("ser" if "?" in doc.text else "estar")
-     vcode = code_es(vlem, "Kōmín-CJK") if src_lang == "Español" else code_en(vlem, "Kōmín-CJK")
-
-     P_SUBJ, P_OBJ = "ᵖ", "ᵒ"
-     NEG_M, Q_FIN = "̆", "?"
-     TAM = TAM_KOMI.get(tense, "Ⓟ")
-
-     if semi_lossless:
-         pi = detect_person(root, src_lang) or person_hint
-         TAM = TAM + f"[{pi}]"
-
-     def realize_np(tokens, particle):
-         outs = []
-         for t in tokens:
-             if not USE_SPACY or is_content_token(t):
-                 lem = lemma_of(t, src_lang) if USE_SPACY else t.text
-                 code = code_es(lem, "Kōmín-CJK") if src_lang == "Español" else code_en(lem, "Kōmín-CJK")
-                 if semi_lossless and USE_SPACY and (t.tag_ in ("NNS", "NNPS") or "Number=Plur" in str(t.morph)):
-                     code = f"{code}[PL]"
-                 outs.append(code + particle)
-         return outs
-
-     S = realize_np(subs, P_SUBJ)
-     O = realize_np(objs + obls, P_OBJ)
-     ADV = []
-     for a in advs:
-         if not USE_SPACY or is_content_token(a):
-             lem = lemma_of(a, src_lang) if USE_SPACY else a.text
-             ADV.append(code_es(lem, "Kōmín-CJK") if src_lang == "Español" else code_en(lem, "Kōmín-CJK"))
-
-     v_form = vcode + TAM + (NEG_M if is_neg else "")
-
-     if zero_copula and not semi_lossless and vlem in ("ser", "estar", "be") and tense == "Pres" and not is_neg and not is_q:
-         parts = S + O + ADV
-     else:
-         parts = S + O + ADV + [v_form]
-     out = " ".join(parts)
-     if is_q: out += " " + Q_FIN
-     return out
-
- # ------------ Lossless (compressed Base85 sidecar) ------------
- # a85 (non-Adobe) emits the ASCII range '!'..'u', so match that whole
- # range inside §(...); anything narrower drops valid blobs.
- SIDECAR_B85_RE = re.compile(r"\s?§\((?P<b85>[!-u]+)\)$")
-
- def b85_enc_raw(s: str) -> str:
-     comp = zlib.compress(s.encode("utf-8"), 9)
-     return base64.a85encode(comp, adobe=False).decode("ascii")
-
- def b85_dec_raw(b85s: str) -> str:
-     comp = base64.a85decode(b85s.encode("ascii"), adobe=False)
-     return zlib.decompress(comp).decode("utf-8")
-
- def attach_sidecar_b85(conlang_text: str, original_text: str) -> str:
-     blob = b85_enc_raw(original_text)
-     return f"{conlang_text} §({blob})"
-
- def extract_sidecar_b85(text: str) -> Optional[str]:
-     m = SIDECAR_B85_RE.search(text)
-     if not m: return None
-     try:
-         return b85_dec_raw(m.group("b85"))
-     except Exception:
-         return None
-
- def strip_sidecar_b85(text: str) -> str:
-     return SIDECAR_B85_RE.sub("", text).rstrip()
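
A round-trip sketch for the Base85 sidecar (editor's illustration, not part of the commit; "X·P2s" is a placeholder conlang string):

    msg = "Hola, ¿cómo estás?"
    tagged = attach_sidecar_b85("X·P2s", msg)
    assert extract_sidecar_b85(tagged) == msg
    assert strip_sidecar_b85(tagged) == "X·P2s"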
-
- # ------------ Pure lexical encode / decode ------------
- def encode_simple(text: str, src_lang: str, target: str) -> str:
-     if not text.strip(): return ""
-     def repl_es(m):
-         key = norm_es(m.group(0))
-         code = ES2MINI.get(key) if target == "Minimax-ASCII" else ES2KOMI.get(key)
-         return code or (enc_oov_minimax(m.group(0)) if target == "Minimax-ASCII" else enc_oov_komin(m.group(0)))
-     def repl_en(m):
-         key = norm_en(m.group(0))
-         table = EN2MINI if target == "Minimax-ASCII" else EN2KOMI
-         if table and key in table:
-             return table[key]
-         return enc_oov_minimax(m.group(0)) if target == "Minimax-ASCII" else enc_oov_komin(m.group(0))
-     repl = repl_es if src_lang == "Español" else repl_en
-     return WORD_RE.sub(repl, text)
-
- def pluralize_es(word: str) -> str:
-     exceptions = {"uno": "unos", "buen": "buenos", "hombre": "hombres"}
-     if word in exceptions: return exceptions[word]
-     if word.endswith("z"): return word[:-1] + "ces"
-     if word.endswith(("a", "e", "i", "o", "u")): return word + "s"
-     return word + "es"
-
- def pluralize_en(word: str) -> str:
-     exceptions = {"man": "men", "woman": "women", "child": "children"}
-     if word in exceptions: return exceptions[word]
-     if word.endswith("y") and len(word) > 1 and word[-2] not in "aeiou": return word[:-1] + "ies"
-     if word.endswith(("s", "sh", "ch", "x", "z")): return word + "es"
-     return word + "s"
-
- def pluralize(word: str, tgt_lang: str) -> str:
-     return pluralize_es(word) if tgt_lang == "Español" else pluralize_en(word)
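
Spot checks for the pluralizers (editor's sketch, not part of the commit):

    assert pluralize("luz", "Español") == "luces"
    assert pluralize("hombre", "Español") == "hombres"
    assert pluralize("city", "English") == "cities"
    assert pluralize("box", "English") == "boxes"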
-
- PRON_ES = {"yo", "tú", "él", "ella", "nosotros", "vosotros", "ellos", "ellas", "usted", "ustedes"}
- PRON_EN = {"i", "you", "he", "she", "it", "we", "they"}
-
- # Verb tail: ·<tense><person><N?><Q?>, e.g. "·P2sQ"
- mini_tail_re = re.compile(r"^(?P<stem>.+?)·(?P<tail>[PTFNQ123sp]+)$")
-
  def decode_simple(text: str, source: str, tgt_lang: str) -> str:
-     if not text.strip():
-         return ""
-     code2es = MINI2ES if source == "Minimax-ASCII" else KOMI2ES
-     code2en = MINI2EN if source == "Minimax-ASCII" else KOMI2EN
-     pron_set = PRON_ES if tgt_lang == "Español" else PRON_EN
-
-     if source == "Kōmín-CJK":
-         # Simplified for Kōmín: handles the basics; the focus is on Minimax
-         return " ".join([code2es.get(w, w) for w in text.split() if w != "?"])
-
-     # Minimax
      tokens = text.split()
-     if not tokens: return ""
-
-     lemma_tokens = []
-     pl_flags = []
-     verb_idx = -1
-     verb_lemma = None
-     verb_tense = "Pres"
-     verb_person = "3s"
-     has_q = False
-     is_neg = False
-
-     for i, part in enumerate(tokens):
-         look = part.replace("[PL]", "")
-         had_pl = "[PL]" in part
-
-         m = mini_tail_re.match(look)
          if m:
-             verb_idx = len(lemma_tokens)
              stem = m.group("stem")
              tail = m.group("tail")
-             vlem_es = code2es.get(stem)
-             vlem_en = code2en.get(stem) if code2en else None
-             vlem = vlem_es if tgt_lang == "Español" else (vlem_en or vlem_es or stem)
-             if not vlem:
-                 if is_oov_minimax(stem):
-                     vlem = dec_oov_minimax(stem)
-                 else:
-                     vlem = stem
-             lemma_tokens.append(vlem)
-             pl_flags.append(False)
-
-             # Parse the tail: tense, person, negation, question
-             if tail:
-                 if tail[0] in "PTF":
-                     verb_tense = {"P": "Pres", "T": "Past", "F": "Fut"}.get(tail[0], "Pres")
-                 pos = 1
-                 person = "3s"
-                 if len(tail) > pos and tail[pos] in "123":
-                     pos += 1
-                     if len(tail) > pos and tail[pos] in "sp":
-                         person = tail[pos-1] + tail[pos]
-                         pos += 1
-                     else:
-                         person = tail[pos-1] + "s"
-                 verb_person = person
-                 is_neg = "N" in tail[pos:]
-                 has_q = "Q" in tail[pos:]
-             verb_lemma = vlem
-             continue
-
-         # Not a verb
-         w_es = code2es.get(look)
-         w_en = code2en.get(look) if code2en else None
-         w = w_es if tgt_lang == "Español" else (w_en or w_es or look)
-         if not w:
-             if is_oov_minimax(look):
-                 w = dec_oov_minimax(look)
-             else:
-                 w = look
-         lemma_tokens.append(w)
-         pl_flags.append(had_pl)
-
-     if verb_idx == -1:
-         # Fallback: zero copula
-         verb_lemma = "ser" if tgt_lang == "Español" else "be"
-         verb_tense = "Pres"
-         verb_person = "3s"
-         v_conj = _es_conj(verb_lemma, verb_tense, verb_person) if tgt_lang == "Español" else _en_conj(verb_lemma, verb_tense, verb_person)
-         lemma_tokens.insert(1 if lemma_tokens else 0, v_conj)
-         out_text = " ".join(lemma_tokens)
-     else:
-         # Conjugate the verb
-         conj_func = _es_conj if tgt_lang == "Español" else _en_conj
-         v_conj = conj_func(verb_lemma, verb_tense, verb_person)
-         if is_neg:
-             neg_prefix = "no " if tgt_lang == "Español" else "not "
-             v_conj = neg_prefix + v_conj
-
-         # Reorder to SVO
-         post_v = lemma_tokens[verb_idx + 1:]
-         pl_post = pl_flags[verb_idx + 1:]
-         s_idx = next((j for j, w in enumerate(post_v) if w.lower() in pron_set), None)
-         S = post_v[s_idx] if s_idx is not None else None
-         if S:
-             if pl_post[s_idx]:
-                 S = pluralize(S, tgt_lang)
-             del post_v[s_idx]
-             del pl_post[s_idx]
-
-         O_ADV = [pluralize(post_v[k], tgt_lang) if pl_post[k] else post_v[k] for k in range(len(post_v))]
-
-         parts = [p for p in [S, v_conj] + O_ADV if p]
-         out_text = " ".join(parts)
-
-     # Wh-word in questions: if the first token is a wh-word, move it to the front
-     if has_q and lemma_tokens and lemma_tokens[0].lower() in {"como", "cómo", "what", "how"}:
-         wh = lemma_tokens.pop(0)
-         out_text = f"{wh} {out_text}"
-
-     # Question wrapping
-     if has_q:
-         start_q = "¿" if tgt_lang == "Español" else ""
-         out_text = f"{start_q}{out_text.capitalize()}?"
-
      return out_text

-
- # ------------ Minimal conjugators ------------
- _ES_SUBJ = {"1s": "yo", "2s": "tú", "3s": "él/ella", "1p": "nosotros", "2p": "vosotros", "3p": "ellos"}
- _EN_SUBJ = {"1s": "I", "2s": "you", "3s": "he", "1p": "we", "2p": "you", "3p": "they"}
-
- def _es_conj_regular(lemma, tense, person):
-     if not lemma.endswith(("ar", "er", "ir")): return lemma
-     stem = lemma[:-2]; vtype = lemma[-2:]
-     pres = {
-         "ar": {"1s": "o", "2s": "as", "3s": "a", "1p": "amos", "2p": "áis", "3p": "an"},
-         "er": {"1s": "o", "2s": "es", "3s": "e", "1p": "emos", "2p": "éis", "3p": "en"},
-         "ir": {"1s": "o", "2s": "es", "3s": "e", "1p": "imos", "2p": "ís", "3p": "en"},
-     }
-     pret = {
-         "ar": {"1s": "é", "2s": "aste", "3s": "ó", "1p": "amos", "2p": "asteis", "3p": "aron"},
-         "er": {"1s": "í", "2s": "iste", "3s": "ió", "1p": "imos", "2p": "isteis", "3p": "ieron"},
-         "ir": {"1s": "í", "2s": "iste", "3s": "ió", "1p": "imos", "2p": "isteis", "3p": "ieron"},
-     }
-     fut = {"1s": "é", "2s": "ás", "3s": "á", "1p": "emos", "2p": "éis", "3p": "án"}
-     if tense == "Pres": return stem + pres[vtype].get(person, pres[vtype]["3s"])
-     if tense == "Past": return stem + pret[vtype].get(person, pret[vtype]["3s"])
-     return lemma + fut.get(person, fut["3s"])
-
- def _es_conj(lemma, tense, person):
-     if lemma == "ser":
-         tab = {
-             "Pres": {"1s": "soy", "2s": "eres", "3s": "es", "1p": "somos", "2p": "sois", "3p": "son"},
-             "Past": {"1s": "fui", "2s": "fuiste", "3s": "fue", "1p": "fuimos", "2p": "fuisteis", "3p": "fueron"},
-             "Fut": {"1s": "seré", "2s": "serás", "3s": "será", "1p": "seremos", "2p": "seréis", "3p": "serán"},
-         }
-         return tab[tense].get(person, tab[tense]["3s"])
-     if lemma == "estar":
-         tab = {
-             "Pres": {"1s": "estoy", "2s": "estás", "3s": "está", "1p": "estamos", "2p": "estáis", "3p": "están"},
-             "Past": {"1s": "estuve", "2s": "estuviste", "3s": "estuvo", "1p": "estuvimos", "2p": "estuvisteis", "3p": "estuvieron"},
-             "Fut": {"1s": "estaré", "2s": "estarás", "3s": "estará", "1p": "estaremos", "2p": "estaréis", "3p": "estarán"},
-         }
-         return tab[tense].get(person, tab[tense]["3s"])
-     if lemma == "ir":
-         tab = {
-             "Pres": {"1s": "voy", "2s": "vas", "3s": "va", "1p": "vamos", "2p": "vais", "3p": "van"},
-             "Past": {"1s": "fui", "2s": "fuiste", "3s": "fue", "1p": "fuimos", "2p": "fuisteis", "3p": "fueron"},
-             "Fut": {"1s": "iré", "2s": "irás", "3s": "irá", "1p": "iremos", "2p": "iréis", "3p": "irán"},
-         }
-         return tab[tense].get(person, tab[tense]["3s"])
-     return _es_conj_regular(lemma, tense, person)
-
- def _en_conj(lemma, tense, person):
-     if lemma == "be":
-         if tense == "Pres":
-             return {"1s": "am", "2s": "are", "3s": "is", "1p": "are", "2p": "are", "3p": "are"}.get(person, "is")
-         if tense == "Past":
-             return {"1s": "was", "2s": "were", "3s": "was", "1p": "were", "2p": "were", "3p": "were"}.get(person, "was")
-         return "be"
-     if lemma == "have":
-         if tense == "Pres": return "has" if person == "3s" else "have"
-         if tense == "Past": return "had"
-         return "have"
-     if lemma == "go":
-         if tense == "Past": return "went"
-         return "goes" if (tense == "Pres" and person == "3s") else "go"
-     if lemma == "do":
-         if tense == "Past": return "did"
-         return "does" if (tense == "Pres" and person == "3s") else "do"
-
-     if tense == "Pres":
-         if person == "3s":
-             if lemma.endswith("y") and (len(lemma) < 2 or lemma[-2] not in "aeiou"):
-                 return lemma[:-1] + "ies"
-             if lemma.endswith(("s", "sh", "ch", "x", "z", "o")):
-                 return lemma + "es"
-             return lemma + "s"
-         return lemma
-     elif tense == "Past":
-         if lemma.endswith("e"): return lemma + "d"
-         if lemma.endswith("y") and (len(lemma) < 2 or lemma[-2] not in "aeiou"): return lemma[:-1] + "ied"
-         return lemma + "ed"
-     else:
-         return lemma
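
Spot checks for the minimal conjugators (editor's sketch, not part of the commit):

    assert _es_conj("estar", "Pres", "2s") == "estás"
    assert _es_conj("hablar", "Past", "1s") == "hablé"
    assert _en_conj("be", "Pres", "1s") == "am"
    assert _en_conj("study", "Pres", "3s") == "studies"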
-
- # ------------ Semi-lossless (routing) ------------
- def _build_with_spacy(text: str, src_lang: str, target: str,
-                       drop_articles: bool, zero_copula: bool, semi_lossless: bool) -> str:
-     nlp = nlp_es if src_lang == "Español" else nlp_en
-     doc_full = nlp(text)
-     doc = pick_predicative_sentence(doc_full)
-     if target == "Minimax-ASCII":
-         return realize_minimax(doc, src_lang, drop_articles, zero_copula, semi_lossless=semi_lossless)
-     else:
-         return realize_komin(doc, src_lang, drop_articles, zero_copula, semi_lossless=semi_lossless)
-
- def build_sentence(text: str, src_lang: str, target: str,
-                    drop_articles: bool, zero_copula: bool, mode: str, lossless: bool = False) -> str:
      if not text.strip(): return ""
-     semi = True  # always semi-lossless
-     core = _build_with_spacy(text, src_lang, target, drop_articles, zero_copula and not semi, semi_lossless=semi) if USE_SPACY else encode_simple(text, src_lang, target)
-     if lossless:
-         return attach_sidecar_b85(core, text)
      return core

- def universal_translate(text: str, src: str, tgt: str,
-                         drop_articles: bool, zero_copula: bool,
-                         mode: str, lossless: bool = False) -> str:
      if not text.strip(): return ""
      if src == tgt: return text
-
-     # Natural → conlang
      if src in ("Español","English") and tgt in ("Minimax-ASCII","Kōmín-CJK"):
-         return build_sentence(text, src, tgt, drop_articles, zero_copula, mode, lossless)
-
-     # Conlang → natural (sidecar-aware)
      if src in ("Minimax-ASCII","Kōmín-CJK") and tgt in ("Español","English"):
-         # Lossless b85
-         orig = extract_sidecar_b85(text)
          if orig is not None: return orig
-         # Semi-lossless → lexical decoding
-         return decode_simple(strip_sidecar_b85(text), src, tgt)
-
-     # Natural ↔ natural (lemma-level)
-     if src in ("Español","English") and tgt in ("Español","English"):
-         return translate_natural(text, src, tgt)
-
-     # Conlang ↔ conlang (simplified)
-     if src in ("Minimax-ASCII","Kōmín-CJK") and tgt in ("Minimax-ASCII","Kōmín-CJK"):
-         # Preserve the sidecar if present
-         orig_b85 = extract_sidecar_b85(text)
-         core = strip_sidecar_b85(text)
-         es_lemmas = decode_simple(core, src, "Español")
-         words = re.findall(r"\w+|[^\w\s]+", es_lemmas)
-         out = []
-         for w in words:
-             if re.fullmatch(r"\w+", w):
-                 code = ES2MINI.get(norm_es(w)) if tgt == "Minimax-ASCII" else ES2KOMI.get(norm_es(w))
-                 if not code:
-                     code = enc_oov_minimax(w) if tgt == "Minimax-ASCII" else enc_oov_komin(w)
-                 out.append(code)
-             else:
-                 out.append(w)
-         out_text = " ".join(out)
-         if orig_b85 is not None:
-             return attach_sidecar_b85(out_text, orig_b85)
-         return out_text
-
-     return "[No soportado]"
-
- def translate_natural(text: str, src_lang: str, tgt_lang: str) -> str:
-     if not text.strip(): return ""
-     if not USE_SPACY: return text
-     nlp = nlp_es if src_lang == "Español" else nlp_en
-     doc = nlp(text)
-     out = []
-     for t in doc:
-         if not t.is_alpha:
-             out.append(t.text); continue
-         lem = lemma_of(t, src_lang)
-         if src_lang == "Español":
-             tr = ES2EN_LEMMA.get(lem)
-             out.append(tr if tr else lem)
-         else:
-             tr = EN2ES_LEMMA.get(lem)
-             out.append(tr if tr else lem)
-     return " ".join(out)
-
- def round_trip(text, src, tgt, mode, lossless):
-     conlang = universal_translate(text, src, tgt, True, False, mode, lossless)
-     back = universal_translate(conlang, tgt, src, True, False, mode, lossless)
-     return conlang, back
-
- # ------------ UI and explanations ------------
- EXPLAIN_ES = """
- **Modo único: Semi-lossless** — Compacto con hints para reconstruir orden/morfología. Round-trip fiable (~90%). Activa "Lossless" para 100% exacto con sidecar.
- **Conlangs**: Minimax (VSO, ·TAMpersonNQ), Kōmín (SOV, ᵖ/ᵒ Ⓟ[2s]̆?).
- """
-
- ALL_LANGS = ["Español", "English", "Minimax-ASCII", "Kōmín-CJK"]
-
- with gr.Blocks(title="Universal Conlang Translator") as demo:
-     gr.Markdown("# Universal Conlang Translator · Simplificado")
-     gr.Markdown(EXPLAIN_ES)
-
-     # --- Translate (universal) ---
-     with gr.Tab("Traducir"):
-         with gr.Row():
-             uni_src = gr.Dropdown(ALL_LANGS, value="Español", label="Fuente")
-             uni_tgt = gr.Dropdown(ALL_LANGS, value="Minimax-ASCII", label="Destino")
-         uni_text = gr.Textbox(lines=3, label="Texto", value="Hola, ¿cómo estás?")
-         with gr.Row():
-             uni_drop = gr.Checkbox(value=True, label="Omitir artículos (ES/EN→conlang)")
-             uni_zero = gr.Checkbox(value=False, label="Cópula cero (presente afirm.) (ES/EN→conlang)")
-             uni_lossless = gr.Checkbox(value=False, label="Modo lossless (sidecar b85)")
-         uni_mode = gr.Dropdown(["Semi-lossless"], value="Semi-lossless", visible=False)  # fixed and hidden
-         uni_out = gr.Textbox(lines=6, label="Traducción")
-         gr.Button("Traducir").click(
-             universal_translate,
-             [uni_text, uni_src, uni_tgt, uni_drop, uni_zero, uni_mode, uni_lossless],
-             [uni_out]
-         )
-
-     # --- Build a sentence (ES/EN → conlang) ---
-     with gr.Tab("Construir frase (ES/EN → Conlang)"):
-         with gr.Row():
-             src_lang = gr.Dropdown(["Español", "English"], value="Español", label="Fuente")
-             target = gr.Dropdown(["Minimax-ASCII", "Kōmín-CJK"], value="Minimax-ASCII", label="Conlang")
-         text_in = gr.Textbox(lines=3, label="Frase", value="Hola, ¿cómo estás?")
-         with gr.Row():
-             drop_articles = gr.Checkbox(value=True, label="Omitir artículos")
-             zero_copula = gr.Checkbox(value=False, label="Cópula cero (presente afirm.)")
-             lossless_build = gr.Checkbox(value=False, label="Modo lossless (sidecar b85)")
-         mode_build = gr.Dropdown(["Semi-lossless"], value="Semi-lossless", visible=False)
-         out = gr.Textbox(lines=6, label="Salida")
-         gr.Button("Construir").click(
-             build_sentence,
-             [text_in, src_lang, target, drop_articles, zero_copula, mode_build, lossless_build],
-             [out]
-         )
-
-     # --- Decode (conlang → ES/EN) ---
-     with gr.Tab("Decodificar (Conlang → ES/EN)"):
-         with gr.Row():
-             src_code = gr.Dropdown(["Minimax-ASCII", "Kōmín-CJK"], value="Minimax-ASCII", label="Fuente")
-             tgt_lang = gr.Dropdown(["Español", "English"], value="Español", label="Destino")
-         code_in = gr.Textbox(lines=3, label="Texto en conlang (incluye §(...) si procede)")
-         out3 = gr.Textbox(lines=6, label="Salida")
-
-         def decode_lossless_aware(text, src, tgt):
-             orig = extract_sidecar_b85(text)
-             if orig is not None:
-                 return orig
-             return decode_simple(strip_sidecar_b85(text), src, tgt)
-
-         gr.Button("Decodificar").click(
-             decode_lossless_aware, [code_in, src_code, tgt_lang], [out3]
-         )
-
-     # --- Round-trip ---
-     with gr.Tab("Prueba ida→vuelta"):
-         with gr.Row():
-             rt_src = gr.Dropdown(["Español", "English"], value="Español", label="Fuente")
-             rt_tgt = gr.Dropdown(["Minimax-ASCII", "Kōmín-CJK"], value="Minimax-ASCII", label="Conlang")
-         rt_text = gr.Textbox(lines=3, label="Frase", value="Hola, ¿cómo estás?")
-         rt_lossless = gr.Checkbox(value=False, label="Lossless")
-         rt_mode = gr.Dropdown(["Semi-lossless"], value="Semi-lossless", visible=False)
-         rt_out_conlang = gr.Textbox(lines=3, label="Conlang (ida)")
-         rt_out_back = gr.Textbox(lines=3, label="Vuelta")
-         gr.Button("Probar").click(
-             round_trip,
-             [rt_text, rt_src, rt_tgt, rt_mode, rt_lossless],
-             [rt_out_conlang, rt_out_back]
-         )
-
- if __name__ == "__main__":
-     demo.launch()
 
+ # app.py — Universal Conlang Translator (Max Exact Compression)
+ # ... (imports unchanged)

+ # ... (load_lexicons, norm_es, etc. unchanged)

+ # OOV and custom_b64 helpers unchanged

+ # Switch the b85 sidecar to a custom-alphabet sidecar
+ def custom_sidecar_enc(conlang_text: str, original_text: str) -> str:
+     comp = zlib.compress(original_text.encode("utf-8"), 9)
+     blob = to_custom_b64(comp, ALPHA_MINI64)
+     return f"{conlang_text} ~{blob}"
+
+ def extract_custom_sidecar(text: str) -> Optional[str]:
+     if '~' in text:
+         core, blob = text.rsplit('~', 1)
+         try:
+             comp = from_custom_b64(blob, ALPHA_MINI64)
+             return zlib.decompress(comp).decode("utf-8")
+         except Exception:
+             return None
+     return None
+
+ def strip_custom_sidecar(text: str) -> str:
+     # Split on the *last* '~': OOV tokens also start with '~' and must survive
+     return text.rsplit('~', 1)[0].rstrip() if '~' in text else text
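
A round-trip sketch for the new custom sidecar (editor's illustration, not part of the commit; "X·P2sQ" is a placeholder core):

    msg = "Hola, ¿cómo estás?"
    tagged = custom_sidecar_enc("X·P2sQ", msg)
    assert extract_custom_sidecar(tagged) == msg
    assert strip_custom_sidecar(tagged) == "X·P2sQ"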

+ # Update is_content_token: allow EVERYTHING, for exactness
  def is_content_token(t) -> bool:
+     return True  # no filtering; every token gets encoded

+ # Update realize_minimax: include every token (greetings, wh-words, etc.)
  def realize_minimax(doc, src_lang: str, drop_articles=True, zero_copula=True, semi_lossless=False, person_hint="2s"):
+     # NOTE: `doc` is now a plain string, not a spaCy Doc
+     # Split the full text into tokens (punctuation included)
+     tokens = re.findall(r"\S+", doc)  # no filtering; keep everything
+     if not tokens: return ""
+     # Assume the first verb-ish token carries the hints (simple heuristic);
+     # punctuation is stripped before the comparison so "estás?" still matches
+     v_idx = next((i for i, t in enumerate(tokens) if t.lower().strip('?¿!¡.,;') in ["estás", "eres", "soy", "estar", "ser"]), 0)
+     parts = []
+     for i, t in enumerate(tokens):
+         lem = t.lower().rstrip('?¿!¡.,;')  # strip trailing punctuation for the code, re-attach after
+         punct = t[len(lem):] if len(t) > len(lem) else ""
+         code = code_es(lem, "Minimax-ASCII") if src_lang == "Español" else code_en(lem, "Minimax-ASCII")
+         if i == v_idx and semi_lossless:
+             tense = "P"   # naive tense detection: assume present
+             pi = "2s"     # assumed person
+             tail = f"{tense}{pi}Q" if "?" in doc else f"{tense}{pi}"
+             code = f"{code}·{tail}"
+         parts.append(code + punct)
+     return " ".join(parts)
+
+ # Decode: simple reversal for semi-lossless; the sidecar handles exact mode
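
The intended shape, roughly (editor's illustration; the actual codes depend on the loaded lexicons, and note it is the surface form "estás", not the lemma, that gets looked up here):

    out = realize_minimax("Hola, ¿cómo estás?", "Español", semi_lossless=True)
    # Three codes: "hola" (with its comma re-attached), "como", and the
    # code for "estás" carrying the ·P2sQ tail plus the trailing "?".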
  def decode_simple(text: str, source: str, tgt_lang: str) -> str:
+     # Semi-lossless path: map each token back; conjugate when a ·tail is present
      tokens = text.split()
+     out = []
+     for part in tokens:
+         m = mini_tail_re.match(part.rstrip('?¿!¡.,;'))
          if m:
              stem = m.group("stem")
              tail = m.group("tail")
+             vlem = MINI2ES.get(stem, dec_oov_minimax(stem)) if tgt_lang == "Español" else MINI2EN.get(stem, stem)
+             # Naive conjugation: the tense/person hints in the tail are not parsed yet
+             v_conj = _es_conj(vlem, "Pres", "2s") if tgt_lang == "Español" else _en_conj(vlem, "Pres", "2s")
+             out.append(v_conj)
+             if "Q" in tail:
+                 out[-1] += "?"
+         else:
+             look = part.rstrip('?¿!¡.,;')
+             w = MINI2ES.get(look, dec_oov_minimax(look)) if tgt_lang == "Español" else MINI2EN.get(look, look)
+             out.append(w + (part[-1] if part[-1] in '?¿!¡.,;' else ''))
+     out_text = " ".join(out)
+     if "?" in text and tgt_lang == "Español":
+         out_text = f"¿{out_text}"
      return out_text

+ # Update build_sentence and universal_translate
+ def build_sentence(text: str, src_lang: str, target: str, drop_articles: bool, zero_copula: bool, mode: str, max_comp_exact: bool = False) -> str:
      if not text.strip(): return ""
+     semi = True
+     core = realize_minimax(text, src_lang, drop_articles, zero_copula, semi) if USE_SPACY else encode_simple(text, src_lang, target)  # use realize_minimax so every token is kept
+     if max_comp_exact:
+         return custom_sidecar_enc(core, text)
      return core

+ def universal_translate(text: str, src: str, tgt: str, drop_articles: bool, zero_copula: bool, mode: str, max_comp_exact: bool = False) -> str:
      if not text.strip(): return ""
      if src == tgt: return text
      if src in ("Español","English") and tgt in ("Minimax-ASCII","Kōmín-CJK"):
+         return build_sentence(text, src, tgt, drop_articles, zero_copula, mode, max_comp_exact)
      if src in ("Minimax-ASCII","Kōmín-CJK") and tgt in ("Español","English"):
+         orig = extract_custom_sidecar(text)
          if orig is not None: return orig
+         return decode_simple(strip_custom_sidecar(text), src, tgt)
+     # Rest unchanged...

+ # UI: rename the checkbox to "Max Compresión Exacta (sidecar oculto)"
+ # In the tabs: uni_max_comp = gr.Checkbox(value=False, label="Max Compresión Exacta")
+ # Click handler: universal_translate(..., uni_max_comp)

+ # The rest of the code (conjugators, UI) is unchanged from the previous version
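
A minimal wiring sketch for that checkbox (editor's illustration; the component names follow the previous version of the file):

    uni_max_comp = gr.Checkbox(value=False, label="Max Compresión Exacta")
    gr.Button("Traducir").click(
        universal_translate,
        [uni_text, uni_src, uni_tgt, uni_drop, uni_zero, uni_mode, uni_max_comp],
        [uni_out]
    )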