LoloSemper committed
Commit 6d32ca6 · verified · 1 Parent(s): ec2c52a

Update app.py

Files changed (1): app.py +240 -323

app.py CHANGED
@@ -1,195 +1,176 @@
- import json, re, os
- from typing import Dict
- import gradio as gr
-
- # ===== Lexicon files (generated in Colab) =====
- MINI_JSON = "lexicon_minimax.json"  # ES -> code (Minimax)
- KOMI_JSON = "lexicon_komin.json"    # ES -> code (Kōmín)
- MAST_JSON = "lexicon_master.json"   # optional: [{'lemma_es','lemma_en','minimax','komin'}, ...]
-
- # ===== (Optional) EN<->ES fallback via Argos if the master lexicon is missing =====
- USE_ARGOS = False
- try:
-     import argostranslate.translate as argos_tr
-     USE_ARGOS = True
- except Exception:
-     pass
-
- def argos_translate_word(w: str, src: str, tgt: str) -> str:
-     if not USE_ARGOS:
-         return ""
      try:
-         langs = argos_tr.get_installed_languages()
-         lsrc = next((l for l in langs if l.code == src), None)
-         ldst = next((l for l in langs if l.code == tgt), None)
-         if not (lsrc and ldst):
-             return ""
-         tr = lsrc.get_translation(ldst)
          return tr.translate(w) or ""
      except Exception:
          return ""

- # ===== Normalization =====
- WORD_RE = re.compile(r"[A-Za-zÁÉÍÓÚÜÑáéíóúüñ]+", re.UNICODE)
- STRIP = str.maketrans("ÁÉÍÓÚÜÑáéíóúüñ", "AEIOUUNaeiouun")
  def norm_es(w: str) -> str:
-     return re.sub(r"[^a-záéíóúüñ]", "", w.lower()).translate(STRIP)
-
  def norm_en(w: str) -> str:
-     return re.sub(r"[^a-z]", "", w.lower())
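
For context, a quick sketch of what these normalizers do (illustrative calls only, assuming the definitions above):

```python
# Hypothetical REPL session against the helpers above.
norm_es("Cómo")     # -> "como"    (lowercased, accents folded by STRIP)
norm_es("¡Hola!")   # -> "hola"    (non-letters removed by the regex)
norm_en("Running")  # -> "running" (lowercase, ASCII letters only)
```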
 
- # ===== Lemmatization (spaCy if available; otherwise rules + irregulars) =====
- USE_SPACY = False
  try:
-     import spacy
      try:
-         nlp_es = spacy.load("es_core_news_sm")
-         nlp_en = spacy.load("en_core_web_sm")
-         USE_SPACY = True
      except Exception:
-         nlp_es = nlp_en = None
- except Exception:
-     nlp_es = nlp_en = None
-
- # Frequent irregulars (keys normalized, accents stripped)
- IRREG_ES = {
-     # estar
-     "estoy":"estar","estas":"estar","esta":"estar","estamos":"estar","estan":"estar",
-     "estuve":"estar","estuviste":"estar","estuvo":"estar","estuvimos":"estar","estuvieron":"estar",
-     "estare":"estar","estaria":"estar",
-     # ser
-     "soy":"ser","eres":"ser","es":"ser","somos":"ser","son":"ser",
-     "fui":"ser","fuiste":"ser","fue":"ser","fuimos":"ser","fueron":"ser",
-     # tener
-     "tengo":"tener","tienes":"tener","tiene":"tener","tenemos":"tener","tienen":"tener",
-     "tuve":"tener","tuviste":"tener","tuvo":"tener","tuvimos":"tener","tuvieron":"tener",
-     # ir
-     "voy":"ir","vas":"ir","va":"ir","vamos":"ir","van":"ir",
-     "iba":"ir","ibas":"ir","ibamos":"ir","iban":"ir",
-     # haber (auxiliary)
-     "he":"haber","has":"haber","ha":"haber","hemos":"haber","han":"haber",
-     "habia":"haber","habias":"haber","habian":"haber",
-     # other common verbs
-     "hago":"hacer","haces":"hacer","hace":"hacer","hacemos":"hacer","hacen":"hacer",
-     "digo":"decir","dices":"decir","dice":"decir","decimos":"decir","dicen":"decir",
-     "puedo":"poder","puedes":"poder","puede":"poder","podemos":"poder","pueden":"poder",
-     "pongo":"poner","pones":"poner","pone":"poner","ponemos":"poner","ponen":"poner",
-     "quiero":"querer","quieres":"querer","quiere":"querer","queremos":"querer","quieren":"querer",
-     "vengo":"venir","vienes":"venir","viene":"venir","venimos":"venir","vienen":"venir",
-     "veo":"ver","ves":"ver","ve":"ver","vemos":"ver","ven":"ver",
-     "doy":"dar","das":"dar","da":"dar","damos":"dar","dan":"dar",
-     "se":"saber","sabes":"saber","sabe":"saber","sabemos":"saber","saben":"saber",
- }
-
- INTERROG_ES = {
-     "como":"cómo","cómo":"cómo","que":"qué","qué":"qué",
-     "quien":"quién","quién":"quién","cuando":"cuándo","cuándo":"cuándo",
-     "donde":"dónde","dónde":"dónde","cual":"cuál","cuál":"cuál",
-     "cuanto":"cuánto","cuánto":"cuánto","cuanta":"cuánta","cuánta":"cuánta",
-     "cuantos":"cuántos","cuántos":"cuántos","cuantas":"cuántas","cuántas":"cuántas",
-     "porque":"porque","porqué":"porqué"
- }
-
- def lemma_es(token: str) -> str:
-     tok_raw = token.strip()
-     tok = norm_es(tok_raw)
-     if not tok:
-         return tok
-
-     # Interrogatives and friends: keep them as their own "lemma" (with or without accent)
-     if tok_raw.lower() in INTERROG_ES or tok in INTERROG_ES:
-         base = INTERROG_ES.get(tok_raw.lower(), INTERROG_ES.get(tok, tok))
-         return base
-
-     # Most common irregulars
-     if tok in IRREG_ES:
-         return IRREG_ES[tok]
-
-     # spaCy when available
-     if USE_SPACY and nlp_es:
-         doc = nlp_es(tok)
-         for t in doc:
-             if t.is_alpha:
-                 lem = norm_es(t.lemma_)
-                 if lem:
-                     return lem
-
-     # Conservative heuristic (avoids mistaking "como" for "comer"):
-     rules = [
-         ("ando","ar"),("iendo","er"),("yendo","ir"),  # gerunds
-         ("abamos","ar"),("ábamos","ar"),("iamos","er"),("íamos","er"),("iamos","ir"),("íamos","ir"),
-         ("aste","ar"),("asteis","ar"),("aron","ar"),
-         ("iste","er"),("isteis","er"),("ieron","er"),("imos","er"),
-         ("iste","ir"),("isteis","ir"),("ieron","ir"),("imos","ir"),
-         ("aba","ar"),("abas","ar"),("aban","ar"),
-         ("ia","er"),("ía","er"),("ias","er"),("ías","er"),("ian","er"),("ían","er"),
-         ("ia","ir"),("ía","ir"),("ias","ir"),("ías","ir"),("ian","ir"),("ían","ir"),
-         ("are","ar"),("aré","ar"),("ere","er"),("eré","er"),("ire","ir"),("iré","ir"),
-         ("aria","ar"),("aría","ar"),("eria","er"),("ería","er"),("iria","ir"),("iría","ir"),
-     ]
-     for suf, inf in rules:
-         if tok.endswith(suf) and len(tok) > len(suf)+1:
-             base = tok[:-len(suf)]
-             return base + inf
-
-     return tok  # by default, leave the token untouched
-
- def lemma_en(token: str) -> str:
-     tok = norm_en(token)
-     if not tok:
-         return tok
-     if USE_SPACY and nlp_en:
-         doc = nlp_en(tok)
-         for t in doc:
-             if t.is_alpha:
-                 lem = norm_en(t.lemma_)
-                 if lem:
-                     return lem
-     # Minimal heuristic: plurals and common suffixes
-     for suf, rep in [("ies","y"),("ing",""),("ed",""),("s","")]:
-         if tok.endswith(suf) and len(tok) > len(suf)+1:
-             return tok[:-len(suf)] + rep
-     return tok
-
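
For context, a sketch of how these lemmatizers resolve a few inputs when spaCy is unavailable (illustrative only; note that where the rules list repeats a suffix with two endings, e.g. ("iste","er") before ("iste","ir"), the first match always wins, so the "-er" expansion is the one chosen):

```python
# Hypothetical behaviour of the rule-based fallback (no spaCy loaded).
lemma_es("estás")     # -> "estar"  (irregular table; accents stripped first)
lemma_es("cantando")  # -> "cantar" (gerund rule "ando" -> "ar")
lemma_es("como")      # -> "cómo"   (interrogative table wins; never "comer")
lemma_en("walked")    # -> "walk"   ("ed" rule)
lemma_en("running")   # -> "runn"   ("ing" stripped; no de-doubling without spaCy)
```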
- # ===== Lexicon loading =====
- def load_json(path: str):
-     if not os.path.exists(path):
-         return None
-     with open(path, "r", encoding="utf-8") as f:
-         return json.load(f)
-
- def build_dicts():
-     mm = load_json(MINI_JSON) or {}
-     kk = load_json(KOMI_JSON) or {}
-     master = load_json(MAST_JSON) or {}
-
-     es2mini: Dict[str, str] = (mm.get("mapping") or {})
-     es2komi: Dict[str, str] = (kk.get("mapping") or {})
-
-     en2mini: Dict[str, str] = {}
-     en2komi: Dict[str, str] = {}
-     if isinstance(master, dict) and "entries" in master:
-         for e in master["entries"]:
-             es = norm_es(str(e.get("lemma_es","")))
-             en = norm_en(str(e.get("lemma_en","")))
-             mi = str(e.get("minimax",""))
-             ko = str(e.get("komin",""))
-             if en and mi:
-                 en2mini[en] = mi
-             if en and ko:
-                 en2komi[en] = ko
-
-     mini2es = {v:k for k,v in es2mini.items()}
-     komi2es = {v:k for k,v in es2komi.items()}
-     mini2en = {v:k for k,v in en2mini.items()} if en2mini else {}
-     komi2en = {v:k for k,v in en2komi.items()} if en2komi else {}
-     return es2mini, es2komi, en2mini, en2komi, mini2es, komi2es, mini2en, komi2en
-
- ES2MINI, ES2KOMI, EN2MINI, EN2KOMI, MINI2ES, KOMI2ES, MINI2EN, KOMI2EN = build_dicts()
-
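
For reference, a minimal sketch of the JSON shapes build_dicts consumes (field values invented for illustration):

```python
# lexicon_minimax.json / lexicon_komin.json: top-level "mapping" of ES lemma -> code.
{"mapping": {"hola": "@", "estar": "p"}}

# lexicon_master.json: "entries" linking ES/EN lemmas to both conlang codes.
{"entries": [{"lemma_es": "estar", "lemma_en": "be",
              "minimax": "p", "komin": "天"}]}
```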
- # ===== Reinforcement: assign short codes to "basics" when missing =====
- ALPHA_MINI = "@ptkmnslraeiouy0123456789><=:/!?.+-_*#bcdfghjvqwxzACEGHIJKLMNOPRS"[:64]
  CJK_BASE = (
      "天地人日月山川雨風星火水木土金石光影花草鳥犬猫魚"
      "東西南北中外上下午夜明暗手口目耳心言書家道路門"
@@ -198,152 +179,88 @@ CJK_BASE = (
  )
  ALPHA_CJK = (CJK_BASE * 10)[:256]

- def shortest_unused(prefix_list, used: set, alphabet: str, max_len: int = 3):
-     for L in range(1, max_len+1):
-         for p in prefix_list:
-             if len(p) == L and p not in used:
-                 return p
-     def gen(L):
-         if L == 1:
-             for ch in alphabet:
-                 yield ch
-         else:
-             for prev in gen(L-1):
-                 for ch in alphabet:
-                     yield prev + ch
-     for cand in gen(L):
-         if cand not in used:
-             return cand
-     # fallback
-     i = 1
-     while True:
-         cand = prefix_list[0] + alphabet[0]*i
-         if cand not in used:
-             return cand
-         i += 1
-
- def augment_basics():
-     global ES2MINI, ES2KOMI, MINI2ES, KOMI2ES
-     basics = [
-         "hola","adios","gracias","por","favor","si","no",
-         "que","qué","quien","quién","como","cómo",
-         "cuando","cuándo","donde","dónde","cual","cuál"
-     ]
-     used_mini = set(ES2MINI.values())
-     used_komi = set(ES2KOMI.values())
-     for w in basics:
-         k = norm_es(w)
-         if k not in ES2MINI:
-             code = shortest_unused([w[:1].lower()], used_mini, ALPHA_MINI, max_len=3)
-             ES2MINI[k] = code; MINI2ES[code] = k; used_mini.add(code)
-         if k not in ES2KOMI:
-             code = shortest_unused([w[:1]], used_komi, ALPHA_CJK, max_len=2)
-             ES2KOMI[k] = code; KOMI2ES[code] = k; used_komi.add(code)
-
- augment_basics()
-
- # ===== Encode ES/EN → conlang (with lemmatization) =====
- def encode_text(text: str, src_lang: str, target: str) -> str:
-     if not text.strip():
-         return ""
-     lex_es = ES2MINI if target == "Minimax-ASCII" else ES2KOMI
-     lex_en = EN2MINI if target == "Minimax-ASCII" else EN2KOMI
-     use_en_lex = bool(lex_en)
-
-     def repl(m):
-         tok = m.group(0)
-         if src_lang == "Español":
-             key = lemma_es(tok)
-             return lex_es.get(key, tok)
-         else:
-             key = lemma_en(tok)
-             if use_en_lex and key in lex_en:
-                 return lex_en[key]
-             # EN->ES fallback via Argos when there is no master lexicon
-             es_word = argos_translate_word(tok, "en", "es") if USE_ARGOS else ""
-             key_es = lemma_es(es_word) if es_word else ""
-             return lex_es.get(key_es, tok) if key_es else tok
-
-     return WORD_RE.sub(repl, text)
-
- # ===== Decode conlang → ES/EN =====
- SPLIT_CODE_RE = re.compile(r"([^\w\s]+)")
-
- def decode_text(text: str, source: str, tgt_lang: str) -> str:
-     if not text.strip():
-         return ""
-     code2es = MINI2ES if source == "Minimax-ASCII" else KOMI2ES
-     code2en = MINI2EN if source == "Minimax-ASCII" else KOMI2EN
-     have_en = bool(code2en)
-
-     parts = []
-     for chunk in re.split(r"(\s+)", text):
-         if not chunk:
-             continue
-         sub = re.split(SPLIT_CODE_RE, chunk)
-         parts.extend([s for s in sub if s != ""])
-
-     out = []
-     for p in parts:
-         if p.isspace() or re.fullmatch(SPLIT_CODE_RE, p):
-             out.append(p)
-             continue
-         es = code2es.get(p)
-         if tgt_lang == "Español":
-             out.append(es if es else p)
          else:
-             if have_en and p in code2en:
-                 out.append(code2en[p])
-             else:
-                 if es:
-                     en = argos_translate_word(es, "es", "en") if USE_ARGOS else ""
-                     out.append(en if en else es)
-                 else:
-                     out.append(p)
-     return "".join(out)
-
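
For context, a minimal usage sketch of the encode/decode pair (the concrete codes depend on the loaded lexicons):

```python
# Hypothetical round trip; assumes the lexicon JSONs are present.
encoded = encode_text("Hola estás", "Español", "Minimax-ASCII")
# Words are lemmatized first ("estás" -> "estar"), then looked up;
# tokens with no lexicon entry pass through unchanged.
decoded = decode_text(encoded, "Minimax-ASCII", "Español")
# Decoding returns normalized lemmas ("hola estar"), not the original surface forms.
```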
- # ===== UI help texts =====
- HELP_ES = """
- **Consejos:**
- - Este Space **lematiza** la entrada (spaCy si está disponible; si no, reglas + irregulares), así “estás”→“estar” y casa con tu léxico.
- - Añadimos una pequeña **capa de básicos** (hola, gracias, sí, no, interrogativos) si faltan en los JSON, con códigos cortos sin colisiones.
- - Si prefieres trabajar por **formas superficiales** (sin lemas), regenera los JSON en Colab con `LEMMATIZE=False`.
- """
-
- HELP_EN = """
- **Tips:**
- - Input is **lemmatized** (spaCy if available; otherwise rules + irregulars), so “running”→“run” and matches your lexicon.
- - A small set of **basic words** (hello/thanks/yes/no/interrogatives) gets short codes if missing from JSONs.
- - Prefer surface forms? Rebuild the lexicon in Colab with `LEMMATIZE=False`.
- """
-
- # ===== UI =====
- with gr.Blocks(title="Conlangs hermanos · Minimax/Kōmín · ES/EN") as demo:
-     gr.Markdown("# Conlangs hermanos · Minimax-ASCII / Kōmín-CJK")
-     with gr.Row():
-         gr.Markdown(HELP_ES)
-         gr.Markdown(HELP_EN)
-
-     with gr.Tab("Codificar (ES/EN → Conlang)"):
-         with gr.Row():
-             src_lang = gr.Dropdown(["Español", "English"], value="Español", label="Idioma fuente")
-             tgt_con = gr.Dropdown(["Minimax-ASCII", "Kōmín-CJK"], value="Minimax-ASCII", label="Conlang destino")
-         text_in = gr.Textbox(lines=4, label="Texto fuente", value="Hola ¿Cómo estás?")
-         btn_enc = gr.Button("Codificar", variant="primary")
-         text_out = gr.Textbox(lines=6, label="Salida")
-         btn_enc.click(encode_text, [text_in, src_lang, tgt_con], [text_out])
-
-     with gr.Tab("Decodificar (Conlang → ES/EN)"):
-         with gr.Row():
-             src_code = gr.Dropdown(["Minimax-ASCII", "Kōmín-CJK"], value="Minimax-ASCII", label="Conlang fuente")
-             tgt_lang = gr.Dropdown(["Español", "English"], value="Español", label="Idioma destino")
-         code_in = gr.Textbox(lines=4, label="Texto en conlang (separa códigos por espacios si es necesario)")
-         btn_dec = gr.Button("Decodificar", variant="secondary")
-         plain_out = gr.Textbox(lines=6, label="Salida")
-         btn_dec.click(decode_text, [code_in, src_code, tgt_lang], [plain_out])
-
- if __name__ == "__main__":
-     demo.launch()

+ # =========================================
+ # COLAB · Bulk ES/EN lexicon construction from OMW (WordNet)
+ # and code assignment for Minimax/Kōmín
+ # =========================================
+ !pip -q install wn wordfreq spacy
+
+ import wn, json, csv, re, os, sys, math, random
+ from collections import OrderedDict, defaultdict
+ from typing import List, Dict, Tuple
+
+ # ---- Editable parameters ----
+ SEED = 4242
+ USE_SPACY = True    # lemmatize with spaCy if it can be loaded
+ USE_ARGOS = False   # fill in missing EN via Argos (requires network and models)
+ MAXLEN_MINI = 3     # max Minimax code length
+ MAXLEN_CJK = 2      # max Kōmín code length
+ LIMIT_ES = None     # None = all OMW "spa" lemmas; or an integer to truncate
+ # ------------------------------
+
+ # (optional) spaCy
+ if USE_SPACY:
+     import spacy, spacy.cli
+     try:
+         nlp_es = spacy.load("es_core_news_sm")
+     except Exception:
+         try:
+             spacy.cli.download("es_core_news_sm"); nlp_es = spacy.load("es_core_news_sm")
+         except Exception:
+             nlp_es = None
+     try:
+         nlp_en = spacy.load("en_core_web_sm")
+     except Exception:
+         try:
+             spacy.cli.download("en_core_web_sm"); nlp_en = spacy.load("en_core_web_sm")
+         except Exception:
+             nlp_en = None
+ else:
+     nlp_es = nlp_en = None
 
+ # (optional) Argos
+ if USE_ARGOS:
+     !pip -q install argostranslate
+     import argostranslate.package, argostranslate.translate
      try:
+         available = argostranslate.package.get_available_packages()
+         need = [p for p in available if {p.from_code, p.to_code} == {"es","en"}]
+         for p in need:
+             path = p.download()
+             argostranslate.package.install_from_path(path)
+         ARGOS_OK = True
+     except Exception as e:
+         print("[Warning] Could not install Argos models:", e)
+         ARGOS_OK = False
+ else:
+     ARGOS_OK = False
+
+ def argos_es2en(w: str) -> str:
+     if not ARGOS_OK: return ""
+     try:
+         langs = argostranslate.translate.get_installed_languages()
+         es = next((l for l in langs if l.code=="es"), None)
+         en = next((l for l in langs if l.code=="en"), None)
+         tr = es.get_translation(en)
          return tr.translate(w) or ""
      except Exception:
          return ""

+ # ---- Frequency ----
+ try:
+     from wordfreq import word_frequency, top_n_list
+ except Exception:
+     top_n_list = None
+     def word_frequency(w, lang, minimum=0.0): return 0.0
 
+ # ---- Normalization ----
+ STRIP = str.maketrans("ÁÉÍÓÚÜÑáéíóúüñ", "AEIOUUNaeiouun")
  def norm_es(w: str) -> str:
+     return re.sub(r"[^a-záéíóúüñ]", "", (w or "").lower()).translate(STRIP)

  def norm_en(w: str) -> str:
+     return re.sub(r"[^a-z]", "", (w or "").lower())

+ def lemma_list_es(words: List[str]) -> List[str]:
+     if not USE_SPACY or nlp_es is None:
+         return [norm_es(w) for w in words if norm_es(w)]
+     doc = nlp_es(" ".join(words))
+     out = []
+     for t in doc:
+         if t.is_alpha:
+             out.append(norm_es(t.lemma_))
+     return out
+
+ def lemma_list_en(words: List[str]) -> List[str]:
+     if not USE_SPACY or nlp_en is None:
+         return [norm_en(w) for w in words if norm_en(w)]
+     doc = nlp_en(" ".join(words))
+     out = []
+     for t in doc:
+         if t.is_alpha:
+             out.append(norm_en(t.lemma_))
+     return out
+
+ # ---- Download OMW (multilingual WordNet) ----
  try:
+     wn.download("omw:1.4")  # the classic multilingual package
+ except Exception as e:
+     print("[Warning] Could not download omw:1.4 (it may already be installed).", e)
+
+ # Collect ES lemmas and their EN equivalents per synset
+ print("Extracting lemmas from OMW ...")
+ spa_lemmas: Dict[str, set] = defaultdict(set)  # es_lemma -> set(en_lemma)
+ # Walk every available synset and connect ES with EN
+ for lex in wn.lexicons():  # all installed lexicons
      try:
+         for ss in wn.synsets(lexicon=lex.id):
+             # per-language lemmas in this synset
+             es_lem = [norm_es(w.lemma()) for w in ss.words(lang="spa")]
+             en_lem = [norm_en(w.lemma()) for w in ss.words(lang="eng")]
+             if not es_lem or not en_lem:
+                 continue
+             for es in es_lem:
+                 if not es:
+                     continue
+                 for en in en_lem:
+                     if not en:
+                         continue
+                     spa_lemmas[es].add(en)
      except Exception:
+         continue
+
+ # Final list of ES lemmas
+ es_lemmas = list(spa_lemmas.keys())
+ # basic filter: no digits, at least 2 letters
+ es_lemmas = [w for w in es_lemmas if len(w) >= 2]
+ # Prioritize by frequency (wordfreq)
+ def freq_es(w: str) -> float:
+     try:
+         return word_frequency(w, "es", minimum=0.0)
+     except Exception:
+         return 0.0
+ es_lemmas.sort(key=lambda w: (-freq_es(w), w))
+
+ if LIMIT_ES is not None:
+     es_lemmas = es_lemmas[:LIMIT_ES]
+
+ # (optional) lemmatize again (smooths out duplicates and variants)
+ if USE_SPACY and nlp_es:
+     es_lemmas = lemma_list_es(es_lemmas)
+
+ # dedupe, preserving order
+ es_lemmas = list(OrderedDict.fromkeys(es_lemmas))
+
+ # Pair up EN
+ es2en: Dict[str, str] = {}
+ for es in es_lemmas:
+     ens = sorted(spa_lemmas.get(es, []))
+     if ens:
+         es2en[es] = ens[0]  # first in alphabetical order (stable)
+     elif ARGOS_OK:
+         tr = norm_en(argos_es2en(es))
+         if tr:
+             es2en[es] = tr
+     else:
+         es2en[es] = ""  # no EN equivalent (not required)
+
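
For context, a sketch of the intermediate structures this stage builds (values invented for illustration):

```python
# Hypothetical state after the OMW walk, filtering, and EN pairing.
spa_lemmas = {"casa": {"house", "home"}, "perro": {"dog"}}
es_lemmas  = ["casa", "perro"]      # sorted by descending wordfreq, ties alphabetically
es2en      = {"casa": "home", "perro": "dog"}  # first EN lemma in alphabetical order
```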
+ # ---- Conlang alphabets ----
+ ALPHA_MINI = (
+     "@ptkmnslraeiouy"   # 15
+     "0123456789"        # +10 = 25
+     "><=:/!?.+-_*#"     # +13 = 38
+     "bcdfghjvqwxz"      # +12 = 50
+     "ACEGHIJKLMNOPRS"   # +15 = 65 (only the first 64 are used)
+ )[:64]

  CJK_BASE = (
      "天地人日月山川雨風星火水木土金石光影花草鳥犬猫魚"
      "東西南北中外上下午夜明暗手口目耳心言書家道路門"

  )
  ALPHA_CJK = (CJK_BASE * 10)[:256]

+ # ---- Code generation (increasing length, alphabet shuffled by SEED) ----
+ def gen_codes(alphabet: str, max_len: int) -> List[str]:
+     codes = []
+     # length 1
+     for ch in alphabet:
+         codes.append(ch)
+     # lengths 2..max_len
+     def gen_len(L: int):
+         if L == 1:
+             for ch in alphabet:
+                 yield ch
          else:
+             for prev in gen_len(L-1):
+                 for ch in alphabet:
+                     yield prev + ch
+     for L in range(2, max_len+1):
+         for c in gen_len(L):
+             codes.append(c)
+     return codes
+
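
For context, a quick trace of the enumeration order and the resulting capacity (assuming the function above):

```python
# Illustrative run on a tiny alphabet: all length-1 codes first, then length-2.
gen_codes("ab", 2)  # -> ['a', 'b', 'aa', 'ab', 'ba', 'bb']

# Capacity with the real alphabets:
#   Minimax: 64 + 64**2 + 64**3 = 266_304 codes (MAXLEN_MINI = 3)
#   Kōmín:   256 + 256**2       =  65_792 codes (MAXLEN_CJK = 2)
# which is what the length checks below compare against len(es_lemmas).
```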
+ random.seed(SEED)
+ alpha_m = list(ALPHA_MINI); random.shuffle(alpha_m); ALPHA_MINI_SHUF = "".join(alpha_m)
+ alpha_k = list(ALPHA_CJK);  random.shuffle(alpha_k); ALPHA_CJK_SHUF = "".join(alpha_k)
+
+ codes_m = gen_codes(ALPHA_MINI_SHUF, MAXLEN_MINI)
+ codes_k = gen_codes(ALPHA_CJK_SHUF, MAXLEN_CJK)
+
+ if len(codes_m) < len(es_lemmas):
+     raise ValueError("Increase MAXLEN_MINI: not enough codes for Minimax.")
+ if len(codes_k) < len(es_lemmas):
+     raise ValueError("Increase MAXLEN_CJK: not enough codes for Kōmín.")
+
+ # ---- Frequency-based assignment (es_lemmas is already priority-ordered) ----
+ es2mini = {}
+ es2komi = {}
+ for i, es in enumerate(es_lemmas):
+     es2mini[es] = codes_m[i]
+     es2komi[es] = codes_k[i]
+
+ # ---- Saving ----
+ def write_json(path, obj):
+     with open(path, "w", encoding="utf-8") as f:
+         json.dump(obj, f, ensure_ascii=False, indent=2)
+
+ def write_tsv(path, rows):
+     # csv is already imported at the top
+     with open(path, "w", encoding="utf-8", newline="") as f:
+         w = csv.writer(f, delimiter="\t")
+         w.writerows(rows)
+
+ write_json("lexicon_minimax.json", {
+     "lang": "es", "source": "OMW 1.4", "seed": SEED,
+     "alphabet": "Minimax-ASCII", "max_len": MAXLEN_MINI,
+     "size": len(es2mini), "mapping": es2mini
+ })
+ write_json("lexicon_komin.json", {
+     "lang": "es", "source": "OMW 1.4", "seed": SEED,
+     "alphabet": "Kōmín-CJK", "max_len": MAXLEN_CJK,
+     "size": len(es2komi), "mapping": es2komi
+ })
+
+ master_rows = [("lemma_es","lemma_en","code_minimax","code_komin")]
+ master_json = []
+ for es in es_lemmas:
+     master_rows.append((es, es2en.get(es, ""), es2mini[es], es2komi[es]))
+     master_json.append({
+         "lemma_es": es,
+         "lemma_en": es2en.get(es, ""),
+         "minimax": es2mini[es],
+         "komin": es2komi[es]
+     })
+ write_json("lexicon_master.json", {"seed": SEED, "source": "OMW 1.4", "entries": master_json})
+ write_tsv("lexicon_master.tsv", master_rows)
+
+ print("\n===== SUMMARY =====")
+ print(f"ES lemmas extracted from OMW: {len(es_lemmas)}")
+ print("Files created:")
+ print(" - lexicon_minimax.json")
+ print(" - lexicon_komin.json")
+ print(" - lexicon_master.json")
+ print(" - lexicon_master.tsv")
+ print("Download them from the Colab file panel.")
266