Romanes committed on
Commit
25c9693
·
verified ·
1 Parent(s): 21f6cbd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +135 -115
app.py CHANGED
@@ -1,9 +1,8 @@
1
- # app.py
2
  # -*- coding: utf-8 -*-
3
- import os
4
  import re
5
  import unicodedata
6
  from pathlib import Path
 
7
 
8
  import gradio as gr
9
  import joblib
@@ -11,67 +10,51 @@ import pandas as pd
11
  from scipy import sparse
12
  from sklearn.metrics.pairwise import cosine_similarity
13
 
14
- # ==========================
15
- # Ubicación de artefactos
16
- # ==========================
17
- ART = Path("artifacts")
 
18
  VEC_PATH = ART / "tfidf_vectorizer.joblib"
19
  MAT_PATH = ART / "tfidf_matrix.npz"
20
  IDX_PATH = ART / "doc_index.csv"
21
 
22
- # ==========================
23
- # Utilidades de limpieza
24
- # ==========================
25
- import nltk
26
- from nltk.corpus import stopwords
27
-
28
- def _ensure_nltk():
29
- try:
30
- nltk.data.find("corpora/stopwords")
31
- except LookupError:
32
- nltk.download("stopwords")
33
- _ensure_nltk()
34
-
35
  def strip_accents(s: str) -> str:
36
  return "".join(c for c in unicodedata.normalize("NFKD", s) if not unicodedata.combining(c))
37
 
38
- STOPWORDS = {strip_accents(w.lower()) for w in stopwords.words("spanish")} | {"aun"}
39
-
40
- def limpiar_texto(s: str) -> str:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  if not isinstance(s, str):
42
  s = "" if s is None else str(s)
43
  s = strip_accents(s.lower())
44
- s = re.sub(r"[“”„‟‹›«»—–‐-‒–—―\-]", " ", s)
45
- s = re.sub(r"[^\w\s]", " ", s)
46
  s = re.sub(r"\s+", " ", s).strip()
47
  toks = [t for t in s.split() if t not in STOPWORDS and not t.isdigit()]
48
  return " ".join(toks)
49
 
50
- # ==========================
51
- # Reglas heurísticas (ejemplo OPS)
52
- # ==========================
53
- REGLAS = [
54
- {
55
- "keywords": ["ops", "orden de prestacion de servicios", "contrato ops"],
56
- "respuesta": {
57
- "CICP": ("2.1.2.02.02.008", "Servicios prestados a las empresas y servicios de producción"),
58
- "CPC": ("8", "Servicios prestados a las empresas y servicios de producción"),
59
- "UNSPSC":("80111600", "Servicios de personal temporal"),
60
- },
61
- "motivo": "Coincidencia con palabra clave OPS",
62
- },
63
- ]
64
-
65
- def aplicar_reglas(consulta: str):
66
- texto = limpiar_texto(consulta)
67
- for regla in REGLAS:
68
- if any(k in texto for k in regla["keywords"]):
69
- rows = []
70
- for cat, (cod, nom) in regla["respuesta"].items():
71
- rows.append({"Catálogo": cat, "Código": cod, "Nombre": nom, "Similaridad": 1.0, "Origen": "Regla"})
72
- return pd.DataFrame(rows)
73
- return None
74
-
75
  def catalog_tag(source_file: str) -> str:
76
  s = (source_file or "").lower()
77
  if "cicp" in s: return "CICP"
@@ -79,7 +62,7 @@ def catalog_tag(source_file: str) -> str:
79
  if "unspsc" in s: return "UNSPSC"
80
  return "OTRO"
81
 
82
- def parse_code_name(codes_raw: str, text_original: str):
83
  codes_raw = str(codes_raw or "")
84
  text_original = str(text_original or "")
85
  m = re.search(r"CODIGO;NOMBRE:\s*([^;|]+)\s*;\s*([^|]+)", codes_raw, flags=re.I)
@@ -99,75 +82,112 @@ def parse_code_name(codes_raw: str, text_original: str):
99
  if m2 and name is None: name = m2.group(1).strip()
100
  return (code or "").strip(), (name or "").strip()
101
 
102
- # ==========================
103
- # Carga en startup
104
- # ==========================
105
- VEC = joblib.load(VEC_PATH)
106
- MAT = sparse.load_npz(MAT_PATH)
107
- IDX = pd.read_csv(IDX_PATH)
108
- IDX["catalogo"] = IDX["source_file"].apply(catalog_tag)
109
-
110
- # ==========================
111
- # Endpoint de predicción
112
- # ==========================
113
- def predecir(consulta: str, top_por_catalogo: int = 1):
114
- if not consulta or not consulta.strip():
115
- return pd.DataFrame([{"Catálogo": "", "Código": "", "Nombre": "", "Similaridad": 0.0, "Origen": "—"}])
116
-
117
- # 1) Reglas
118
- out_regla = aplicar_reglas(consulta)
119
- if out_regla is not None:
120
- return out_regla.sort_values("Catálogo")
121
-
122
- # 2) Modelo TF-IDF
123
- q = limpiar_texto(consulta)
124
- vec_q = VEC.transform([q])
125
- sims = cosine_similarity(vec_q, MAT)[0]
126
-
127
- df = IDX.copy()
128
- df["Similaridad"] = sims
129
-
130
- frames = []
131
- for cat in ["CICP", "CPC", "UNSPSC"]:
132
- sub = (
133
- df[df["catalogo"] == cat]
134
- .sort_values("Similaridad", ascending=False)
135
- .head(top_por_catalogo)
136
- .copy()
137
- )
138
- parsed = sub.apply(lambda r: parse_code_name(r.get("codes_raw",""), r.get("text_original","")), axis=1)
139
- sub["Código"] = [c for c, _ in parsed]
140
- sub["Nombre"] = [n for _, n in parsed]
141
- sub["Catálogo"] = cat
142
- sub["Origen"] = "TF-IDF"
143
- frames.append(sub[["Catálogo","Código","Nombre","Similaridad","Origen"]])
144
-
145
- res = pd.concat(frames, ignore_index=True)
146
- res["Similaridad"] = res["Similaridad"].round(4)
147
- return res.sort_values("Catálogo")
148
-
149
- # ==========================
150
- # Gradio UI
151
- # ==========================
152
- with gr.Blocks(title="Recomendador CICP / CPC / UNSPSC") as demo:
153
- gr.Markdown("## Recomendador por texto (CICP / CPC / UNSPSC)\n*TF-IDF + reglas*")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
  with gr.Row():
155
- consulta = gr.Textbox(label="Descripción técnica", lines=3, placeholder="Ej: Vinculación joven investigadora OPS ...")
156
- topk = gr.Slider(1, 5, value=1, step=1, label="Top por catálogo")
157
  btn = gr.Button("Buscar")
158
- salida = gr.Dataframe(headers=["Catálogo","Código","Nombre","Similaridad","Origen"], interactive=False)
 
159
 
160
- ejemplos = gr.Examples(
161
- examples=[
162
- ["Vinculación joven investigadora, OPS gastos de operación y servicios técnicos", 1],
163
- ["contrato de personal temporal", 1],
164
- ["reactivos de laboratorio para cromatografía hplc", 1],
165
- ],
166
- inputs=[consulta, topk],
167
- label="Ejemplos",
168
- )
169
 
170
- btn.click(predecir, inputs=[consulta, topk], outputs=[salida])
171
 
172
  if __name__ == "__main__":
 
173
  demo.launch()
 
 
1
  # -*- coding: utf-8 -*-
 
2
  import re
3
  import unicodedata
4
  from pathlib import Path
5
+ from typing import Tuple
6
 
7
  import gradio as gr
8
  import joblib
 
10
  from scipy import sparse
11
  from sklearn.metrics.pairwise import cosine_similarity
12
 
13
+ # -----------------------------
14
+ # Rutas (funciona en HF Spaces)
15
+ # -----------------------------
16
+ ROOT = Path(__file__).parent
17
+ ART = ROOT / "artifacts"
18
  VEC_PATH = ART / "tfidf_vectorizer.joblib"
19
  MAT_PATH = ART / "tfidf_matrix.npz"
20
  IDX_PATH = ART / "doc_index.csv"
21
 
22
+ # -----------------------------
23
+ # Limpieza (sin NLTK)
24
+ # -----------------------------
 
 
 
 
 
 
 
 
 
 
25
  def strip_accents(s: str) -> str:
26
  return "".join(c for c in unicodedata.normalize("NFKD", s) if not unicodedata.combining(c))
27
 
28
+ # stopwords españolas normalizadas (compacta; puedes ampliar)
29
+ STOPWORDS = {
30
+ "a","aca","ahi","ahí","al","algo","algunas","algunos","alla","allá","alli","allí","ante","antes",
31
+ "aquel","aquella","aquellas","aquellos","aqui","aquí","asi","así","aun","aunque","bajo","bien","cabe",
32
+ "cada","casi","cierta","ciertas","cierto","ciertos","como","con","contra","cual","cuales","cualquier",
33
+ "cualesquiera","cuyo","cuya","cuyas","cuyos","de","del","desde","donde","dos","el","ella","ellas",
34
+ "ellos","en","entre","era","erais","eramos","éramos","eran","eres","es","esa","esas","ese","eso",
35
+ "esos","esta","está","estaba","estaban","estamos","estan","están","estar","estas","este","esto",
36
+ "estos","etc","fue","fueron","ha","habia","había","habian","habían","haber","hay","hasta","la","las",
37
+ "le","les","lo","los","mas","más","me","mi","mis","mucha","muchas","mucho","muchos","muy","nada","ni",
38
+ "no","nos","nosotras","nosotros","nuestra","nuestras","nuestro","nuestros","o","otra","otras","otro",
39
+ "otros","para","pero","poco","por","porque","que","qué","quien","quién","quienes","quiénes","se","sea",
40
+ "sean","ser","si","sí","sido","sin","sobre","su","sus","tal","tambien","también","tampoco","tan",
41
+ "tanta","tantas","tanto","te","tenia","tenía","tenian","tenían","tendra","tendrá","tendran","tendrán",
42
+ "tenemos","tengo","ti","tiene","tienen","todo","todos","tu","tus","un","una","unas","uno","unos",
43
+ "usted","ustedes","y","ya"
44
+ }
45
+ # normalizar stopwords
46
+ STOPWORDS = {strip_accents(w.lower()) for w in STOPWORDS} | {"aun"}
47
+
48
+ def clean_text(s: str) -> str:
49
  if not isinstance(s, str):
50
  s = "" if s is None else str(s)
51
  s = strip_accents(s.lower())
52
+ s = re.sub(r"[“”„‟‹›«»—–‐-‒–—―\-]", " ", s) # comillas/guiones unicode
53
+ s = re.sub(r"[^\w\s]", " ", s) # puntuación
54
  s = re.sub(r"\s+", " ", s).strip()
55
  toks = [t for t in s.split() if t not in STOPWORDS and not t.isdigit()]
56
  return " ".join(toks)
57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  def catalog_tag(source_file: str) -> str:
59
  s = (source_file or "").lower()
60
  if "cicp" in s: return "CICP"
 
62
  if "unspsc" in s: return "UNSPSC"
63
  return "OTRO"
64
 
65
+ def parse_code_name(codes_raw: str, text_original: str) -> Tuple[str, str]:
66
  codes_raw = str(codes_raw or "")
67
  text_original = str(text_original or "")
68
  m = re.search(r"CODIGO;NOMBRE:\s*([^;|]+)\s*;\s*([^|]+)", codes_raw, flags=re.I)
 
82
  if m2 and name is None: name = m2.group(1).strip()
83
  return (code or "").strip(), (name or "").strip()
84
 
85
+ # -----------------------------
86
+ # Reglas duras (ejemplo OPS)
87
+ # -----------------------------
88
+ REGLAS = [
89
+ {
90
+ "keywords": ["ops", "orden de prestacion de servicios", "contrato ops"],
91
+ "respuesta": {
92
+ "CICP": ("2.1.2.02.02.008", "Servicios prestados a las empresas y servicios de producción"),
93
+ "CPC": ("8", "Servicios prestados a las empresas y servicios de producción"),
94
+ "UNSPSC":("80111600", "Servicios de personal temporal"),
95
+ },
96
+ "motivo": "Coincidencia con palabra clave OPS",
97
+ },
98
+ ]
99
+
100
+ def aplicar_reglas(query: str):
101
+ q = clean_text(query)
102
+ for r in REGLAS:
103
+ if any(k in q for k in r["keywords"]):
104
+ df = pd.DataFrame(
105
+ [{"Catálogo": k, "Código": v[0], "Nombre": v[1], "Similaridad": 1.0} for k, v in r["respuesta"].items()]
106
+ )
107
+ return df, r["motivo"]
108
+ return None, None
109
+
110
+ # -----------------------------
111
+ # Carga perezosa de artefactos
112
+ # -----------------------------
113
+ VECTOR = None
114
+ MATRIX = None
115
+ INDEX = None
116
+
117
+ def ensure_loaded():
118
+ global VECTOR, MATRIX, INDEX
119
+ if VECTOR is None:
120
+ VECTOR = joblib.load(VEC_PATH)
121
+ if MATRIX is None:
122
+ MATRIX = sparse.load_npz(MAT_PATH)
123
+ if INDEX is None:
124
+ INDEX = pd.read_csv(IDX_PATH)
125
+
126
+ # -----------------------------
127
+ # Motor TF-IDF agrupado
128
+ # -----------------------------
129
+ def recomendar(query: str, k: int):
130
+ try:
131
+ # 1) Reglas
132
+ df_regla, motivo = aplicar_reglas(query)
133
+ if df_regla is not None:
134
+ return df_regla, f"⚙️ Regla activada: {motivo}"
135
+
136
+ # 2) Modelo
137
+ ensure_loaded()
138
+ q = clean_text(query)
139
+ if not q:
140
+ return pd.DataFrame(), "La consulta quedó vacía tras limpieza."
141
+
142
+ xq = VECTOR.transform([q])
143
+ sims = cosine_similarity(xq, MATRIX).flatten()
144
+
145
+ df = INDEX.copy()
146
+ df["Similaridad"] = sims
147
+ df["Catálogo"] = df["source_file"].apply(catalog_tag)
148
+
149
+ # top-k por catálogo
150
+ out = []
151
+ for cat in ["CICP", "CPC", "UNSPSC"]:
152
+ sub = (
153
+ df[df["Catálogo"] == cat]
154
+ .sort_values("Similaridad", ascending=False)
155
+ .head(int(k))
156
+ .copy()
157
+ )
158
+ if sub.empty:
159
+ continue
160
+ parsed = sub.apply(lambda r: parse_code_name(r.get("codes_raw",""), r.get("text_original","")), axis=1)
161
+ sub["Código"] = [c for c, _ in parsed]
162
+ sub["Nombre"] = [n for _, n in parsed]
163
+ out.append(sub[["Catálogo", "Código", "Nombre", "Similaridad"]])
164
+
165
+ if not out:
166
+ return pd.DataFrame(), "Sin candidatos."
167
+ res = pd.concat(out, ignore_index=True)
168
+ return res, "OK"
169
+ except Exception as e:
170
+ # mostrará el error en la interfaz
171
+ return pd.DataFrame(), f"Error: {type(e).__name__}: {e}"
172
+
173
+ # -----------------------------
174
+ # Interfaz Gradio
175
+ # -----------------------------
176
+ with gr.Blocks(title="Recomendador por texto (CICP / CPC / UNSPSC)") as demo:
177
+ gr.Markdown("# Recomendador por texto (CICP / CPC / UNSPSC)\n\n_TF-IDF + reglas_")
178
  with gr.Row():
179
+ query = gr.Textbox(label="Descripción técnica", placeholder="reactivos de laboratorio para cromatografía hplc", lines=3)
180
+ k = gr.Slider(1, 5, value=1, step=1, label="Top por catálogo")
181
  btn = gr.Button("Buscar")
182
+ out = gr.Dataframe(headers=["Catálogo","Código","Nombre","Similaridad"], label="Resultados", wrap=True)
183
+ status = gr.Markdown()
184
 
185
+ def _on_click(q, topk):
186
+ df, msg = recomendar(q, topk)
187
+ return df, (f"**Estado:** {msg}" if msg else "")
 
 
 
 
 
 
188
 
189
+ btn.click(_on_click, inputs=[query, k], outputs=[out, status])
190
 
191
  if __name__ == "__main__":
192
+ # Para pruebas locales
193
  demo.launch()