| import os |
| import re |
| import json |
| import time |
| import sqlite3 |
| import textwrap |
| from pathlib import Path |
| from datetime import datetime |
| from uuid import uuid4 |
| from typing import Dict, List, Any, Optional, Tuple |
|
|
| import gradio as gr |
| from langdetect import detect, LangDetectException |
| from PIL import Image, ImageDraw, ImageFont |
|
|
| try: |
| from huggingface_hub import InferenceClient |
| except Exception: |
| InferenceClient = None |
|
|
| try: |
| from sentence_transformers import SentenceTransformer, util |
| except Exception: |
| SentenceTransformer = None |
| util = None |
|
|
|
|
# --- Filesystem layout: all runtime artifacts live next to this file. -------
APP_NAME = "Sistema de Validación y Gestión de Retos Complejos"
BASE_DIR = Path(__file__).resolve().parent
DATA_DIR = BASE_DIR / "data"  # SQLite database directory
TMP_DIR = BASE_DIR / "tmp"  # scratch space for generated files
CONFIG_DIR = BASE_DIR / "config"  # user-editable configuration
PROMPTS_DIR = CONFIG_DIR / "prompts"  # LLM prompt templates (rewritten each boot)
ASSETS_DIR = BASE_DIR / "assets"  # static assets (logo)
DB_PATH = DATA_DIR / "app.db"
CONFIG_PATH = CONFIG_DIR / "settings.json"
BANNED_WORDS_PATH = CONFIG_DIR / "banned_words.txt"  # one banned word per line
EXAMPLES_PATH = CONFIG_DIR / "examples.json"
LOGO_PATH = ASSETS_DIR / "program_logo.png"
|
|
# Defaults seeded into config/settings.json on first boot (bootstrap()).
DEFAULT_SETTINGS = {
    "app_name": APP_NAME,
    "min_chars_reto": 100,  # length bounds enforced by l0_validate
    "max_chars_reto": 2200,
    "require_remote_eval": False,
    "prefer_local_semantic_eval": True,  # embedding fallback before the lexical heuristic
    "show_remote_errors": True,
    # Canvas PNG rendering parameters (colors as hex strings).
    "canvas": {
        "width": 1920,
        "height": 1360,
        "dpi": 150,
        "header_color": "#2563eb",
        "background_color": "#f6f8fc",
        "panel_bg": "#ffffff",
        "panel_border": "#dbe1ea",
        "text_color": "#111827",
        "accent_color": "#16a34a",
    },
    "local_semantic_model": "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
    # Remote HF models tried in order for the full evaluation.
    "remote_models": [
        {"name": "Qwen/Qwen2.5-7B-Instruct", "mode": "chat"},
        {"name": "mistralai/Mistral-7B-Instruct-v0.3", "mode": "chat"},
        {"name": "meta-llama/Meta-Llama-3-8B-Instruct", "mode": "chat"},
        {"name": "google/gemma-2-9b-it", "mode": "chat"},
        {"name": "microsoft/Phi-3-mini-4k-instruct", "mode": "text"},
    ],
    # Remote HF models tried in order for section-filling assistance.
    "assist_models": [
        {"name": "Qwen/Qwen2.5-7B-Instruct", "mode": "chat"},
        {"name": "mistralai/Mistral-7B-Instruct-v0.3", "mode": "chat"},
        {"name": "google/gemma-2-9b-it", "mode": "chat"},
        {"name": "microsoft/Phi-3-mini-4k-instruct", "mode": "text"},
    ],
}


# Seed content for config/banned_words.txt (substring-matched, lowercase).
DEFAULT_BANNED_WORDS = [
    "idiota", "estúpido", "imbécil", "mierda", "maldito"
]


# Guide examples of well-formed complex challenges; also used as semantic
# reference points by the local evaluation engines.
DEFAULT_EXAMPLES = [
    {
        "title": "Salud comunitaria",
        "text": "¿Cómo pueden comunidades urbanas y rurales, instituciones públicas, organizaciones de base y redes de apoyo emocional crear mecanismos de acompañamiento que funcionen en contextos de diversidad cultural y económica, sin imponer un modelo único, probando intervenciones pequeñas y ajustando según lo que emerja?"
    },
    {
        "title": "Educación y abandono escolar",
        "text": "¿Cómo podrían escuelas, familias, municipalidades, empresas y organizaciones comunitarias reducir el abandono escolar en territorios con violencia y precariedad, considerando intereses diversos, restricciones legales y éticas, y aprendiendo de pilotos locales seguros para fallar antes de escalar?"
    },
    {
        "title": "Gobernanza hídrica",
        "text": "¿Cómo podrían comunidades, alcaldías, agricultores, empresas, autoridades ambientales y centros de investigación coordinar una gobernanza colaborativa del agua ante sequías impredecibles, interdependencias territoriales y prioridades en conflicto, mediante experimentos adaptativos y aprendizaje continuo?"
    }
]
|
|
# Evaluation prompt. Placeholders: {fewshot} (reference examples) and {reto}
# (the challenge). Doubled braces escape the literal JSON braces for .format().
PROMPT_EVAL = """Eres un evaluador experto de retos complejos según Cynefin.
No uses coincidencias de palabras superficiales. Evalúa el sentido del reto.
Evalúa el reto con base en estas 7 características
C1 incertidumbre_alta
C2 multiplicidad_de_actores
C3 interdependencia
C4 emergencia_sobre_imposicion
C5 gestion_por_restricciones
C6 experimentacion_probe_sense_respond
C7 aprendizaje_y_adaptacion
Usa como referencia conceptual estos ejemplos
{fewshot}
Reglas importantes
- Un reto puede ser sólido aunque no mencione literalmente las palabras "incertidumbre", "interdependencia" o "aprendizaje".
- Si el sentido del reto coincide con los ejemplos guía, reconoce ese patrón.
- Justifica en lenguaje claro.
- No castigues por estilo de redacción si la estructura conceptual está presente.
Devuelve SOLO JSON válido con este formato
{{
"global_score": 0-100,
"global_label": "fuertemente_complejo|prometedor|parcialmente_complejo|debil",
"global_summary": "máximo 120 palabras",
"strengths": ["...", "...", "..."],
"gaps": ["...", "...", "..."],
"criteria": {{
"C1": {{"label":"Incertidumbre alta","score":0-100,"status":"ALTO|MEDIO|BAJO","justification":"..." }},
"C2": {{"label":"Multiplicidad de actores","score":0-100,"status":"ALTO|MEDIO|BAJO","justification":"..." }},
"C3": {{"label":"Interdependencia","score":0-100,"status":"ALTO|MEDIO|BAJO","justification":"..." }},
"C4": {{"label":"Emergencia sobre imposición","score":0-100,"status":"ALTO|MEDIO|BAJO","justification":"..." }},
"C5": {{"label":"Gestión por restricciones","score":0-100,"status":"ALTO|MEDIO|BAJO","justification":"..." }},
"C6": {{"label":"Experimentación","score":0-100,"status":"ALTO|MEDIO|BAJO","justification":"..." }},
"C7": {{"label":"Aprendizaje y adaptación","score":0-100,"status":"ALTO|MEDIO|BAJO","justification":"..." }}
}},
"rewrite_suggestion": "una mejor versión del reto en una sola pregunta"
}}
Reto
{reto}
"""


# Section-assist prompt. Placeholders: {reto}, {section_name}, {question},
# {context}. The model must answer with a single-key JSON object.
PROMPT_SECTION = """Eres un asistente para llenar un canvas de reto complejo.
Responde SOLO JSON válido
{{"answer":"texto breve, claro y útil"}}
Reto
{reto}
Sección objetivo
{section_name}
Pregunta
{question}
Contexto ya completado
{context}
"""


# Few-shot block injected into PROMPT_EVAL via the {fewshot} placeholder.
# Mirrors the three DEFAULT_EXAMPLES texts.
FEWSHOT_REFERENCE = """
Ejemplos de retos bien formulados
1. ¿Cómo pueden comunidades urbanas y rurales, instituciones públicas, organizaciones de base y redes de apoyo emocional crear mecanismos de acompañamiento que funcionen en contextos de diversidad cultural y económica, sin imponer un modelo único, probando intervenciones pequeñas y ajustando según lo que emerja?
2. ¿Cómo podrían escuelas, familias, municipalidades, empresas y organizaciones comunitarias reducir el abandono escolar en territorios con violencia y precariedad, considerando intereses diversos, restricciones legales y éticas, y aprendiendo de pilotos locales seguros para fallar antes de escalar?
3. ¿Cómo podrían comunidades, alcaldías, agricultores, empresas, autoridades ambientales y centros de investigación coordinar una gobernanza colaborativa del agua ante sequías impredecibles, interdependencias territoriales y prioridades en conflicto, mediante experimentos adaptativos y aprendizaje continuo?
"""
|
|
# Human-readable names for the seven Cynefin-inspired criteria C1-C7.
CYNEFIN_LABELS = {
    "C1": "Incertidumbre alta",
    "C2": "Multiplicidad de actores",
    "C3": "Interdependencia",
    "C4": "Emergencia sobre imposición",
    "C5": "Gestión por restricciones",
    "C6": "Experimentación",
    "C7": "Aprendizaje y adaptación",
}


# Ordered (key, label) pairs for the canvas wizard navigation.
WIZARD_STEPS = [
    ("reto", "Reto"),
    ("definicion", "Definición"),
    ("relevancia", "Relevancia"),
    ("conexion", "Conexión"),
    ("actores", "Actores"),
    ("gobernanza", "Gobernanza"),
    ("iniciativas", "Iniciativas"),
    ("resumen", "Resumen"),
]


# (title, guiding question) for the wizard sections that offer LLM assistance.
SECTION_META = {
    "definicion": ("Definición", "¿Qué aspectos fundamentales definen el reto propuesto?"),
    "relevancia": ("Relevancia", "¿Por qué creemos que este reto es relevante?"),
    "conexion": ("Conexión personal", "¿Cómo se conecta el reto con mi ámbito de trabajo?"),
    "gobernanza": ("Gobernanza", "¿Por qué la gobernanza colaborativa y anticipatoria puede contribuir a abordar el reto?"),
    "iniciativas": ("Iniciativas", "¿Qué iniciativas, alianzas, redes o proyectos conocemos relacionados con el reto?"),
}


# Process-wide caches: the loaded sentence-transformers model (keyed by its
# configured name) and the encoded anchor sentences per model name.
LOCAL_MODEL_CACHE = {"model": None, "name": None}
ANCHOR_CACHE = {}


# Anchor sentences per criterion: their embeddings are compared (cosine
# similarity) against the challenge text by fallback_eval_semantic.
ANCHORS = {
    "C1": [
        "El reto ocurre en un contexto incierto, cambiante e impredecible donde no existe una solución obvia.",
        "La situación depende del contexto y requiere explorar varias posibilidades antes de decidir.",
    ],
    "C2": [
        "Hay múltiples actores con intereses, capacidades y responsabilidades distintas que deben intervenir.",
        "Comunidades, instituciones, empresas, familias y organizaciones participan en el reto.",
    ],
    "C3": [
        "Los elementos del problema están interconectados y una decisión afecta a las demás partes del sistema.",
        "Existen interdependencias territoriales, sociales o institucionales entre los actores y variables.",
    ],
    "C4": [
        "La respuesta debe emerger de coordinación, interacción y ajuste, no de una imposición lineal.",
        "La solución necesita construirse de forma colaborativa y adaptativa.",
    ],
    "C5": [
        "Existen restricciones éticas, legales, institucionales, territoriales o presupuestarias que limitan la acción.",
        "El reto exige trabajar dentro de límites y condiciones habilitadoras.",
    ],
    "C6": [
        "El reto se beneficia de pilotos, pruebas pequeñas, experimentación y aprendizaje antes de escalar.",
        "Conviene probar, observar y ajustar en lugar de ejecutar una receta cerrada desde el inicio.",
    ],
    "C7": [
        "La respuesta exige aprendizaje continuo, adaptación y corrección a medida que aparecen nuevos patrones.",
        "El sistema debe observar, aprender y reconfigurarse continuamente.",
    ],
}


# Substrings (some deliberately truncated stems, e.g. "incertid") counted as
# explicit evidence per criterion; matched against the normalized text.
EVIDENCE_TERMS = {
    "C1": ["incertid", "impredec", "diversidad", "conflicto", "tensión", "precariedad", "contexto", "cambiante", "volátil", "ambig"],
    "C2": ["comunidades", "instituciones", "organizaciones", "familias", "empresas", "alcaldías", "autoridades", "escuelas", "redes", "actores", "agricultores", "centros"],
    "C3": ["coordinar", "interdepend", "territorial", "sistém", "múltiples", "conjunto", "redes", "gobernanza", "afecta", "articul"],
    "C4": ["sin imponer", "emerja", "colabor", "co-crear", "ajustando", "mecanismos", "acompañamiento", "particip", "adaptativa"],
    "C5": ["restric", "ética", "legal", "condiciones", "límites", "prioridades", "presupuesto", "violencia", "precariedad", "regulación"],
    "C6": ["piloto", "probar", "intervenciones pequeñas", "experimento", "safe to fail", "ajustando", "antes de escalar", "iter", "observ"],
    "C7": ["aprendiz", "adapt", "ajustando", "continuo", "iter", "según lo que emerja", "aprendiendo", "resilien", "reconfigur"],
}
|
def bootstrap() -> None:
    """Prepare the on-disk layout on startup: directories, seed config files,
    prompt templates, placeholder logo and the SQLite schema."""
    # Every working directory must exist before any file is written below.
    for directory in (DATA_DIR, TMP_DIR, CONFIG_DIR, PROMPTS_DIR, ASSETS_DIR):
        directory.mkdir(parents=True, exist_ok=True)

    # Seed editable config files only when missing so user edits survive restarts.
    seed_files = (
        (CONFIG_PATH, json.dumps(DEFAULT_SETTINGS, ensure_ascii=False, indent=2)),
        (BANNED_WORDS_PATH, "\n".join(DEFAULT_BANNED_WORDS)),
        (EXAMPLES_PATH, json.dumps(DEFAULT_EXAMPLES, ensure_ascii=False, indent=2)),
    )
    for target, payload in seed_files:
        if not target.exists():
            target.write_text(payload, encoding="utf-8")

    # Prompt templates are rewritten on every boot (no exists() guard, unlike
    # the config files above) so they always track the in-code defaults.
    for filename, template in {"eval.txt": PROMPT_EVAL, "section.txt": PROMPT_SECTION}.items():
        (PROMPTS_DIR / filename).write_text(template, encoding="utf-8")

    if not LOGO_PATH.exists():
        create_placeholder_logo()

    init_db()
|
|
|
|
def create_placeholder_logo():
    """Render a simple branded banner PNG used when no real logo is provided."""
    canvas = Image.new("RGBA", (900, 220), (255, 255, 255, 0))
    painter = ImageDraw.Draw(canvas)
    default_font = ImageFont.load_default()
    # Blue rounded panel with the program title drawn on top.
    painter.rounded_rectangle((20, 20, 880, 200), radius=24, fill=(37, 99, 235, 255))
    painter.text(
        (50, 70),
        "Programa Formativo • Valores e Instituciones Democráticas",
        fill="white",
        font=default_font,
    )
    canvas.save(LOGO_PATH)
|
|
|
|
def load_settings() -> Dict[str, Any]:
    """Read and parse the JSON settings file (created by bootstrap())."""
    raw = CONFIG_PATH.read_text(encoding="utf-8")
    return json.loads(raw)
|
|
|
|
def init_db() -> None:
    """Create the SQLite schema (evaluations and canvases tables) if missing.

    The connection is closed in a ``finally`` block so a failing DDL statement
    no longer leaks it; ``with con`` commits on success and rolls back on error.
    """
    con = sqlite3.connect(DB_PATH)
    try:
        with con:
            con.execute("""
            CREATE TABLE IF NOT EXISTS evaluations (
                id TEXT PRIMARY KEY,
                user_id TEXT,
                reto_text TEXT,
                result_json TEXT,
                created_at TEXT
            )
            """)
            con.execute("""
            CREATE TABLE IF NOT EXISTS canvases (
                id TEXT PRIMARY KEY,
                user_id TEXT,
                reto_text TEXT,
                canvas_json TEXT,
                png_path TEXT,
                created_at TEXT
            )
            """)
    finally:
        con.close()
|
|
|
|
def get_hf_token() -> str:
    """Return the Hugging Face API token from the environment, or "" if unset.

    HF_TOKEN takes precedence over HUGGINGFACEHUB_API_TOKEN; empty values are
    skipped just like unset ones.
    """
    for variable in ("HF_TOKEN", "HUGGINGFACEHUB_API_TOKEN"):
        value = os.getenv(variable)
        if value:
            return value
    return ""
|
|
|
|
def build_client() -> Optional[Any]:
    """Return an InferenceClient when a plausible token and the SDK exist, else None."""
    token = get_hf_token()
    unusable = InferenceClient is None or not token or not token.startswith("hf_")
    if unusable:
        return None
    try:
        return InferenceClient(api_key=token)
    except Exception:
        # Any construction failure is treated as "remote unavailable".
        return None
|
|
|
|
def read_text(path: Path, default: str = "") -> str:
    """Return the UTF-8 contents of *path*, or *default* when the file is absent."""
    if not path.exists():
        return default
    return path.read_text(encoding="utf-8")
|
|
|
|
def normalize_text(s: str) -> str:
    """Lowercase *s*, trim it, and collapse whitespace runs to single spaces.

    ``None`` is treated as the empty string.
    """
    collapsed = re.sub(r"\s+", " ", (s or "").strip())
    return collapsed.lower()
|
|
|
|
def detect_spanish(text: str) -> bool:
    """True when langdetect classifies *text* as Spanish.

    Detection failures (e.g. text too short) count as "not Spanish".
    """
    try:
        language = detect(text)
    except LangDetectException:
        return False
    return language == "es"
|
|
|
|
def get_banned_words() -> List[str]:
    """Load the banned-word list: one lowercase word per non-blank line."""
    words = []
    for line in read_text(BANNED_WORDS_PATH).splitlines():
        cleaned = line.strip()
        if cleaned:
            words.append(cleaned.lower())
    return words
|
|
|
|
def contains_offensive_content(text: str) -> bool:
    """True when any configured banned word occurs as a substring of the
    normalized (lowercased, whitespace-collapsed) text."""
    haystack = normalize_text(text)
    for word in get_banned_words():
        if word in haystack:
            return True
    return False
|
|
|
|
def l0_validate(reto: str) -> Tuple[bool, str]:
    """Run the cheap "level 0" gate over a challenge statement.

    Checks, in order: length bounds (taken from settings), Spanish language,
    question form, and banned vocabulary. Returns ``(ok, message)`` where
    *message* explains the first failed rule, or "OK" on success.
    """
    settings = load_settings()
    text = (reto or "").strip()
    min_chars = settings["min_chars_reto"]
    max_chars = settings["max_chars_reto"]
    # Fix: the messages previously hard-coded 100/2200 even though the limits
    # are configurable; quote the configured values so they stay accurate.
    if len(text) < min_chars:
        return False, f"El reto debe tener al menos {min_chars} caracteres."
    if len(text) > max_chars:
        return False, f"El reto no debe exceder {max_chars} caracteres."
    if not detect_spanish(text):
        return False, "El reto debe estar escrito en español."
    if "?" not in text and "¿" not in text:
        return False, "El reto debe estar formulado como pregunta."
    if contains_offensive_content(text):
        return False, "El contenido no cumple con las políticas de uso."
    return True, "OK"
|
|
|
|
def extract_json(raw: str) -> Dict[str, Any]:
    """Best-effort parse of *raw* as JSON.

    Tries the whole string first; failing that, attempts the outermost
    ``{...}`` span (handles models that wrap JSON in prose). Returns ``{}``
    when nothing parses or the input is empty/None.
    """
    candidate = (raw or "").strip()
    if not candidate:
        return {}
    try:
        return json.loads(candidate)
    except Exception:
        pass
    # Greedy brace match: grab from the first "{" to the last "}".
    found = re.search(r"\{.*\}", candidate, flags=re.S)
    if not found:
        return {}
    try:
        return json.loads(found.group(0))
    except Exception:
        return {}
|
|
|
|
def try_remote_model(model_name: str, mode: str, prompt: str, max_new_tokens: int = 1200) -> Tuple[Dict[str, Any], Optional[str]]:
    """Call one remote HF model and parse its answer as JSON.

    Returns ``(parsed, error)``: *parsed* is ``{}`` on any failure, and
    *error* is ``None`` on success or a "model [mode] -> reason" string.
    """
    client = build_client()
    if client is None:
        return {}, "Cliente remoto no disponible"
    try:
        if mode == "chat":
            completion = client.chat.completions.create(
                model=model_name,
                messages=[
                    {"role": "system", "content": "Responde solamente JSON válido, sin markdown."},
                    {"role": "user", "content": prompt},
                ],
                max_tokens=max_new_tokens,
                temperature=0.1,
            )
            raw_answer = completion.choices[0].message.content
        else:
            # Plain text-generation endpoint for non-chat models.
            raw_answer = client.text_generation(
                prompt,
                model=model_name,
                max_new_tokens=max_new_tokens,
                temperature=0.1,
                return_full_text=False,
            )
        return extract_json(raw_answer), None
    except Exception as exc:
        return {}, f"{model_name} [{mode}] -> {str(exc)}"
|
|
|
|
def ping_remote_eval() -> Tuple[bool, str]:
    """Probe the configured remote models with a tiny JSON echo prompt.

    Returns ``(available, detail)``: the first model answering
    ``{"ok": true}`` wins; otherwise up to three collected errors are joined
    into the detail message.
    """
    token = get_hf_token()
    if not token or not token.startswith("hf_"):
        return False, "No hay token HF_TOKEN configurado en Secrets."
    failures: List[str] = []
    probe = 'Responde SOLO JSON válido {"ok": true}'
    for spec in load_settings()["remote_models"]:
        answer, failure = try_remote_model(spec["name"], spec.get("mode", "chat"), probe, 40)
        if isinstance(answer, dict) and answer.get("ok") is True:
            return True, f"Disponible con {spec['name']}"
        if failure:
            failures.append(failure)
    return False, "Sin respuesta válida. " + " | ".join(failures[:3])
|
|
|
|
def token_set(text: str) -> set:
    """Extract content-bearing Spanish tokens: length >= 4, stopwords removed."""
    stopwords = {"como", "pueden", "podrian", "deben", "estar", "para", "entre", "desde", "sobre", "mismo", "misma", "estas", "estos", "ellas", "ellos", "donde", "hacer", "podría", "cómo"}
    found = re.findall(r"[a-záéíóúñü]{4,}", normalize_text(text))
    return set(found) - stopwords
|
|
|
|
def jaccard(a: set, b: set) -> float:
    """Jaccard similarity |a∩b| / |a∪b|; 0.0 when either set is empty."""
    if not a or not b:
        return 0.0
    union_size = len(a | b)
    overlap_size = len(a & b)
    return overlap_size / max(1, union_size)
|
|
|
|
def count_actor_signals(text: str) -> int:
    """Count how many distinct actor-related terms occur in the normalized text."""
    actor_terms = (
        "comunidades", "instituciones", "organizaciones", "familias", "empresas",
        "alcaldías", "autoridades", "centros", "municipalidades", "escuelas",
        "redes", "gobierno", "actores", "agricultores",
    )
    normalized = normalize_text(text)
    total = 0
    for term in actor_terms:
        if term in normalized:
            total += 1
    return total
|
|
|
|
def evidence_terms_found(text: str, cid: str) -> List[str]:
    """Return up to six EVIDENCE_TERMS entries for criterion *cid* present in *text*."""
    normalized = normalize_text(text)
    matches = [term for term in EVIDENCE_TERMS.get(cid, []) if term in normalized]
    return matches[:6]
|
|
|
|
def extract_evidence_snippets(text: str, terms: List[str], limit: int = 3) -> List[str]:
    """Build short, de-duplicated context windows around the first hit of each term.

    Each snippet spans ~35 characters before and ~45 after the (case-
    insensitive) match, with an ellipsis on any truncated side. At most
    *limit* snippets are returned.
    """
    source = re.sub(r"\s+", " ", (text or "").strip())
    source_lower = source.lower()
    collected: List[str] = []
    for term in terms:
        hit = source_lower.find(term.lower())
        if hit < 0:
            continue
        left = max(0, hit - 35)
        right = min(len(source), hit + len(term) + 45)
        window = source[left:right].strip()
        if left > 0:
            window = "…" + window
        if right < len(source):
            window = window + "…"
        if window not in collected:
            collected.append(window)
        if len(collected) >= limit:
            break
    return collected
|
|
|
|
def clamp(v: float, lo: float, hi: float) -> float:
    """Constrain *v* to the range [lo, hi] (upper bound applied first)."""
    upper_bounded = hi if v > hi else v
    return lo if upper_bounded < lo else upper_bounded
|
|
|
|
def map_similarity_to_score(sim: float) -> int:
    """Linearly map a cosine similarity onto a 20..92 score.

    Similarities at or below 0.18 pin to 20, at or above 0.62 pin to 92,
    and values in between interpolate linearly (rounded to an int).
    """
    lo_sim, hi_sim = 0.18, 0.62
    lo_score, hi_score = 20, 92
    if sim <= lo_sim:
        return lo_score
    if sim >= hi_sim:
        return hi_score
    fraction = (sim - lo_sim) / (hi_sim - lo_sim)
    value = lo_score + fraction * (hi_score - lo_score)
    # Inlined clamp(): redundant for in-range fractions but kept for parity.
    value = max(lo_score, min(hi_score, value))
    return int(round(value))
|
|
|
|
def human_status(score: int) -> str:
    """Translate a 0-100 score into the ALTO/MEDIO/BAJO status used in reports."""
    for floor, label in ((80, "ALTO"), (50, "MEDIO")):
        if score >= floor:
            return label
    return "BAJO"
|
|
|
|
def label_for_global(score: int) -> str:
    """Map a global score onto its qualitative complexity label."""
    bands = (
        (80, "fuertemente_complejo"),
        (60, "prometedor"),
        (40, "parcialmente_complejo"),
    )
    for floor, label in bands:
        if score >= floor:
            return label
    return "debil"
|
|
|
|
def get_local_semantic_model() -> Tuple[Optional[Any], Optional[str]]:
    """Return ``(model, error)`` for the configured sentence-transformers model.

    The loaded model is cached in LOCAL_MODEL_CACHE keyed by model name, so a
    change in settings triggers a reload. Exactly one of the tuple elements
    is None.
    """
    wanted = load_settings().get("local_semantic_model")
    if SentenceTransformer is None:
        return None, "sentence-transformers no está instalado"
    cached = LOCAL_MODEL_CACHE["model"]
    if cached is not None and LOCAL_MODEL_CACHE["name"] == wanted:
        return cached, None
    try:
        loaded = SentenceTransformer(wanted)
    except Exception as exc:
        return None, str(exc)
    LOCAL_MODEL_CACHE["model"] = loaded
    LOCAL_MODEL_CACHE["name"] = wanted
    return loaded, None
|
|
|
|
def get_anchor_embeddings(model) -> Dict[str, Any]:
    """Encode (and cache) the anchor sentences plus guide examples for *model*.

    The cache key includes the configured model name so a settings change
    forces re-encoding. The "_examples" entry holds the guide-example
    embeddings alongside the per-criterion tensors.
    """
    configured_name = load_settings().get("local_semantic_model")
    cache_key = f"anchors::{configured_name}"
    cached = ANCHOR_CACHE.get(cache_key)
    if cached is not None:
        return cached
    encoded = {cid: model.encode(texts, convert_to_tensor=True) for cid, texts in ANCHORS.items()}
    encoded["_examples"] = model.encode([ex["text"] for ex in DEFAULT_EXAMPLES], convert_to_tensor=True)
    ANCHOR_CACHE[cache_key] = encoded
    return encoded
|
|
|
|
def trim_text_to_fit(text: str, max_len: int) -> str:
    """Trim *text* to at most *max_len* characters, ending with "..." when cut."""
    stripped = (text or "").strip()
    if len(stripped) <= max_len:
        return stripped
    head = stripped[:max_len - 3].rstrip()
    return head + "..."
|
|
|
|
def local_rewrite_suggestion(reto: str) -> str:
    """Deterministically rewrite *reto* into a single "¿Cómo podrían ...?" question.

    Strips question marks, lowercases the first letter, keeps up to four actor
    terms found in the text (or a generic actor segment), truncates the core
    to ~190 chars and appends an adaptive/experimental ending.
    """
    text = re.sub(r"[¿?]", "", (reto or "").strip())
    if text:
        text = text[:1].lower() + text[1:]
    # Fix: normalize once; the original recomputed normalize_text() on every
    # loop iteration (loop-invariant work).
    normalized = normalize_text(text)
    actors: List[str] = []
    for term in ["comunidades", "instituciones", "organizaciones", "familias", "empresas", "alcaldías", "autoridades", "escuelas", "redes"]:
        if term in normalized and term not in actors:
            actors.append(term)
    actor_segment = ", ".join(actors[:4]) if actors else "actores públicos, privados y comunitarios"
    if "sin imponer" not in normalized:
        ending = "sin imponer una solución única, probando respuestas pequeñas y ajustando según la evidencia que emerja"
    else:
        ending = "probando respuestas pequeñas y ajustando según la evidencia que emerja"
    core = trim_text_to_fit(text, 190)
    return f"¿Cómo podrían {actor_segment} abordar {core} {ending}?"
|
|
|
|
def build_local_explanation(result: Dict[str, Any]) -> str:
    """Compose the human-readable explanation of how the local engine scored
    the challenge; mentions the failed remote attempt when errors are present."""
    sentences = [
        "El resultado actual proviene del motor local del Space.",
        "Primero se transforma el reto en un embedding multilingüe.",
        "Luego se compara con anclas conceptuales de Cynefin para cada criterio.",
        "Después se añade señal explícita por términos observables y una comparación con ejemplos guía.",
        "Por último se promedia C1 a C7 y, si la similitud con ejemplos guía es alta, se aplica un piso de calibración.",
    ]
    if result.get("remote_errors"):
        sentences.append("El modo remoto se intentó primero, pero no respondió de forma válida.")
    return " ".join(sentences)
|
|
|
|
def fallback_eval_semantic(reto: str) -> Dict[str, Any]:
    """Score the challenge locally with multilingual sentence embeddings.

    For each Cynefin criterion C1-C7 the score blends three signals:
    0.55 * anchor-sentence similarity, 0.25 * explicit evidence-term signal,
    0.20 * guide-example similarity, plus an actor boost applied to C2 only.
    The global score is the plain average of the seven criteria, optionally
    raised to a calibration floor when example similarity is high. Falls back
    to the lexical heuristic when the embedding stack is unavailable.
    """
    model, err = get_local_semantic_model()
    if model is None or util is None:
        # No embedding model -> purely lexical scoring.
        return fallback_eval_heuristic(reto, semantic_unavailable=err)

    reto_embedding = model.encode([reto], convert_to_tensor=True)
    anchor_embeddings = get_anchor_embeddings(model)
    # Best cosine similarity against the guide examples (shared by all criteria).
    example_scores = util.cos_sim(reto_embedding, anchor_embeddings["_examples"])[0]
    example_sim = float(example_scores.max().item())

    criteria = {}
    scores = []
    breakdown = {}  # per-criterion numeric trace surfaced in diagnostics

    for cid in CYNEFIN_LABELS:
        # Semantic component: best match against this criterion's anchors.
        sims = util.cos_sim(reto_embedding, anchor_embeddings[cid])[0]
        anchor_sim = float(sims.max().item())
        semantic_score = map_similarity_to_score(anchor_sim)

        # Explicit-signal component: 22 base + 14 per observed term, capped at 100.
        terms = evidence_terms_found(reto, cid)
        hits = len(terms)
        signal_score = min(100, 22 + hits * 14)
        example_score = map_similarity_to_score(example_sim)

        # C2 (actors) gets an extra boost proportional to distinct actor terms.
        actor_boost = min(18, count_actor_signals(reto) * 3) if cid == "C2" else 0
        blended = (semantic_score * 0.55) + (signal_score * 0.25) + (example_score * 0.20) + actor_boost
        final_score = int(round(clamp(blended, 18, 95)))
        status = human_status(final_score)
        snippets = extract_evidence_snippets(reto, terms)

        just_parts = [
            f"Similitud conceptual {anchor_sim:.2f}",
            f"similitud con ejemplos {example_sim:.2f}",
            f"señales explícitas {hits}",
        ]
        if actor_boost:
            just_parts.append(f"boost de actores {actor_boost}")
        criteria[cid] = {
            "label": CYNEFIN_LABELS[cid],
            "score": final_score,
            "status": status,
            "justification": "Modelo semántico local. " + ", ".join(just_parts) + ".",
            "evidence_terms": terms,
            "evidence_snippets": snippets,
        }
        breakdown[cid] = {
            "anchor_similarity": round(anchor_sim, 3),
            "semantic_score": semantic_score,
            "signal_hits": hits,
            "signal_score": signal_score,
            "example_similarity": round(example_sim, 3),
            "example_score": example_score,
            "actor_boost": actor_boost,
            "weight_semantic": 0.55,
            "weight_signals": 0.25,
            "weight_examples": 0.20,
            "final_score": final_score,
        }
        scores.append(final_score)

    # Global = simple average; calibration floors reward closeness to the
    # guide examples (the 0.68 rule can override the 0.58 rule).
    raw_global = int(round(sum(scores) / len(scores)))
    global_score = raw_global
    calibration_note = "Sin calibración adicional."
    if example_sim >= 0.58 and global_score < 72:
        global_score = 72
        calibration_note = "Piso aplicado por alta similitud semántica con ejemplos guía."
    if example_sim >= 0.68 and global_score < 78:
        global_score = 78
        calibration_note = "Piso aplicado por similitud muy alta con ejemplos guía."

    strengths = [criteria[c]["label"] for c in criteria if criteria[c]["score"] >= 70][:3]
    gaps = [criteria[c]["label"] for c in criteria if criteria[c]["score"] < 60][:3]

    result = {
        "global_score": global_score,
        "global_label": label_for_global(global_score),
        "global_summary": "Resultado generado con evaluación semántica local. Usa embeddings multilingües, anclas conceptuales por criterio, similitud con ejemplos guía y señales conceptuales.",
        "strengths": strengths or ["Rasgos de complejidad parcialmente visibles"],
        "gaps": gaps or ["Conviene explicitar mejor el mecanismo adaptativo"],
        "criteria": criteria,
        "rewrite_suggestion": local_rewrite_suggestion(reto),
        "remote_used": False,
        "local_breakdown": {
            "method": "semantic-local",
            "model": load_settings().get("local_semantic_model"),
            "example_similarity_max": round(example_sim, 3),
            "raw_global_score": raw_global,
            "adjusted_global_score": global_score,
            "per_criterion": breakdown,
            "calibration_note": calibration_note,
            "global_formula": "promedio simple de C1 a C7",
            "score_formula": "0.55*score_semantico + 0.25*score_senales + 0.20*score_ejemplos + boost_actores",
        }
    }
    # The placeholder summary above is replaced by the narrated explanation.
    result["global_summary"] = build_local_explanation(result)
    return result
|
|
|
|
def fallback_eval_heuristic(reto: str, semantic_unavailable: Optional[str] = None) -> Dict[str, Any]:
    """Score the challenge with a purely lexical heuristic (last-resort engine).

    Per criterion: base 28 + 13 per evidence-term hit, an actor boost for C2,
    and a similarity boost from the best Jaccard overlap with the guide
    examples; each score is clamped to [18, 95]. The global score is the
    average of C1-C7 with optional calibration floors. *semantic_unavailable*
    records why the embedding engine was skipped (surfaced in diagnostics).
    """
    t = normalize_text(reto)
    examples_tokens = [token_set(ex["text"]) for ex in DEFAULT_EXAMPLES]
    reto_tokens = token_set(reto)
    # Best token-set overlap with any guide example ("+ [0.0]" guards empty max).
    sim = max([jaccard(reto_tokens, ex_t) for ex_t in examples_tokens] + [0.0])

    criteria = {}
    scores = []
    breakdown = {}  # per-criterion numeric trace surfaced in diagnostics

    for cid, patterns in EVIDENCE_TERMS.items():
        terms = [p for p in patterns if p in t][:6]
        hits = len(terms)
        base = 28
        signal_boost = hits * 13
        # C2 (actors) gets an extra boost proportional to distinct actor terms.
        actor_boost = min(20, count_actor_signals(reto) * 4) if cid == "C2" else 0
        # Staged similarity boost: +12 at 0.18, a further +8 at 0.28.
        sim_boost = 0
        if sim >= 0.18:
            sim_boost += 12
        if sim >= 0.28:
            sim_boost += 8
        score = int(max(18, min(95, base + signal_boost + actor_boost + sim_boost)))
        criteria[cid] = {
            "label": CYNEFIN_LABELS[cid],
            "score": score,
            "status": human_status(score),
            "justification": f"Evaluación heurística local. Señales {hits}, similitud con ejemplos {sim:.2f}.",
            "evidence_terms": terms,
            "evidence_snippets": extract_evidence_snippets(reto, terms),
        }
        breakdown[cid] = {
            "base": base,
            "signal_hits": hits,
            "signal_boost": signal_boost,
            "actor_boost": actor_boost,
            "sim_boost": sim_boost,
            "final_score": score,
        }
        scores.append(score)

    # Global = simple average; calibration floors reward lexical closeness to
    # the guide examples (the 0.32 rule can override the 0.24 rule).
    raw_global = int(round(sum(scores) / len(scores)))
    global_score = raw_global
    calibration_note = "Sin calibración adicional."
    if sim >= 0.24 and global_score < 68:
        global_score = 68
        calibration_note = "Se aplicó piso por similitud moderada con ejemplos guía."
    if sim >= 0.32 and global_score < 75:
        global_score = 75
        calibration_note = "Se aplicó piso por alta similitud con ejemplos guía."

    strengths = [criteria[c]["label"] for c in criteria if criteria[c]["score"] >= 70][:3]
    gaps = [criteria[c]["label"] for c in criteria if criteria[c]["score"] < 60][:3]

    result = {
        "global_score": global_score,
        "global_label": label_for_global(global_score),
        "global_summary": "Resultado generado con evaluación heurística local. Usa similitud léxica con ejemplos guía, señales conceptuales y conteo de actores.",
        "strengths": strengths or ["Rasgos de complejidad parcialmente visibles"],
        "gaps": gaps or ["Conviene explicitar mejor el mecanismo adaptativo"],
        "criteria": criteria,
        "rewrite_suggestion": local_rewrite_suggestion(reto),
        "remote_used": False,
        "local_breakdown": {
            "method": "heuristic-local",
            "semantic_unavailable": semantic_unavailable or "",
            "similarity_max": round(sim, 3),
            "raw_global_score": raw_global,
            "adjusted_global_score": global_score,
            "per_criterion": breakdown,
            "calibration_note": calibration_note,
            "global_formula": "promedio simple de C1 a C7",
            "score_formula": "base + boosts por señales, ejemplos y actores",
        }
    }
    # The placeholder summary above is replaced by the narrated explanation.
    result["global_summary"] = build_local_explanation(result)
    return result
|
|
|
|
def evaluate_reto_with_llm(reto: str) -> Dict[str, Any]:
    """Evaluate a challenge: try the configured remote LLMs, then fall back locally.

    The first remote JSON answer containing both "criteria" and "global_score"
    wins; otherwise the local semantic (or heuristic) engine is used. The
    result always carries "remote_errors" and per-model timing diagnostics.
    """
    prompt = read_text(PROMPTS_DIR / "eval.txt", PROMPT_EVAL).format(reto=reto, fewshot=FEWSHOT_REFERENCE)
    # Fix: read settings once; the original called load_settings() twice,
    # re-reading and re-parsing the settings file from disk.
    settings = load_settings()
    errors: List[str] = []
    timings: List[Dict[str, Any]] = []

    for spec in settings["remote_models"]:
        start = time.time()
        out, err = try_remote_model(spec["name"], spec.get("mode", "chat"), prompt, 1600)
        elapsed = round(time.time() - start, 2)
        timings.append({"model": spec["name"], "mode": spec.get("mode", "chat"), "seconds": elapsed, "ok": bool(out)})
        if isinstance(out, dict) and "criteria" in out and "global_score" in out:
            out["model"] = spec["name"]
            out["remote_used"] = True
            out["remote_errors"] = errors
            out["diagnostics"] = {"remote_timings": timings}
            return out
        if err:
            errors.append(err)

    # No remote model produced a valid evaluation: use the local engines.
    if settings.get("prefer_local_semantic_eval", True):
        out = fallback_eval_semantic(reto)
    else:
        out = fallback_eval_heuristic(reto)

    out["model"] = out.get("local_breakdown", {}).get("model", "heuristic-plus")
    out["remote_errors"] = errors
    out["diagnostics"] = {"remote_timings": timings}
    return out
|
|
|
|
def save_evaluation(user_id: str, reto_text: str, result_json: Dict[str, Any]) -> None:
    """Persist one evaluation row (UUID id, UTC ISO timestamp) to SQLite.

    The connection is closed in a ``finally`` block so a failing INSERT no
    longer leaks it; ``with con`` commits on success and rolls back on error.
    """
    con = sqlite3.connect(DB_PATH)
    try:
        with con:
            con.execute(
                """
                INSERT INTO evaluations (id, user_id, reto_text, result_json, created_at)
                VALUES (?, ?, ?, ?, ?)
                """,
                (
                    str(uuid4()),
                    user_id,
                    reto_text,
                    json.dumps(result_json, ensure_ascii=False),
                    # NOTE(review): utcnow() is naive and deprecated in 3.12;
                    # kept to preserve the stored timestamp format — confirm
                    # before switching to datetime.now(timezone.utc).
                    datetime.utcnow().isoformat(),
                ),
            )
    finally:
        con.close()
|
|
|
|
def diagnostics_markdown(result: Dict[str, Any]) -> str:
    """Render the technical diagnostics of an evaluation result as Markdown.

    Includes the effective engine/model, the local-engine breakdown when
    present, a timings table for remote attempts, and up to six remote errors.
    """
    if not result:
        return "Sin diagnóstico todavía."
    source = "LLM remoto" if result.get("remote_used") else "motor local"
    lines = [
        "## Diagnóstico técnico",
        f"- Fuente efectiva **{source}**",
        f"- Modelo registrado **{result.get('model', 'N/D')}**",
    ]
    local = result.get("local_breakdown")
    if local:
        lines.append(f"- Método local **{local.get('method','N/D')}**")
        lines.append(f"- Fórmula global **{local.get('global_formula','N/D')}**")
        lines.append(f"- Fórmula por criterio **{local.get('score_formula','N/D')}**")
        lines.append(f"- Calibración **{local.get('calibration_note','N/D')}**")
        if local.get("example_similarity_max") is not None:
            lines.append(f"- Similitud máxima con ejemplos guía **{local.get('example_similarity_max')}**")
        if local.get("similarity_max") is not None:
            lines.append(f"- Similitud léxica máxima con ejemplos guía **{local.get('similarity_max')}**")
    timings = result.get("diagnostics", {}).get("remote_timings", [])
    if timings:
        table = ["| Modelo | Modo | Segundos | Respuesta válida |", "|---|---|---:|---|"]
        for entry in timings:
            table.append(f"| {entry.get('model','')} | {entry.get('mode','')} | {entry.get('seconds',0)} | {'Sí' if entry.get('ok') else 'No'} |")
        lines.append("\n\n" + "\n".join(table))
    if result.get("remote_errors"):
        lines.append("\n\n**Errores remotos**\n- " + "\n- ".join(result["remote_errors"][:6]))
    return "\n".join(lines)
|
|
|
|
def score_color(score: int) -> str:
    """Hex color for a score band: blue >=80, green >=60, amber >=40, red otherwise."""
    bands = ((80, "#2563eb"), (60, "#16a34a"), (40, "#f59e0b"))
    for floor, color in bands:
        if score >= floor:
            return color
    return "#ef4444"
|
|
|
|
def eval_chart_html(result: Dict[str, Any]) -> str:
    """Render the evaluation result as the HTML score dashboard.

    Builds one card per criterion C1-C7 (id, score, label, colored progress
    bar and up to four evidence-term pills) plus a global score card stating
    whether the remote LLM or the local engine produced the result. Missing
    criteria render as 0% cards.
    """
    criteria = result.get("criteria", {})
    cards = []
    for cid in ["C1", "C2", "C3", "C4", "C5", "C6", "C7"]:
        item = criteria.get(cid, {})
        score = int(item.get("score", 0))
        evidence = item.get("evidence_terms", [])
        evid_html = ""
        if evidence:
            # Cap at four matched-term pills per criterion card.
            pills = "".join([f'<span class="tag-pill">{e}</span>' for e in evidence[:4]])
            evid_html = f'<div class="evidence-pills">{pills}</div>'
        cards.append(f"""
<div class="criterion-card pop-in">
  <div class="criterion-head">
    <div class="criterion-id">{cid}</div>
    <div class="criterion-score">{score}%</div>
  </div>
  <div class="criterion-label">{item.get("label", CYNEFIN_LABELS[cid])}</div>
  <div class="bar"><div class="fill animate-fill" style="width:{score}%; background:{score_color(score)};"></div></div>
  {evid_html}
</div>
""")
    global_score = int(result.get("global_score", 0))
    model = result.get("model", "N/D")
    source = "LLM remoto" if result.get("remote_used") else "Evaluación local"
    return f"""
<div class="score-wrap pop-in">
  <div class="global-score-card">
    <div class="global-score-label">Puntaje global</div>
    <div class="global-score-value">{global_score}%</div>
    <div class="global-score-model">Fuente {source} · Modelo {model}</div>
    <div class="bar"><div class="fill animate-fill" style="width:{global_score}%; background:{score_color(global_score)};"></div></div>
  </div>
  <div class="criterion-grid">
    {''.join(cards)}
  </div>
</div>
"""
|
|
|
|
def breakdown_markdown(result: Dict[str, Any]) -> str:
    """Explain how the local engine computed its scores, as markdown.

    Produces one of two layouts depending on whether the semantic-local engine
    or the lexical fallback generated the breakdown. Returns "" when no local
    breakdown is present (e.g. a purely remote evaluation).
    """
    local = result.get("local_breakdown", {})
    if not local:
        return ""
    cids = ("C1", "C2", "C3", "C4", "C5", "C6", "C7")
    per_criterion = local.get("per_criterion", {})
    method = local.get("method", "local")
    note = local.get("calibration_note", "Sin calibración adicional.")
    if method == "semantic-local":
        rows = [
            f"| {cid} | {it.get('anchor_similarity','')} | {it.get('semantic_score','')} | {it.get('signal_hits','')} | {it.get('signal_score','')} | {it.get('example_similarity','')} | {it.get('example_score','')} | {it.get('actor_boost','')} | {it.get('final_score','')} |"
            for cid, it in ((c, per_criterion.get(c, {})) for c in cids)
        ]
        return (
            "\n\n**Cómo se calculó localmente**\n\n"
            f"- Método local **{method}**\n"
            f"- Modelo local **{local.get('model','N/D')}**\n"
            f"- Similitud máxima con ejemplos guía **{local.get('example_similarity_max','N/D')}**\n"
            f"- Score global crudo **{local.get('raw_global_score','N/D')}**\n"
            f"- Score global ajustado **{local.get('adjusted_global_score','N/D')}**\n"
            f"- Fórmula global **{local.get('global_formula','N/D')}**\n"
            f"- Fórmula por criterio **{local.get('score_formula','N/D')}**\n"
            f"- Calibración **{note}**\n\n"
            "| ID | Sim ancla | Score semántico | Señales | Score señales | Sim ejemplos | Score ejemplos | Boost actores | Score final |\n"
            "|---|---:|---:|---:|---:|---:|---:|---:|---:|\n"
            + "\n".join(rows)
        )
    # Lexical-heuristic fallback layout.
    rows = [
        f"| {cid} | {it.get('base','')} | {it.get('signal_hits','')} | {it.get('signal_boost','')} | {it.get('sim_boost','')} | {it.get('actor_boost','')} | {it.get('final_score','')} |"
        for cid, it in ((c, per_criterion.get(c, {})) for c in cids)
    ]
    return (
        "\n\n**Cómo se calculó localmente**\n\n"
        f"- Método local **{method}**\n"
        f"- Similitud máxima con ejemplos guía **{local.get('similarity_max','N/D')}**\n"
        f"- Score global crudo **{local.get('raw_global_score','N/D')}**\n"
        f"- Score global ajustado **{local.get('adjusted_global_score','N/D')}**\n"
        f"- Fórmula global **{local.get('global_formula','N/D')}**\n"
        f"- Fórmula por criterio **{local.get('score_formula','N/D')}**\n"
        f"- Calibración **{note}**\n"
        f"- Nota técnica **{local.get('semantic_unavailable','')}**\n\n"
        "| ID | Base | Señales | Boost señales | Boost similitud | Boost actores | Score final |\n"
        "|---|---:|---:|---:|---:|---:|---:|\n"
        + "\n".join(rows)
    )
|
|
|
|
def evidence_markdown(result: Dict[str, Any]) -> str:
    """List, per criterion, the detected signal terms and the activated text
    snippets; falls back to an explanatory note when no explicit evidence exists."""
    sections = []
    for cid in ("C1", "C2", "C3", "C4", "C5", "C6", "C7"):
        item = result.get("criteria", {}).get(cid, {})
        snippets = item.get("evidence_snippets", [])
        terms = item.get("evidence_terms", [])
        if not (snippets or terms):
            continue
        sections.append(f"### {cid} {item.get('label', CYNEFIN_LABELS[cid])}")
        if terms:
            sections.append("**Señales detectadas** " + ", ".join(terms))
        if snippets:
            sections.append("**Fragmentos activados**")
            sections.extend(f"- {snippet}" for snippet in snippets)
    if not sections:
        return "No se detectaron fragmentos explícitos. El motor se apoyó más en similitud semántica global."
    return "\n\n".join(sections)
|
|
|
|
def eval_markdown(result: Dict[str, Any]) -> str:
    """Compose the full evaluation report in markdown.

    Combines classification, source, summary, strengths/gaps, the rewrite
    suggestion, a per-criterion table, the local-engine breakdown and the
    snippet-level explainability section.
    """
    strengths = result.get("strengths", []) or ["Sin fortalezas destacadas"]
    gaps = result.get("gaps", []) or ["Sin vacíos destacados"]
    table_rows = []
    for cid in ("C1", "C2", "C3", "C4", "C5", "C6", "C7"):
        item = result.get("criteria", {}).get(cid, {})
        table_rows.append(
            f"| {cid} | {item.get('label', CYNEFIN_LABELS[cid])} | {item.get('score', 0)} | {item.get('status', 'N/D')} | {item.get('justification', '')} |"
        )
    source_text = "LLM remoto" if result.get("remote_used") else "evaluación local"
    report = (
        f"## Lectura del reto\n\n"
        f"**Clasificación** **{result.get('global_label', 'sin_clasificar')}** \n"
        f"**Fuente** **{source_text}** \n"
        f"**Resumen** {result.get('global_summary', '')}\n\n"
        f"**Fortalezas**\n- " + "\n- ".join(strengths) + "\n\n"
        f"**Vacíos**\n- " + "\n- ".join(gaps) + "\n\n"
        f"**Reformulación sugerida**\n\n{result.get('rewrite_suggestion', '')}\n\n"
        "| ID | Característica | Puntaje | Estado | Justificación |\n"
        "|---|---|---:|---|---|\n" + "\n".join(table_rows)
    )
    return report + breakdown_markdown(result) + "\n\n**Explicabilidad por fragmentos**\n\n" + evidence_markdown(result)
|
|
|
|
def evaluate_step(reto: str, session: Dict[str, Any]):
    """Run the full evaluation pipeline for one submitted reto.

    Validates the text (length/language/etc. via l0_validate), evaluates it
    (remote LLM with local fallback), persists the result and returns the UI
    payload: report markdown, chart HTML, diagnostics markdown, the updated
    session, the echoed reto and two next-button interactivity updates.
    """
    if not isinstance(session, dict):
        session = {}
    session = session or {}
    user_id = session.get("user_id") or str(uuid4())
    session["user_id"] = user_id

    ok, msg = l0_validate(reto)
    if not ok:
        # Validation failed: show the reason and keep navigation disabled.
        disabled = gr.update(interactive=False)
        return (
            f"## Validación básica\n\n❌ {msg}",
            "<div></div>",
            "Sin diagnóstico todavía.",
            session,
            "",
            disabled,
            disabled,
        )

    result = evaluate_reto_with_llm(reto)
    session["reto_text"] = reto
    session["eval_result"] = result
    save_evaluation(user_id, reto, result)

    enabled = gr.update(interactive=True)
    return (
        eval_markdown(result),
        eval_chart_html(result),
        diagnostics_markdown(result),
        session,
        reto,
        enabled,
        enabled,
    )
|
|
|
|
def section_context(data: Dict[str, Any]) -> str:
    """Serialize the already-filled canvas sections (plus actors) into a compact
    context string for prompting; returns a placeholder when nothing is filled."""
    parts = []
    for key in ("definicion", "relevancia", "conexion", "gobernanza", "iniciativas"):
        text = (data.get(key) or "").strip()
        if text:
            parts.append(f"{key}: {text}")
    actor_entries = data.get("actors", [])
    if actor_entries:
        joined = " | ".join(f"{a.get('name','')} - {a.get('role','')}" for a in actor_entries)
        parts.append("actores: " + joined)
    if not parts:
        return "Sin contexto previo"
    return "\n".join(parts)
|
|
|
|
def local_section_suggestion(reto: str, section_key: str, current_data: Dict[str, Any]) -> str:
    """Deterministic fallback text for a canvas section when no remote model answers.

    Only "definicion" depends on the current data (it reports at least 2
    actors). Unknown section keys yield "".
    """
    if section_key == "definicion":
        actor_floor = max(2, len(current_data.get("actors", [])))
        return f"Este reto puede definirse como un sistema abierto que involucra {actor_floor} o más actores, incertidumbre contextual y la necesidad de articular respuestas no lineales. La formulación debe resaltar que no existe una receta única y que el resultado dependerá de coordinación, aprendizaje y ajuste continuo."
    canned = {
        "relevancia": "El reto es relevante porque afecta resultados públicos, capacidad de coordinación y legitimidad de la respuesta. Si no se aborda bien, puede escalar costos, agravar tensiones entre actores y bloquear aprendizajes que solo aparecen cuando se interviene de forma adaptativa.",
        "conexion": "El reto se conecta con el ámbito de trabajo porque exige articular actores, traducir evidencia en decisiones y construir mecanismos de ajuste continuo. Esto implica capacidad de coordinación, lectura del contexto y margen para pilotear respuestas antes de escalar.",
        "gobernanza": "La gobernanza colaborativa y anticipatoria es útil porque permite repartir información, ajustar incentivos y generar respuestas emergentes sin depender de una sola autoridad. En retos complejos conviene coordinar, probar y corregir en ciclos cortos para reducir daño y aumentar aprendizaje.",
        "iniciativas": "Conviene mapear pilotos, alianzas interinstitucionales, redes comunitarias, programas existentes y experiencias comparables. El objetivo no es copiar una solución única, sino identificar mecanismos, aprendizajes y capacidades transferibles para adaptar al contexto.",
    }
    return canned.get(section_key, "")
|
|
|
|
def suggest_section_answer(reto: str, section_key: str, current_data: Dict[str, Any]) -> str:
    """Draft one canvas section, preferring remote assist models.

    Builds the section prompt from the configured template, tries each assist
    model in order and returns the first non-empty 'answer'; if none responds,
    falls back to the deterministic local suggestion.
    """
    section_name, question = SECTION_META.get(section_key, (section_key, ""))
    template = read_text(PROMPTS_DIR / "section.txt", PROMPT_SECTION)
    prompt = template.format(
        reto=reto,
        section_name=section_name,
        question=question,
        context=section_context(current_data),
    )
    for spec in load_settings()["assist_models"]:
        answer, _ = try_remote_model(spec["name"], spec.get("mode", "chat"), prompt, 450)
        if isinstance(answer, dict) and answer.get("answer"):
            return str(answer["answer"]).strip()
    return local_section_suggestion(reto, section_key, current_data)
|
|
|
|
def autofill_all_sections(reto: str, definicion: str, relevancia: str, conexion: str, gobernanza: str, iniciativas: str, actors_state: List[Dict[str, str]]):
    """Fill every empty canvas section with a local suggestion.

    Already-filled sections are left untouched; each newly generated section is
    folded into the running context so later suggestions can reference it.
    Returns the five section texts in canonical order.
    """
    order = ("definicion", "relevancia", "conexion", "gobernanza", "iniciativas")
    sections = {
        "definicion": definicion,
        "relevancia": relevancia,
        "conexion": conexion,
        "gobernanza": gobernanza,
        "iniciativas": iniciativas,
    }
    context: Dict[str, Any] = dict(sections)
    context["actors"] = actors_state or []
    for key in order:
        if not (sections[key] or "").strip():
            sections[key] = local_section_suggestion(reto, key, context)
            context[key] = sections[key]
    return tuple(sections[key] for key in order)
|
|
|
|
def actor_table_md(actors_state: List[Dict[str, str]]) -> str:
    """Render the actor list as a numbered markdown table, or a placeholder
    when the list is empty/None."""
    actors = actors_state or []
    if not actors:
        return "Sin actores agregados todavía."
    lines = ["| # | Actor | Rol |", "|---:|---|---|"]
    lines.extend(
        f"| {pos} | {actor.get('name','')} | {actor.get('role','')} |"
        for pos, actor in enumerate(actors, start=1)
    )
    return "\n".join(lines)
|
|
|
|
def actor_dropdown_choices(actors_state: List[Dict[str, str]]) -> gr.update:
    """Build the (label, index) dropdown choices for the current actor list.

    The selected value is always cleared because indices shift whenever the
    list changes.
    """
    entries = []
    for idx, actor in enumerate(actors_state or []):
        text = f"{idx+1}. {actor.get('name','')} — {actor.get('role','')}".strip(" —")
        entries.append((text, str(idx)))
    return gr.update(choices=entries, value=None)
|
|
|
|
def add_actor(name: str, role: str, actors_state: List[Dict[str, str]]):
    """Append a (name, role) actor to the state when the name is non-empty.

    Returns the updated state, refreshed table markdown, cleared name/role
    input values and refreshed dropdown choices.
    """
    actors_state = actors_state or []
    clean_name = (name or "").strip()
    clean_role = (role or "").strip()
    if clean_name:
        actors_state.append({"name": clean_name, "role": clean_role})
    # Same payload shape whether or not an actor was added.
    return actors_state, actor_table_md(actors_state), "", "", actor_dropdown_choices(actors_state)
|
|
|
|
def remove_selected_actor(selected_idx: str, actors_state: List[Dict[str, str]]):
    """Remove the actor at the dropdown-selected index, ignoring invalid selections.

    ``selected_idx`` arrives as the stringified index stored as the dropdown
    value and may be None, "" or non-numeric when nothing is selected.

    Returns the updated state, refreshed table markdown and refreshed dropdown
    choices.
    """
    actors_state = actors_state or []
    try:
        idx = int(selected_idx)
    except (TypeError, ValueError):
        # Previously a bare `except Exception: pass` — narrowed so only the
        # expected conversion failures are ignored and real bugs surface.
        idx = -1
    if 0 <= idx < len(actors_state):
        actors_state.pop(idx)
    return actors_state, actor_table_md(actors_state), actor_dropdown_choices(actors_state)
|
|
|
|
def stepper_html(idx: int) -> str:
    """Render the wizard progress chips: step ``idx`` is active, earlier steps
    are marked done, later ones are plain."""
    chips = []
    for pos, (_, label) in enumerate(WIZARD_STEPS):
        if pos == idx:
            css = "step active"
        elif pos < idx:
            css = "step done"
        else:
            css = "step"
        chips.append(f'<div class="{css}">{pos+1}. {label}</div>')
    return '<div class="stepper">' + "".join(chips) + '</div>'
|
|
|
|
def render_step_visibility(idx: int):
    """Return one gr.update per wizard group, visible only for the active step."""
    return [gr.update(visible=(pos == idx)) for pos in range(len(WIZARD_STEPS))]
|
|
|
|
def go_next(idx: int):
    """Advance the wizard one step (clamped to the last step) and return the
    new index, refreshed stepper HTML and per-group visibility updates."""
    new_idx = min(int(idx) + 1, len(WIZARD_STEPS) - 1)
    return [new_idx, stepper_html(new_idx)] + render_step_visibility(new_idx)
|
|
|
|
def go_prev(idx: int):
    """Move the wizard one step back (clamped to the first step) and return the
    new index, refreshed stepper HTML and per-group visibility updates."""
    new_idx = max(int(idx) - 1, 0)
    return [new_idx, stepper_html(new_idx)] + render_step_visibility(new_idx)
|
|
|
|
def load_font(size: int, bold: bool = False):
    """Load a TrueType UI font at the given size.

    Prefers DejaVu Sans, then Liberation Sans (regular or bold variant), and
    falls back to PIL's built-in bitmap font when neither is installed.
    """
    if bold:
        candidates = (
            "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
            "/usr/share/fonts/truetype/liberation2/LiberationSans-Bold.ttf",
        )
    else:
        candidates = (
            "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
            "/usr/share/fonts/truetype/liberation2/LiberationSans-Regular.ttf",
        )
    for candidate in candidates:
        if Path(candidate).exists():
            return ImageFont.truetype(candidate, size)
    return ImageFont.load_default()
|
|
|
|
def draw_panel(draw, xy, title, body, font_title, font_body, colors):
    """Draw one rounded canvas panel with a title and word-wrapped body text.

    Args:
        draw: PIL ImageDraw handle to render into.
        xy: (x1, y1, x2, y2) panel bounds in pixels.
        title: panel heading, drawn with ``font_title``.
        body: free text; greedily word-wrapped to the panel width and
            truncated when it exceeds the available vertical space.
        font_title / font_body: PIL fonts for heading and body lines.
        colors: dict providing 'panel_bg', 'panel_border' and 'text_color'.
    """
    x1, y1, x2, y2 = xy
    draw.rounded_rectangle(xy, radius=18, fill=colors["panel_bg"], outline=colors["panel_border"], width=2)
    draw.text((x1 + 24, y1 + 20), title, fill=colors["text_color"], font=font_title)
    # Greedy word-wrap measured with the real font metrics (24px padding per side).
    max_width = (x2 - x1) - 48
    words = (body or "").split()
    lines = []
    current = ""
    for word in words:
        test = current + (" " if current else "") + word
        bbox = draw.textbbox((0, 0), test, font=font_body)
        width = bbox[2] - bbox[0]
        if width <= max_width:
            current = test
        else:
            if current:
                lines.append(current)
            # A single word wider than the panel still becomes its own line.
            current = word
    if current:
        lines.append(current)
    # Cap to the number of 28px lines that fit below the title block (min 4);
    # the last kept line is shortened so the truncation is visible.
    max_lines = max(4, int(((y2 - y1) - 90) / 28))
    if len(lines) > max_lines:
        lines = lines[:max_lines]
        lines[-1] = trim_text_to_fit(lines[-1], max(20, len(lines[-1]) - 3))
    y = y1 + 68
    for line in lines:
        draw.text((x1 + 24, y), line, fill=colors["text_color"], font=font_body)
        y += 28
|
|
|
|
def generate_canvas_image(reto: str, definicion: str, relevancia: str, conexion: str, actors_text: str, gobernanza: str, iniciativas: str, user_id: str, eval_result: Optional[Dict[str, Any]] = None) -> Path:
    """Compose the full canvas board as a PNG and return its absolute path.

    Layout: header banner with the reto (and an optional score badge), five
    section panels in two columns, a full-width "Iniciativas" strip, an
    optional evaluation-summary strip, a footer line and — when the asset
    exists — the program logo. The file is written under data/exports/.

    NOTE(review): panel coordinates are hard-coded; they assume the default
    canvas size from settings ("canvas.width/height") — confirm before
    changing those settings.
    """
    s = load_settings()["canvas"]
    W, H = s["width"], s["height"]
    img = Image.new("RGBA", (W, H), s["background_color"])
    draw = ImageDraw.Draw(img)

    font_header = load_font(34, bold=True)
    font_title = load_font(24, bold=True)
    font_body = load_font(18, bold=False)
    font_footer = load_font(16, bold=False)
    font_badge = load_font(18, bold=True)

    colors = {
        "header_color": s["header_color"],
        "panel_bg": s["panel_bg"],
        "panel_border": s["panel_border"],
        "text_color": s["text_color"],
        "accent_color": s["accent_color"],
    }

    # Header banner: up to 3 wrapped lines of the reto text.
    draw.rounded_rectangle((40, 30, W - 40, 185), radius=24, fill=colors["header_color"])
    header_lines = textwrap.wrap(trim_text_to_fit(reto, 230), width=62)[:3]
    y = 56
    for line in header_lines:
        draw.text((80, y), line, fill="white", font=font_header)
        y += 40

    # Score badge (top-right) only when an evaluation result is attached.
    if eval_result:
        score = int(eval_result.get("global_score", 0))
        badge = f"{score}% · {'Remoto' if eval_result.get('remote_used') else 'Local'}"
        badge_box = (W - 360, 52, W - 80, 105)
        draw.rounded_rectangle(badge_box, radius=20, fill=(255, 255, 255, 45))
        draw.text((badge_box[0] + 22, badge_box[1] + 12), badge, fill="white", font=font_badge)

    # Two-column grid of section panels (1-3 left, 4-5 right).
    draw_panel(draw, (40, 220, 930, 455), "1. Definición", definicion, font_title, font_body, colors)
    draw_panel(draw, (40, 480, 930, 715), "2. Relevancia", relevancia, font_title, font_body, colors)
    draw_panel(draw, (40, 740, 930, 975), "3. Conexión personal", conexion, font_title, font_body, colors)
    draw_panel(draw, (990, 220, W - 40, 455), "4. Actores", actors_text, font_title, font_body, colors)
    draw_panel(draw, (990, 480, W - 40, 975), "5. Gobernanza", gobernanza, font_title, font_body, colors)

    # Full-width "Iniciativas" strip drawn inline (wider wrap than draw_panel).
    iniciativas_box = (40, 1005, W - 40, 1185)
    draw.rounded_rectangle(iniciativas_box, radius=18, fill=colors["panel_bg"], outline=colors["panel_border"], width=2)
    draw.text((64, 1025), "6. Iniciativas", fill=colors["text_color"], font=font_title)
    ini_text = iniciativas if (iniciativas or "").strip() else "Sin contenido agregado todavía."
    lines = textwrap.wrap(trim_text_to_fit(ini_text, 340), width=118)[:4]
    y2 = 1068
    for line in lines:
        draw.text((64, y2), line, fill=colors["text_color"], font=font_body)
        y2 += 24

    # Optional evaluation-summary strip under the panels.
    if eval_result:
        sum_box = (40, 1208, W - 40, 1280)
        draw.rounded_rectangle(sum_box, radius=16, fill=(240, 248, 255), outline=colors["panel_border"], width=2)
        summary = trim_text_to_fit(eval_result.get("global_summary", ""), 220)
        draw.text((64, 1226), f"Resumen de evaluación {summary}", fill=colors["text_color"], font=font_footer)

    # Footer: generation timestamp plus the app name.
    footer_text = f"Generado el {datetime.now().strftime('%Y-%m-%d %H:%M')} • {APP_NAME}"
    draw.text((60, H - 50), footer_text, fill=colors["text_color"], font=font_footer)

    # Best-effort branding: a missing or unreadable logo must not block export.
    if LOGO_PATH.exists():
        try:
            logo = Image.open(LOGO_PATH).convert("RGBA")
            logo.thumbnail((220, 56))
            img.alpha_composite(logo, (W - logo.width - 60, H - logo.height - 22))
        except Exception:
            pass

    out_dir = DATA_DIR / "exports"
    out_dir.mkdir(parents=True, exist_ok=True)
    out = out_dir / f"canvas_{user_id}_{int(time.time())}.png"
    img.save(out, "PNG", dpi=(s["dpi"], s["dpi"]))
    return out.resolve()
|
|
|
|
def save_canvas(user_id: str, reto: str, canvas_dict: Dict[str, Any], png_path: Path) -> None:
    """Persist one generated canvas row into the ``canvases`` table.

    Stores the canvas content as JSON alongside the PNG path and a UTC ISO
    timestamp. The connection is now closed even when the INSERT raises
    (previously an exception leaked the sqlite connection).
    """
    con = sqlite3.connect(DB_PATH)
    try:
        con.execute(
            """
            INSERT INTO canvases (id, user_id, reto_text, canvas_json, png_path, created_at)
            VALUES (?, ?, ?, ?, ?, ?)
            """,
            (
                str(uuid4()),
                user_id,
                reto,
                json.dumps(canvas_dict, ensure_ascii=False),
                str(png_path),
                # Keep utcnow() so the stored format matches existing rows
                # (naive UTC, no "+00:00" offset).
                datetime.utcnow().isoformat(),
            ),
        )
        con.commit()
    finally:
        con.close()
|
|
|
|
def create_board(reto: str, definicion: str, relevancia: str, conexion: str, gobernanza: str, iniciativas: str, actors_state: List[Dict[str, str]], session: Dict[str, Any]):
    """Generate the canvas PNG, persist it and return the UI payload.

    Returns a status markdown, the image path for preview, a download-button
    update labelled with the generated file name, and the updated session.
    """
    session = session or {}
    user_id = session.get("user_id", str(uuid4()))
    actors_state = actors_state or []
    eval_result = session.get("eval_result", {})
    actors_text = "\n".join([f"- {a.get('name','')} — {a.get('role','')}" if a.get("role") else f"- {a.get('name','')}" for a in actors_state]) or "Sin actores agregados todavía."

    png = generate_canvas_image(reto, definicion, relevancia, conexion, actors_text, gobernanza, iniciativas, user_id, eval_result)
    canvas_dict = {
        "definicion": definicion,
        "relevancia": relevancia,
        "conexion": conexion,
        "actors": actors_state,
        "gobernanza": gobernanza,
        "iniciativas": iniciativas,
        "eval_result": eval_result,
    }
    save_canvas(user_id, reto, canvas_dict, png)
    session["last_board"] = str(png)
    filename = Path(png).name
    return (
        "## Board generado correctamente",
        str(png),
        # Bug fix: the label used to hard-code "(unknown)" even though
        # `filename` was already computed for exactly this purpose.
        gr.update(value=str(png), visible=True, label=f"Descargar PNG ({filename})"),
        session,
    )
|
|
|
|
| def info_html() -> str: |
| return """ |
| <div class="info-card pop-in"> |
| <h2>Metodología de evaluación y construcción del resultado</h2> |
| <p>Esta aplicación está diseñada para que cualquier persona pueda entender de dónde salen los resultados. El sistema evalúa la formulación de un reto a partir de la lógica del marco Cynefin y luego muestra una lectura cuantificada que puede auditarse. El resultado no debe leerse como una verdad absoluta. Debe leerse como una operacionalización transparente de rasgos de complejidad.</p> |
| <h3>1. Qué es Cynefin y por qué se usa aquí</h3> |
| <p>Cynefin es un marco de sentido y decisión desarrollado por Dave Snowden y colaboradores para distinguir contextos simples, complicados, complejos, caóticos y de desorden. Su utilidad central consiste en recordar que no todos los problemas admiten la misma forma de intervención. En contextos complejos no suele existir una respuesta única previa. Lo apropiado es explorar, observar patrones emergentes y ajustar. Ese principio es la base conceptual del evaluador.</p> |
| <p><b>Referencias centrales</b><br> |
| Kurtz, C. F., & Snowden, D. J. 2003. The new dynamics of strategy. IBM Systems Journal, 42(3), 462–483.<br> |
| Snowden, D. J., & Boone, M. E. 2007. A Leader’s Framework for Decision Making. Harvard Business Review, 85(11), 68–76.<br> |
| Snowden, D. J. 2010. The Cynefin framework and naturalizing sense-making.</p> |
| <h3>2. Qué intenta medir la aplicación</h3> |
| <p>La aplicación no intenta determinar si un reto es “bueno” en términos generales. Intenta estimar qué tan visible es su estructura de complejidad. Para ello usa siete criterios que condensan rasgos frecuentes de los problemas complejos en la literatura de Cynefin y en su uso práctico para diseño de intervención.</p> |
| <table style="width:100%;border-collapse:collapse;background:white;border:1px solid #e5e7eb;"> |
| <tr style="background:#eff6ff;"> |
| <th style="padding:10px;text-align:left;">ID</th> |
| <th style="padding:10px;text-align:left;">Criterio</th> |
| <th style="padding:10px;text-align:left;">Qué significa dentro de la app</th> |
| <th style="padding:10px;text-align:left;">Fundamento bibliográfico principal</th> |
| </tr> |
| <tr> |
| <td style="padding:10px;">C1</td> |
| <td style="padding:10px;">Incertidumbre alta</td> |
| <td style="padding:10px;">Busca evidencia de que el reto opera en un entorno incierto, ambiguo o no lineal y que no existe una receta obvia.</td> |
| <td style="padding:10px;">Snowden & Boone 2007, dominio complejo.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">C2</td> |
| <td style="padding:10px;">Multiplicidad de actores</td> |
| <td style="padding:10px;">Busca presencia o inferencia de varios actores con intereses, funciones o capacidades distintas.</td> |
| <td style="padding:10px;">Kurtz & Snowden 2003, interacciones y patrones sociales.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">C3</td> |
| <td style="padding:10px;">Interdependencia</td> |
| <td style="padding:10px;">Busca evidencia de relaciones sistémicas donde una decisión afecta otras partes del problema.</td> |
| <td style="padding:10px;">Kurtz & Snowden 2003, systems, distributed cognition, pattern interactions.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">C4</td> |
| <td style="padding:10px;">Emergencia sobre imposición</td> |
| <td style="padding:10px;">Busca señales de que la solución debe emerger por interacción, coordinación y ajuste y no por diseño lineal único.</td> |
| <td style="padding:10px;">Snowden & Boone 2007, probe-sense-respond y emergencia en complejo.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">C5</td> |
| <td style="padding:10px;">Gestión por restricciones</td> |
| <td style="padding:10px;">Busca límites éticos, legales, institucionales, territoriales o presupuestarios que condicionan la acción.</td> |
| <td style="padding:10px;">Literatura de complejidad aplicada y gobernanza adaptativa; compatible con Cynefin como lectura contextual de restricciones.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">C6</td> |
| <td style="padding:10px;">Experimentación</td> |
| <td style="padding:10px;">Busca si la formulación admite pilotos, pruebas pequeñas, iteración o aprendizaje seguro antes de escalar.</td> |
| <td style="padding:10px;">Snowden & Boone 2007, lógica probe-sense-respond.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">C7</td> |
| <td style="padding:10px;">Aprendizaje y adaptación</td> |
| <td style="padding:10px;">Busca evidencia de monitoreo, ajuste, aprendizaje continuo o cambio de respuesta según patrones emergentes.</td> |
| <td style="padding:10px;">Snowden 2010, sense-making adaptativo y patrones emergentes.</td> |
| </tr> |
| </table> |
| <h3>3. Flujo metodológico completo</h3> |
| <p>La aplicación sigue una secuencia fija de procesamiento. Cada capa cumple una función distinta y puede dejar rastros visibles en la pestaña de diagnóstico.</p> |
| <table style="width:100%;border-collapse:collapse;background:white;border:1px solid #e5e7eb;"> |
| <tr style="background:#eff6ff;"> |
| <th style="padding:10px;text-align:left;">Fase</th> |
| <th style="padding:10px;text-align:left;">Qué hace</th> |
| <th style="padding:10px;text-align:left;">Qué aporta al resultado</th> |
| </tr> |
| <tr> |
| <td style="padding:10px;">Validación básica</td> |
| <td style="padding:10px;">Verifica longitud mínima, idioma español, forma interrogativa y ausencia de contenido bloqueado.</td> |
| <td style="padding:10px;">Evita evaluar entradas inválidas o demasiado débiles.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">Intento remoto con LLM</td> |
| <td style="padding:10px;">Pregunta a uno o varios modelos alojados en Hugging Face para obtener JSON estructurado.</td> |
| <td style="padding:10px;">Si funciona, aporta una lectura generativa directa del reto.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">Motor semántico local</td> |
| <td style="padding:10px;">Convierte el reto, las anclas conceptuales y los ejemplos guía en embeddings y calcula similitud.</td> |
| <td style="padding:10px;">Permite seguir operando aunque el remoto falle.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">Heurística local de contingencia</td> |
| <td style="padding:10px;">Usa términos observables, conteo de actores y similitud léxica cuando no existe motor semántico disponible.</td> |
| <td style="padding:10px;">Mantiene la app utilizable en escenarios mínimos.</td> |
| </tr> |
| </table> |
| <h3>4. Papel del LLM dentro de la metodología</h3> |
| <p>El LLM no define la teoría. La teoría la define Cynefin. El LLM solo intenta producir una evaluación estructurada en formato JSON a partir de un prompt que ya está guiado por criterios, ejemplos y restricciones de salida. Si responde bien, su salida se usa. Si no responde, la aplicación cambia a métodos locales controlados. En otras palabras, el LLM es una capa de conveniencia y no la fuente conceptual del sistema.</p> |
| <h3>5. Papel del motor semántico local</h3> |
| <p>El motor local usa un modelo multilingüe de sentence-transformers para representar texto como vectores. Luego compara el reto con dos tipos de referencia. Primero con anclas conceptuales específicas por criterio. Segundo con ejemplos guía completos. Esa doble comparación permite medir afinidad conceptual aunque la redacción del usuario no use exactamente las mismas palabras.</p> |
| <h3>6. Fórmula del puntaje global</h3> |
| <div style="background:white;border:1px solid #e5e7eb;border-radius:20px;padding:16px;margin:14px 0;"> |
| <div style="font-family:monospace;background:#f8fafc;border-radius:12px;padding:12px;"> |
| Puntaje global = (C1 + C2 + C3 + C4 + C5 + C6 + C7) / 7 |
| </div> |
| </div> |
| <p>La aplicación usa promedio simple para preservar interpretabilidad. Todos los criterios pesan igual en la capa final. Esa decisión es metodológica y busca trazabilidad. Quien lea el resultado puede reproducir el cálculo sin una caja negra adicional.</p> |
| <h3>7. Fórmula del puntaje por criterio en modo semántico local</h3> |
| <div style="background:white;border:1px solid #e5e7eb;border-radius:20px;padding:16px;margin:14px 0;"> |
| <div style="font-family:monospace;background:#f8fafc;border-radius:12px;padding:12px;"> |
| score_criterio = clamp(18, 95, |
| 0.55 × score_semántico_ancla + |
| 0.25 × score_señales_explícitas + |
| 0.20 × score_similitud_ejemplos + |
| boost_actores) |
| </div> |
| </div> |
| <table style="width:100%;border-collapse:collapse;background:white;border:1px solid #e5e7eb;"> |
| <tr style="background:#eff6ff;"> |
| <th style="padding:10px;text-align:left;">Componente</th> |
| <th style="padding:10px;text-align:left;">Qué representa</th> |
| <th style="padding:10px;text-align:left;">Razón metodológica</th> |
| </tr> |
| <tr> |
| <td style="padding:10px;">score_semántico_ancla</td> |
| <td style="padding:10px;">Similitud entre el reto y las anclas del criterio.</td> |
| <td style="padding:10px;">Tiene el mayor peso porque busca capturar sentido conceptual.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">score_señales_explícitas</td> |
| <td style="padding:10px;">Huella textual visible dentro del propio reto.</td> |
| <td style="padding:10px;">Evita que todo dependa de una similitud global opaca.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">score_similitud_ejemplos</td> |
| <td style="padding:10px;">Parecido con ejemplos guía bien formulados.</td> |
| <td style="padding:10px;">Ayuda a reconocer estructura aunque cambie el estilo de redacción.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">boost_actores</td> |
| <td style="padding:10px;">Suma extra en C2 por presencia de varios actores plausibles.</td> |
| <td style="padding:10px;">Compensa que la multiplicidad de actores necesita una señal específica.</td> |
| </tr> |
| </table> |
| <h3>8. Cómo se transforma la similitud a un score interpretable</h3> |
| <div style="background:white;border:1px solid #e5e7eb;border-radius:20px;padding:16px;margin:14px 0;"> |
| <div style="font-family:monospace;background:#f8fafc;border-radius:12px;padding:12px;"> |
| si sim ≤ 0.18 → 20 |
| si sim ≥ 0.62 → 92 |
| en medio → interpolación lineal |
| </div> |
| </div> |
| <p>La similitud semántica cruda no se entrega directamente porque no sería intuitiva para un usuario general. Por eso se reescala a un rango más legible. El límite inferior evita falsos ceros absolutos. El límite superior evita que cualquier parecido moderado dispare máximos artificiales.</p> |
| <h3>9. Calibración global posterior</h3> |
| <p>Después del promedio simple, la aplicación puede aplicar un piso si el reto se parece mucho a los ejemplos guía. Esto existe para corregir un problema frecuente. Algunos retos tienen buena estructura compleja, pero expresan esa estructura con vocabulario distinto y por eso podrían salir subestimados.</p> |
| <ul> |
| <li>Si la similitud con ejemplos guía es al menos 0.58 y el promedio cae debajo de 72, se eleva a 72.</li> |
| <li>Si la similitud con ejemplos guía es al menos 0.68 y el promedio cae debajo de 78, se eleva a 78.</li> |
| </ul> |
| <h3>10. Cómo se interpreta cada rango</h3> |
| <table style="width:100%;border-collapse:collapse;background:white;border:1px solid #e5e7eb;"> |
| <tr style="background:#eff6ff;"> |
| <th style="padding:10px;text-align:left;">Rango</th> |
| <th style="padding:10px;text-align:left;">Lectura técnica</th> |
| <th style="padding:10px;text-align:left;">Sentido práctico</th> |
| </tr> |
| <tr> |
| <td style="padding:10px;">80–100</td> |
| <td style="padding:10px;">Rasgo claramente presente</td> |
| <td style="padding:10px;">El criterio está expresado con suficiente fuerza para sostener una lectura compleja.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">60–79</td> |
| <td style="padding:10px;">Rasgo bastante visible</td> |
| <td style="padding:10px;">El rasgo existe y puede defenderse, aunque aún admite refuerzo.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">40–59</td> |
| <td style="padding:10px;">Rasgo parcial o implícito</td> |
| <td style="padding:10px;">Hay señales, pero todavía no son suficientemente nítidas o abundantes.</td> |
| </tr> |
| <tr> |
| <td style="padding:10px;">0–39</td> |
| <td style="padding:10px;">Rasgo débil o no observable</td> |
| <td style="padding:10px;">El texto ofrece poca evidencia para sostener ese criterio.</td> |
| </tr> |
| </table> |
<h3>11. Cómo se lee la explicabilidad por criterio</h3>
| <p>Para cada criterio la aplicación puede mostrar cuatro huellas. Términos detectados, fragmentos activados del texto, similitud con anclas y score final. Eso permite reconstruir por qué un criterio pudo salir más alto o más bajo que otro. La app intenta que el número no quede separado de su rastro.</p> |
| <h3>12. Qué ocurre si el acceso remoto falla</h3> |
| <p>Si aparece un error 401 Unauthorized o no llega una respuesta válida desde Hugging Face, la aplicación cambia a motor local. La teoría evaluativa sigue siendo la misma. Lo que cambia es la capa computacional utilizada para producir el número.</p> |
| <h3>13. Bibliografía básica utilizada en el diseño conceptual</h3> |
| <p>Snowden, D. J., & Boone, M. E. 2007. A Leader’s Framework for Decision Making. Harvard Business Review, 85(11), 68–76.<br> |
| Kurtz, C. F., & Snowden, D. J. 2003. The new dynamics of strategy. IBM Systems Journal, 42(3), 462–483.<br> |
| Snowden, D. J. 2010. The Cynefin framework and naturalizing sense-making.<br> |
| La capa de embeddings no sustituye Cynefin. Solo sirve para operacionalizar similitud conceptual y mantener la aplicación usable cuando el modo remoto no está disponible.</p> |
| </div> |
| """ |
|
|
|
|
def reset_all():
    """Reset the whole wizard back to its initial state (step 0).

    Returns the 29-item tuple of component updates wired to ``clear_btn``
    in ``build_app``: fresh session id, empty actor list, wizard index 0,
    the step-0 stepper HTML, cleared text inputs/outputs, the step
    visibility updates, and disabled navigation buttons. The tuple order
    MUST match the ``outputs=[...]`` list of the ``clear_btn.click`` wiring.
    """
    idx = 0
    return (
        {"user_id": str(uuid4())},             # session: new user id
        [],                                    # actors_state
        idx,                                   # wizard_idx
        stepper_html(idx),                     # stepper
        "",                                    # reto_input
        "",                                    # eval_md
        "<div></div>",                         # eval_chart
        "Sin diagnóstico todavía.",            # diagnostics_md
        "",                                    # reto_hidden
        "",                                    # definicion
        "",                                    # relevancia
        "",                                    # conexion
        "Sin actores agregados todavía.",      # actor_md
        # Clear both the selected value AND the stale choices; a bare None
        # would only deselect and leave previously added actors listed in
        # the dropdown even though actors_state is now empty.
        gr.update(choices=[], value=None),     # actor_select_remove
        "",                                    # gobernanza
        "",                                    # iniciativas
        None,                                  # board_preview
        gr.update(value=None, visible=False),  # board_download
        "🟡 Verificando disponibilidad del LLM…",  # remote_status
        *render_step_visibility(idx),          # step0..step7 visibility
        gr.update(interactive=False),          # prev_btn
        gr.update(interactive=False),          # next_btn
    )
|
|
|
|
def remote_status_text():
    """Return the remote-LLM availability message with a traffic-light prefix."""
    available, detail = ping_remote_eval()
    if available:
        prefix = "🟢 "
    else:
        prefix = "🔴 "
    return prefix + detail
|
|
|
|
def build_app():
    """Build and return the Gradio Blocks application.

    Layout: a "Wizard" tab with 8 sequential step groups (challenge input +
    evaluation, five guided text sections, an actor list, and board
    generation), plus a "Cómo funciona" info tab and a "Diagnóstico" tab.
    All event wiring (evaluation, navigation, suggestions, actor add/remove,
    autofill, board creation, reset) is attached here.
    """
    # Example challenges shown under the main textbox; DEFAULT_EXAMPLES is a
    # module-level list of {"text": ...} dicts.
    examples = [[ex["text"]] for ex in DEFAULT_EXAMPLES]
    theme = gr.themes.Soft(primary_hue="blue", secondary_hue="slate", neutral_hue="slate")

    # Custom CSS: card/stepper styling plus responsive breakpoints at
    # 1024px, 768px and 520px for tablet/mobile layouts.
    css = """
.app-shell {
    max-width: 1280px;
    margin: 0 auto;
    padding-left: 12px;
    padding-right: 12px;
    box-sizing: border-box;
}

.hero {
    padding: 10px 4px 18px 4px;
}

.hero h1 {
    font-size: 32px;
    font-weight: 800;
    margin: 0 0 6px 0;
    line-height: 1.15;
}

.hero p {
    color: #6b7280;
    margin: 0;
    line-height: 1.45;
}

.one-card, .wizard-card, .info-card, .diag-card {
    border-radius: 28px;
    border: 1px solid #e5e7eb;
    padding: 18px;
    background: #fbfbfd;
    box-sizing: border-box;
}

.wizard-card {
    overflow: hidden;
}

.stepper {
    display: flex;
    gap: 10px;
    flex-wrap: wrap;
    margin: 8px 0 18px 0;
}

.step {
    padding: 10px 14px;
    border-radius: 999px;
    border: 1px solid #e5e7eb;
    background: #fff;
    color: #6b7280;
    font-weight: 700;
    transition: all .25s ease;
    white-space: nowrap;
}

.step.active {
    background: #dbeafe;
    color: #1d4ed8;
    border-color: #bfdbfe;
    transform: translateY(-1px);
}

.step.done {
    background: #ecfdf5;
    color: #15803d;
    border-color: #bbf7d0;
}

.criterion-grid {
    display: grid;
    grid-template-columns: repeat(2, minmax(0, 1fr));
    gap: 14px;
    margin-top: 16px;
}

.criterion-card {
    border-radius: 22px;
    border: 1px solid #e5e7eb;
    background: white;
    padding: 16px;
    box-shadow: 0 1px 0 rgba(17,24,39,.03);
    box-sizing: border-box;
}

.criterion-head {
    display: flex;
    justify-content: space-between;
    gap: 12px;
    align-items: center;
}

.criterion-id {
    font-weight: 800;
    color: #2563eb;
}

.criterion-label {
    margin-top: 8px;
    color: #111827;
    font-weight: 600;
    line-height: 1.35;
}

.criterion-score {
    font-size: 20px;
    font-weight: 800;
    white-space: nowrap;
}

.global-score-card {
    border-radius: 24px;
    background: white;
    border: 1px solid #e5e7eb;
    padding: 18px;
    box-sizing: border-box;
}

.global-score-label {
    color: #6b7280;
    font-size: 14px;
}

.global-score-value {
    font-size: 34px;
    font-weight: 800;
    margin-top: 4px;
    line-height: 1;
}

.global-score-model {
    font-size: 12px;
    color: #6b7280;
    margin-top: 6px;
    line-height: 1.4;
}

.bar {
    height: 10px;
    border-radius: 999px;
    background: #e5e7eb;
    overflow: hidden;
    margin-top: 10px;
}

.fill {
    height: 100%;
    border-radius: 999px;
}

.animate-fill {
    animation: fillin .6s ease;
}

.nav-row {
    display: flex;
    gap: 12px;
    margin-top: 8px;
}

.pop-in {
    animation: popin .28s ease;
}

.tag-pill {
    display: inline-block;
    font-size: 11px;
    padding: 4px 8px;
    border-radius: 999px;
    background: #eff6ff;
    color: #1d4ed8;
    margin-right: 6px;
    margin-top: 8px;
}

.evidence-pills {
    margin-top: 8px;
}

.llm-status-wrap {
    display: flex;
    flex-direction: column;
    gap: 10px;
    margin-bottom: 10px;
}

.llm-status-wrap .gr-button,
.primary-action-row .gr-button,
.nav-row .gr-button,
.actor-actions-mobile .gr-button {
    min-height: 48px;
}

.primary-action-row {
    display: flex;
    gap: 12px;
    flex-wrap: wrap;
}

.actor-actions-mobile {
    display: flex;
    gap: 10px;
    flex-wrap: wrap;
}

.gradio-container .gr-box,
.gradio-container .gr-panel {
    box-sizing: border-box;
}

.gradio-container textarea,
.gradio-container input,
.gradio-container select {
    font-size: 16px !important;
}

@keyframes fillin {
    from {width: 0;}
    to {width: 100%;}
}

@keyframes popin {
    from {opacity: 0; transform: translateY(6px);}
    to {opacity: 1; transform: translateY(0);}
}

@media (max-width: 1024px) {
    .criterion-grid {
        grid-template-columns: 1fr;
    }
}

@media (max-width: 768px) {
    .app-shell {
        max-width: 100%;
        padding-left: 10px;
        padding-right: 10px;
    }

    .hero {
        padding: 6px 2px 14px 2px;
    }

    .hero h1 {
        font-size: 25px;
        line-height: 1.15;
        margin-bottom: 8px;
    }

    .hero p {
        font-size: 14px;
        line-height: 1.45;
    }

    .one-card, .wizard-card, .info-card, .diag-card {
        padding: 12px;
        border-radius: 20px;
    }

    .stepper {
        gap: 8px;
        margin: 6px 0 14px 0;
        overflow-x: auto;
        flex-wrap: nowrap;
        padding-bottom: 4px;
        scrollbar-width: thin;
    }

    .step {
        padding: 9px 12px;
        font-size: 13px;
        flex: 0 0 auto;
    }

    .global-score-card,
    .criterion-card {
        border-radius: 18px;
        padding: 14px;
    }

    .global-score-value {
        font-size: 30px;
    }

    .criterion-score {
        font-size: 18px;
    }

    .criterion-label {
        font-size: 14px;
    }

    .primary-action-row,
    .nav-row,
    .actor-actions-mobile {
        flex-direction: column;
        gap: 10px;
    }

    .primary-action-row > *,
    .nav-row > *,
    .actor-actions-mobile > * {
        width: 100%;
    }

    .llm-status-wrap {
        gap: 8px;
        margin-bottom: 8px;
    }

    .llm-status-wrap .gr-button,
    .primary-action-row .gr-button,
    .nav-row .gr-button,
    .actor-actions-mobile .gr-button {
        width: 100%;
    }

    .evidence-pills {
        display: flex;
        flex-wrap: wrap;
        gap: 6px;
    }

    .tag-pill {
        margin-right: 0;
        margin-top: 0;
    }

    .gradio-container .gr-markdown table {
        display: block;
        overflow-x: auto;
        white-space: nowrap;
    }
}

@media (max-width: 520px) {
    .app-shell {
        padding-left: 8px;
        padding-right: 8px;
    }

    .hero h1 {
        font-size: 22px;
    }

    .hero p {
        font-size: 13px;
    }

    .one-card, .wizard-card, .info-card, .diag-card {
        padding: 10px;
        border-radius: 16px;
    }

    .step {
        padding: 8px 11px;
        font-size: 12px;
    }

    .global-score-card,
    .criterion-card {
        padding: 12px;
        border-radius: 16px;
    }

    .global-score-value {
        font-size: 28px;
    }

    .global-score-model {
        font-size: 11px;
    }

    .criterion-head {
        gap: 8px;
    }

    .criterion-score {
        font-size: 17px;
    }

    .criterion-label {
        font-size: 13px;
    }

    .gradio-container .gr-button {
        font-size: 15px !important;
    }

    .gradio-container .gr-form,
    .gradio-container .gr-box,
    .gradio-container .gr-panel {
        min-width: 0 !important;
    }
}
    """

    with gr.Blocks(theme=theme, css=css, title=APP_NAME) as demo:
        # Per-session UI state: session id dict, list of actors, current step.
        session = gr.State({"user_id": str(uuid4())})
        actors_state = gr.State([])
        wizard_idx = gr.State(0)

        with gr.Column(elem_classes=["app-shell"]):
            gr.HTML("""
            <div class="hero">
                <h1>Sistema de Validación y Gestión de Retos Complejos</h1>
                <p>Wizard guiado con evaluación inicial del reto, asistencia paso a paso, explicabilidad, diagnóstico técnico y generación de board.</p>
            </div>
            """)

            with gr.Tab("Wizard"):
                with gr.Column(elem_classes=["wizard-card"]):
                    stepper = gr.HTML(stepper_html(0))

                    # Step 1: remote-LLM status, challenge input and evaluation.
                    with gr.Group(visible=True) as step0:
                        with gr.Column(elem_classes=["llm-status-wrap"]):
                            remote_status = gr.Markdown("🟡 Verificando disponibilidad del LLM…")
                            remote_ping_btn = gr.Button("Volver a probar disponibilidad del LLM")

                        reto_input = gr.Textbox(
                            label="Paso 1 • Escribe el reto",
                            lines=6,
                            placeholder="Escribe una pregunta de al menos 100 caracteres."
                        )
                        gr.Examples(examples=examples, inputs=[reto_input], label="Ejemplos guía")

                        with gr.Row(elem_classes=["primary-action-row"]):
                            eval_btn = gr.Button("Evaluar y continuar", variant="primary")
                            clear_btn = gr.Button("Reiniciar")

                        eval_md = gr.Markdown()
                        eval_chart = gr.HTML("<div></div>")
                        diagnostics_md = gr.Markdown("Sin diagnóstico todavía.")
                        # Hidden copy of the evaluated challenge text; read by
                        # every suggestion/autofill/board handler below.
                        reto_hidden = gr.Textbox(visible=False)

                    # Steps 2–4: free-text sections with LLM suggestion buttons.
                    with gr.Group(visible=False) as step1:
                        gr.Markdown("## Paso 2 • Definición")
                        definicion = gr.Textbox(label="¿Qué aspectos fundamentales definen el reto propuesto?", lines=6)
                        ask_def = gr.Button("Sugerir contenido")

                    with gr.Group(visible=False) as step2:
                        gr.Markdown("## Paso 3 • Relevancia")
                        relevancia = gr.Textbox(label="¿Por qué creemos que este reto es relevante?", lines=6)
                        ask_rel = gr.Button("Sugerir contenido")

                    with gr.Group(visible=False) as step3:
                        gr.Markdown("## Paso 4 • Conexión personal")
                        conexion = gr.Textbox(label="¿Cómo se conecta el reto con mi ámbito de trabajo?", lines=6)
                        ask_con = gr.Button("Sugerir contenido")

                    # Step 5: actor list managed through actors_state.
                    with gr.Group(visible=False) as step4:
                        gr.Markdown("## Paso 5 • Actores")
                        with gr.Row():
                            actor_name = gr.Textbox(label="Actor", scale=3)
                            actor_role = gr.Textbox(label="Rol", scale=3)
                        with gr.Row(elem_classes=["actor-actions-mobile"]):
                            actor_add_btn = gr.Button("Agregar actor")
                            actor_remove_btn = gr.Button("Eliminar actor seleccionado")
                        actor_md = gr.Markdown("Sin actores agregados todavía.")
                        actor_select_remove = gr.Dropdown(label="Selecciona un actor para eliminar", choices=[], value=None)

                    with gr.Group(visible=False) as step5:
                        gr.Markdown("## Paso 6 • Gobernanza")
                        gobernanza = gr.Textbox(label="¿Por qué la gobernanza colaborativa y anticipatoria puede contribuir a abordar el reto?", lines=6)
                        ask_gob = gr.Button("Sugerir contenido")

                    with gr.Group(visible=False) as step6:
                        gr.Markdown("## Paso 7 • Iniciativas")
                        iniciativas = gr.Textbox(label="¿Qué iniciativas, alianzas, redes o proyectos conocemos relacionados con el reto?", lines=6)
                        ask_ini = gr.Button("Sugerir contenido")
                        autofill_btn = gr.Button("Autocompletar campos faltantes")

                    # Step 8: render the collected content as a PNG board.
                    with gr.Group(visible=False) as step7:
                        gr.Markdown("## Paso 8 • Resumen y board")
                        board_status = gr.Markdown()
                        board_btn = gr.Button("Crear board PNG", variant="primary")
                        board_preview = gr.Image(type="filepath", label="Vista previa del board")
                        board_download = gr.DownloadButton("Descargar PNG", value=None, visible=False)

                    with gr.Row(elem_classes=["nav-row"]):
                        prev_btn = gr.Button("Atrás", interactive=False)
                        next_btn = gr.Button("Siguiente", interactive=False)

            with gr.Tab("Cómo funciona"):
                info = gr.HTML(info_html())

            with gr.Tab("Diagnóstico"):
                gr.Markdown("Aquí se muestra qué motor evaluó el reto, qué intentos remotos se hicieron y cómo se calculó el puntaje local.")
                diagnostics_tab_md = gr.Markdown("Sin diagnóstico todavía.")

        # ---- Event wiring -------------------------------------------------
        remote_ping_btn.click(fn=remote_status_text, outputs=[remote_status])

        # Evaluate the challenge, then refresh the Diagnóstico tab from the
        # eval result stored in the session dict.
        eval_btn.click(
            fn=evaluate_step,
            inputs=[reto_input, session],
            outputs=[eval_md, eval_chart, diagnostics_md, session, reto_hidden, next_btn, prev_btn]
        ).then(
            fn=lambda s: diagnostics_markdown(s.get("eval_result", {})) if isinstance(s, dict) else "Sin diagnóstico todavía.",
            inputs=[session],
            outputs=[diagnostics_tab_md]
        )

        next_btn.click(
            fn=go_next,
            inputs=[wizard_idx],
            outputs=[wizard_idx, stepper, step0, step1, step2, step3, step4, step5, step6, step7]
        )

        prev_btn.click(
            fn=go_prev,
            inputs=[wizard_idx],
            outputs=[wizard_idx, stepper, step0, step1, step2, step3, step4, step5, step6, step7]
        )

        # Each "Sugerir contenido" button passes the full current answers dict
        # so the suggestion can use context from the other sections.
        ask_def.click(
            fn=lambda reto, d, r, c, g, i, actors: suggest_section_answer(
                reto, "definicion",
                {"definicion": d, "relevancia": r, "conexion": c, "gobernanza": g, "iniciativas": i, "actors": actors}
            ),
            inputs=[reto_hidden, definicion, relevancia, conexion, gobernanza, iniciativas, actors_state],
            outputs=[definicion]
        )

        ask_rel.click(
            fn=lambda reto, d, r, c, g, i, actors: suggest_section_answer(
                reto, "relevancia",
                {"definicion": d, "relevancia": r, "conexion": c, "gobernanza": g, "iniciativas": i, "actors": actors}
            ),
            inputs=[reto_hidden, definicion, relevancia, conexion, gobernanza, iniciativas, actors_state],
            outputs=[relevancia]
        )

        ask_con.click(
            fn=lambda reto, d, r, c, g, i, actors: suggest_section_answer(
                reto, "conexion",
                {"definicion": d, "relevancia": r, "conexion": c, "gobernanza": g, "iniciativas": i, "actors": actors}
            ),
            inputs=[reto_hidden, definicion, relevancia, conexion, gobernanza, iniciativas, actors_state],
            outputs=[conexion]
        )

        actor_add_btn.click(
            fn=add_actor,
            inputs=[actor_name, actor_role, actors_state],
            outputs=[actors_state, actor_md, actor_name, actor_role, actor_select_remove]
        )

        actor_remove_btn.click(
            fn=remove_selected_actor,
            inputs=[actor_select_remove, actors_state],
            outputs=[actors_state, actor_md, actor_select_remove]
        )

        ask_gob.click(
            fn=lambda reto, d, r, c, g, i, actors: suggest_section_answer(
                reto, "gobernanza",
                {"definicion": d, "relevancia": r, "conexion": c, "gobernanza": g, "iniciativas": i, "actors": actors}
            ),
            inputs=[reto_hidden, definicion, relevancia, conexion, gobernanza, iniciativas, actors_state],
            outputs=[gobernanza]
        )

        ask_ini.click(
            fn=lambda reto, d, r, c, g, i, actors: suggest_section_answer(
                reto, "iniciativas",
                {"definicion": d, "relevancia": r, "conexion": c, "gobernanza": g, "iniciativas": i, "actors": actors}
            ),
            inputs=[reto_hidden, definicion, relevancia, conexion, gobernanza, iniciativas, actors_state],
            outputs=[iniciativas]
        )

        autofill_btn.click(
            fn=autofill_all_sections,
            inputs=[reto_hidden, definicion, relevancia, conexion, gobernanza, iniciativas, actors_state],
            outputs=[definicion, relevancia, conexion, gobernanza, iniciativas]
        )

        board_btn.click(
            fn=create_board,
            inputs=[reto_hidden, definicion, relevancia, conexion, gobernanza, iniciativas, actors_state, session],
            outputs=[board_status, board_preview, board_download, session]
        )

        # Full reset: output order here must match reset_all's return tuple.
        clear_btn.click(
            fn=reset_all,
            outputs=[
                session, actors_state, wizard_idx, stepper,
                reto_input, eval_md, eval_chart, diagnostics_md, reto_hidden,
                definicion, relevancia, conexion, actor_md, actor_select_remove,
                gobernanza, iniciativas, board_preview, board_download, remote_status,
                step0, step1, step2, step3, step4, step5, step6, step7,
                prev_btn, next_btn
            ]
        ).then(
            fn=lambda: "Sin diagnóstico todavía.",
            outputs=[diagnostics_tab_md]
        )

        # Ping the remote evaluator once when the page loads.
        demo.load(fn=remote_status_text, outputs=[remote_status])

    return demo
|
|
|
|
# Run bootstrap and build the app at import time so `app` is available both
# when executed directly and when imported by a hosting runner (e.g. Spaces).
bootstrap()
app = build_app()


if __name__ == "__main__":
    # Direct execution: start the Gradio server with default options.
    app.launch()
|
|