|
|
import json
import math
import os
import re
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple

import requests
|
|
|
|
| |
@dataclass(frozen=True)
class Item:
    """One MG360 questionnaire item (frozen: the item bank is immutable)."""

    # Unique identifier, e.g. "COG1" (dimension prefix + index).
    code: str
    # Statement shown to the respondent (Spanish).
    text: str
    # Dimension key: one of "COG", "EMO", "REL", "EJE".
    dimension: str
    # True when the item is reverse-keyed: a high answer indicates a LOW
    # trait level, so scoring mirrors the value on the 1-5 scale.
    reverse: bool
|
|
# Item bank: 24 items, 6 per dimension (3 direct + 3 reverse-keyed each).
# Fix: EJE2 read "en tiempo forma" — corrected to the idiom "en tiempo y forma".
ITEMS: List[Item] = [
    Item("COG1", "Antes de decidir, evalúo cómo una acción afecta a otras áreas.", "COG", False),
    Item("COG2", "Anticipo escenarios y consecuencias más allá del corto plazo.", "COG", False),
    Item("COG3", "Identifico patrones y relaciones entre procesos aparentemente aislados.", "COG", False),
    Item("COG4", "Prefiero concentrarme solo en mi área aunque desconozca las demás.", "COG", True),
    Item("COG5", "Decido principalmente según urgencias del día a día.", "COG", True),
    Item("COG6", "Me cuesta ver el impacto sistémico de mis decisiones.", "COG", True),

    Item("EMO1", "Identifico emociones que influyen en mi trato con otros.", "EMO", False),
    Item("EMO2", "Regulo mi respuesta emocional aun bajo presión.", "EMO", False),
    Item("EMO3", "Practico la empatía para comprender perspectivas distintas.", "EMO", False),
    Item("EMO4", "Suelo reprimir emociones para no mostrar debilidad.", "EMO", True),
    Item("EMO5", "Cuando me frustro, reacciono de forma impulsiva.", "EMO", True),
    Item("EMO6", "Evito hablar de emociones en el trabajo.", "EMO", True),

    Item("REL1", "Busco acuerdos que integren intereses distintos.", "REL", False),
    Item("REL2", "Comunico expectativas de forma clara y verifico entendimiento.", "REL", False),
    Item("REL3", "Fomento colaboración efectiva entre áreas y niveles.", "REL", False),
    Item("REL4", "Evito confrontar para no generar conflicto.", "REL", True),
    Item("REL5", "Prefiero trabajar de forma individual para avanzar más rápido.", "REL", True),
    Item("REL6", "Me cuesta adaptar mi comunicación según el interlocutor.", "REL", True),

    Item("EJE1", "Transformo errores en oportunidades de aprendizaje.", "EJE", False),
    Item("EJE2", "Cumplo compromisos en tiempo y forma de manera consistente.", "EJE", False),
    Item("EJE3", "Itero procesos para mejorar resultados de forma continua.", "EJE", False),
    Item("EJE4", "Cumplir metas es más importante que desarrollar a las personas.", "EJE", True),
    Item("EJE5", "Me desanimo fácilmente cuando surgen obstáculos.", "EJE", True),
    Item("EJE6", "Evito revisar resultados para no encontrar fallas.", "EJE", True),
]
|
|
# Canonical ordering of the four MG360 dimensions.
DIMENSIONS = ["COG", "EMO", "REL", "EJE"]
# Human-readable (Spanish) labels used in the UI, plots and reports.
DIMENSION_LABELS = {
    "COG": "Cognitiva",
    "EMO": "Emocional",
    "REL": "Relacional",
    "EJE": "Ejecucional",
}
|
|
def invert_if_needed(value: float, reverse: bool) -> float:
    """Return *value*, mirrored on the 1-5 scale when *reverse* is set.

    Reverse-keyed items are flipped with ``6 - value`` so that 1 <-> 5,
    2 <-> 4 and the midpoint 3 is unchanged.
    """
    if not reverse:
        return value
    return 6 - value
|
|
def score_responses(responses: Dict[str, float]) -> Dict[str, float]:
    """Score a full set of MG360 answers.

    Args:
        responses: mapping of item code -> raw answer (1-5 Likert scale).

    Returns:
        Dict with one average per dimension key plus "BALANCE_360",
        defined as 1 - (stdev / mean) over the four dimension averages
        (1.0 = perfectly balanced profile).

    Raises:
        ValueError: for answers outside 1-5, or when an item of the bank
            has no answer (previously this surfaced as a bare KeyError).
    """
    # Validate every supplied answer is on the 1-5 scale.
    for code, v in responses.items():
        fv = float(v)
        if not (1 <= fv <= 5):
            raise ValueError(f"Respuesta fuera de 1-5 en {code}: {fv}")

    dim_values: Dict[str, List[float]] = {d: [] for d in DIMENSIONS}
    for it in ITEMS:
        if it.code not in responses:
            raise ValueError(f"Falta respuesta para el item {it.code}")
        fv = float(responses[it.code])
        dim_values[it.dimension].append(invert_if_needed(fv, it.reverse))

    dim_avg = {d: sum(vals) / len(vals) for d, vals in dim_values.items()}
    vals = list(dim_avg.values())
    avg = sum(vals) / len(vals)
    # Population variance over the four dimension averages.
    var = sum((x - avg) ** 2 for x in vals) / len(vals)
    stdev = var ** 0.5
    # 1 - coefficient of variation; guarded against a zero mean.
    balance_360 = 1 - (stdev / avg) if avg > 0 else 0.0
    return {**dim_avg, "BALANCE_360": balance_360}
|
|
| def dominant_axis(dim_scores: Dict[str, float]) -> Tuple[str | None, float]: |
| dims = ["COG", "EMO", "REL", "EJE"] |
| best = max(dim_scores[d] for d in dims) |
| winners = [d for d in dims if abs(dim_scores[d] - best) < 1e-9] |
| if len(winners) > 1: |
| return None, best |
| return winners[0], best |
|
|
|
|
def interpret(dim_scores: Dict[str, float]) -> Dict[str, str]:
    """Translate numeric MG360 scores into human-readable (Spanish) labels.

    Returns a dict with "equilibrio" (balance category from BALANCE_360)
    and "eje_dominante" (profile text for the dominant axis, or a
    balanced-profile message on a tie).
    """
    balance = dim_scores["BALANCE_360"]
    if balance > 0.85:
        equilibrio = "Mentalidad 360 desarrollada"
    elif balance >= 0.70:
        equilibrio = "Parcialmente equilibrada"
    else:
        equilibrio = "Tendencia a sesgo gerencial (un eje domina)"

    profile_names = {
        "COG": "Estratega analítico (Cognitivo)",
        "EMO": "Líder empático (Emocional)",
        "REL": "Conector colaborativo (Relacional)",
        "EJE": "Gestor ejecutor (Ejecucional)",
    }
    axis, value = dominant_axis(dim_scores)
    if axis is None:
        dominant_txt = "Sin eje dominante (perfil balanceado)"
    else:
        dominant_txt = f"{profile_names[axis]} — {DIMENSION_LABELS[axis]} ({value:.2f}/5)"
    return {"equilibrio": equilibrio, "eje_dominante": dominant_txt}
|
|
|
|
| |
def radar_plot(dim_scores: Dict[str, float], title: str, out_png: str) -> str:
    """Render the four MG360 axes as a radar chart and save it as a PNG.

    Args:
        dim_scores: per-dimension averages (keys COG/EMO/REL/EJE, 1-5 scale),
            optionally including "BALANCE_360" for the subtitle.
        title: chart title.
        out_png: destination file path for the PNG.

    Returns:
        The ``out_png`` path, for caller convenience.
    """
    # Imported lazily so the pure scoring logic stays importable without
    # matplotlib/numpy installed.
    import numpy as np
    import matplotlib.pyplot as plt

    DIM_ORDER = ["COG", "EMO", "REL", "EJE"]
    labels = [DIMENSION_LABELS[d] for d in DIM_ORDER]
    vals = [float(dim_scores[d]) for d in DIM_ORDER]

    # One angle per axis; repeat the first vertex to close the polygon.
    angles = np.linspace(0, 2*np.pi, len(labels), endpoint=False)
    angles_cycle = np.concatenate([angles, [angles[0]]])
    vals_cycle = np.concatenate([vals, [vals[0]]])

    fig = plt.figure(figsize=(8, 8))
    ax = plt.subplot(111, polar=True)
    ax.set_facecolor("white")
    fig.subplots_adjust(bottom=0.10)

    # Start at 12 o'clock and go clockwise.
    ax.set_theta_offset(np.pi / 2)
    ax.set_theta_direction(-1)

    ax.set_xticks(angles)
    ax.set_xticklabels(labels, fontsize=14, fontweight="bold")

    # Fixed 0-5 radial scale so charts are comparable across respondents.
    ax.set_ylim(0, 5)
    ax.set_yticks([1, 2, 3, 4, 5])
    ax.set_yticklabels(["1", "2", "3", "4", "5"], fontsize=11)
    ax.yaxis.grid(True, linewidth=0.8, alpha=0.6)
    ax.xaxis.grid(True, linewidth=0.8, alpha=0.6)

    ax.plot(angles_cycle, vals_cycle, linewidth=2.2)
    ax.fill(angles_cycle, vals_cycle, alpha=0.18)

    # Dot plus numeric label on every vertex; label radius is clamped to 5
    # so text never leaves the plot area.
    for ang, v in zip(angles, vals):
        ax.plot([ang], [v], marker="o", markersize=6)
        ax.text(ang, min(5, v + 0.22), f"{v:.2f}", ha="center", va="center", fontsize=11, fontweight="bold")

    ax.set_title(title, fontsize=22, fontweight="bold", pad=18)
    bal = float(dim_scores.get("BALANCE_360", 0.0))
    # NOTE(review): on a tie this picks the first axis in DIM_ORDER, whereas
    # dominant_axis() reports "no dominant axis" — confirm the subtitle is
    # intended to differ from the interpretation text in that case.
    dom = max(DIM_ORDER, key=lambda d: dim_scores[d])
    subtitle = f"Balance 360: {bal:.3f} · Eje dominante: {DIMENSION_LABELS[dom]} ({dim_scores[dom]:.2f}/5)"
    fig.text(0.5, 0.03, subtitle, ha="center", va="center", fontsize=11)

    fig.tight_layout()
    fig.savefig(out_png, dpi=240, bbox_inches="tight")
    plt.close(fig)
    return out_png
|
|
|
|
| |
| from huggingface_hub import InferenceClient |
|
|
# Model used for plan generation; overridable via the HF_MODEL_ID env var
# (and, at runtime, via the UI textbox).
HF_MODEL_ID = os.environ.get("HF_MODEL_ID", "microsoft/Phi-3.5-mini-instruct")
# Hugging Face API token; when absent, remote generation is disabled.
HF_TOKEN = os.environ.get("HF_TOKEN", None)
|
|
def build_prompt(scores: Dict[str, float], inter: Dict[str, str], style: str = "A") -> str:
    """Build the Spanish coaching prompt sent to the LLM.

    Args:
        scores: output of score_responses (dimension averages + BALANCE_360).
        inter: output of interpret ("equilibrio" / "eje_dominante" texts).
        style: "A" executive, "B" coaching, anything else -> academic tone.

    Returns:
        The full prompt as a single newline-joined string.

    Fix: the "Ejecucional" line was missing the space after the colon that
    every sibling line has ("Ejecucional:3.00" -> "Ejecucional: 3.00").
    """
    # Tone instructions per style selector.
    if style == "A":
        tono = "Tono ejecutivo corporativo, directo, orientado a resultados y KPIs duros. Lenguaje de consultoría."
    elif style == "B":
        tono = "Tono humano, empático y de coaching; reflexivo, con foco en bienestar y hábito sostenible."
    else:
        tono = "Tono académico y pedagógico, con objetivos formativos, evidencias y criterios de evaluación."

    lines = [
        f"Eres un coach experto y debes responder con estilo {style}. {tono}",
        "Con base en el siguiente diagnóstico MG360 (1–5), genera un plan de desarrollo práctico, accionable y medible.",
        "",
        "Resultados por eje:",
        f"- Cognitiva: {scores['COG']:.2f}/5",
        f"- Emocional: {scores['EMO']:.2f}/5",
        f"- Relacional: {scores['REL']:.2f}/5",
        f"- Ejecucional: {scores['EJE']:.2f}/5",
        f"- Balance 360: {scores['BALANCE_360']:.3f}",
        f"- Interpretación: {inter['equilibrio']} | {inter['eje_dominante']}",
        "",
        "Instrucciones de salida (OBLIGATORIO):",
        "1) Empieza con un resumen de 2–3 líneas del perfil SIN repetir textualmente los puntajes.",
        "2) Para cada eje (Cognitiva, Emocional, Relacional, Ejecucional):",
        "   - Redacta exactamente 3 ACCIONES SMART (Específica, Medible, Alcanzable, Relevante, con Tiempo).",
        "   - Cada acción debe incluir: Responsable (yo/equipo), Indicador CUANTITATIVO (%, #, tiempo) y Fecha o semana límite.",
        "   - Evita vaguedades tipo 'mejorar' o 'aumentar' sin cifra; usa metas con cifra base y objetivo.",
        "3) Cierra con un Plan de 30 días en formato Semana 1, Semana 2, Semana 3, Semana 4:",
        "   - Para cada semana: objetivo, entregable concreto y criterio de éxito medible.",
        "4) Añade una sección de riesgos y mitigaciones (3–5 ítems).",
        "5) Mantén consistente el estilo seleccionado (A/B/C).",
        "6) Máximo ~900–1100 palabras en total. Si te quedas corto, prioriza Ejecucional y Relacional.",
        "7) No repitas ni reinicies la sección de 'Plan de 30 días'. No agregues 'Semana 5'.",
    ]
    return "\n".join(lines)
|
|
|
|
|
|
|
|
| |
| |
| |
def _providers_chat_completion(model_id: str, system_prompt: str, user_prompt: str) -> dict:
    """POST one chat-completion request to the HF Inference Providers router.

    Returns the decoded JSON body. Raises RuntimeError with a targeted hint
    for 404 (unknown model) and 503 (warming / no capacity); any other HTTP
    error surfaces via raise_for_status().
    """
    endpoint = "https://router.huggingface.co/v1/chat/completions"
    body = {
        "model": model_id,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        "max_tokens": 900,
        "temperature": 0.7,
        "top_p": 0.9,
        "stream": False,
    }
    resp = requests.post(
        endpoint,
        headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
        json=body,
        timeout=180,
    )
    if resp.status_code == 404:
        raise RuntimeError(f"[Providers 404] Modelo '{model_id}' no disponible/ID inválido.")
    if resp.status_code == 503:
        raise RuntimeError(f"[Providers 503] Warming/sin recursos para '{model_id}'.")
    resp.raise_for_status()
    return resp.json()
|
|
|
|
|
|
|
|
def generate_plan_with_phi(prompt: str) -> str:
    """Generate the development-plan text for *prompt*.

    Strategy:
      1. Without HF_TOKEN, bail out immediately with an advisory message.
      2. Try the classic text-generation endpoint via InferenceClient.
      3. On any failure there, fall back to the Providers chat-completions
         router, trying HF_MODEL_ID first and then a list of small
         instruct models.
      4. If a reply looks truncated, request one continuation and append it.

    Returns the generated text, or a bracketed "[Aviso]"/"[Error ...]"
    message when generation was not possible.
    """
    if not HF_TOKEN:
        return "[Aviso] Falta HF_TOKEN. Configúralo en Settings → Variables & secrets."

    # First attempt: plain text-generation against the configured model.
    try:
        client = InferenceClient(HF_MODEL_ID, token=HF_TOKEN, timeout=180)
        out = client.text_generation(
            prompt,
            max_new_tokens=900,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            repetition_penalty=1.05,
            return_full_text=False,
        )
        if isinstance(out, str) and out.strip():
            return out.strip()
    except Exception:
        # Deliberate best-effort: any failure falls through to the
        # chat-completions fallback chain below.
        pass

    system_msg = ("Eres un coach ejecutivo experto en ICB4 (People, Practice, Perspective). "
                  "Responde en español con plan accionable, SMART y medible.")
    # Fallback order; HF_MODEL_ID is retried first, via the router this time.
    fallbacks = [
        HF_MODEL_ID,
        "Qwen/Qwen2.5-3B-Instruct",
        "google/gemma-2-2b-it",
        "meta-llama/Llama-3.2-3B-Instruct",
    ]

    def _extract_text(data: dict) -> str:
        # OpenAI-style payload: choices[0].message.content.
        try:
            return (data.get("choices", [{}])[0]
                    .get("message", {})
                    .get("content", "")
                    .strip())
        except Exception:
            return ""

    def _finish_reason(data: dict) -> str:
        # choices[0].finish_reason ("length" signals token-limit truncation).
        try:
            return data.get("choices", [{}])[0].get("finish_reason", "")
        except Exception:
            return ""

    errors = []
    for mid in fallbacks:
        try:
            data1 = _providers_chat_completion(mid, system_msg, prompt)
            txt = _extract_text(data1)
            if not txt:
                # Empty reply: try the next model rather than failing.
                continue
            fr = _finish_reason(data1)
            # Truncation heuristic: explicit "length" stop, or a reply that
            # ends mid-field on one of the prompt's mandated labels.
            if fr == "length" or txt.strip().endswith(("Responsable:", "Indicador:", "Fecha límite:")):
                cont_user = ("Continúa exactamente donde te quedaste, sin repetir nada anterior. "
                             "Mantén el mismo formato de secciones y listas. Último fragmento:\n\n"
                             + txt[-800:])
                data2 = _providers_chat_completion(mid, system_msg, cont_user)
                txt2 = _extract_text(data2)
                if txt2:
                    txt = txt + "\n" + txt2
            return txt
        except Exception as ex:
            # Collect per-model failures for the final diagnostic message.
            errors.append(f"{mid}: {ex}")
            continue

    return "[Error Providers] Ningún modelo respondió:\n" + "\n".join(errors)
|
|
|
|
| |
def items_schema() -> List[Dict[str, str]]:
    """Expose the item bank as plain dicts, with the dimension label
    (e.g. "Cognitiva") substituted for the internal dimension key."""
    rows = []
    for it in ITEMS:
        rows.append({
            "code": it.code,
            "text": it.text,
            "dimension": DIMENSION_LABELS[it.dimension],
            "reverse": it.reverse,
        })
    return rows
|
|
| def _ensure_outdir() -> Path: |
| out_dir = Path("mg360_resultados"); out_dir.mkdir(parents=True, exist_ok=True); return out_dir |
|
|
| import re |
|
|
def sanitize_week_plan_smart(text: str) -> str:
    """Merge duplicated weekly plans (Semana 1-4) emitted by the LLM.

    - Removes any "Semana 5"+ section (the plan is strictly 30 days).
    - Collects every version of Semana 1..4 the LLM generated.
    - Keeps, per week, the version with the best heuristic quality score.
    - Rebuilds a single ordered section (Semana 1 -> 4) under the
      "Plan de 30 días" header, adding that header if it is missing.
    """
    # 1) Drop out-of-range weeks. Fix: the old pattern ([5-9]\d*) missed
    # two-digit weeks 10-49; (?:[5-9]|[1-9]\d+) matches every week >= 5.
    text = re.sub(
        r"\n\s*Semana\s*(?:[5-9]|[1-9]\d+)\b.*?(?=\n\S|\Z)",
        "",
        text,
        flags=re.IGNORECASE | re.DOTALL,
    )

    # 2) Capture each "Semana N" block up to the next week header or EOF.
    week_pattern = re.compile(
        r"(^|\n)\s*(Semana\s*(\d+))\b\s*:?[\t ]*\n?(.*?)(?=(\n\s*Semana\s*\d+\b)|\Z)",
        flags=re.IGNORECASE | re.DOTALL,
    )
    matches = list(week_pattern.finditer(text))
    if not matches:
        return text

    # 3) Group candidate blocks by week number.
    weeks_map = {}
    for m in matches:
        header_full = m.group(2)
        week_num = int(m.group(3))
        body = (m.group(4) or "").strip()
        if week_num not in weeks_map:
            weeks_map[week_num] = []
        weeks_map[week_num].append((m.start(), m.end(), header_full.strip(), body))

    # 4) Heuristic quality score: reward the mandated sections and length.
    def score_block(body: str) -> int:
        b = body.lower()
        score = 0
        if "objetivo" in b:
            score += 2
        if "entregable" in b or "entregables" in b:
            score += 2
        if "criterio de éxito" in b or "criterios de éxito" in b or "criterio" in b:
            score += 2
        words = len(re.findall(r"\w+", body))
        if words >= 50:
            score += 1
        if words >= 120:
            score += 1
        return score

    # 5) Pick the best candidate per week; ties favour the later version.
    chosen_blocks = {}
    for week in [1, 2, 3, 4]:
        if week not in weeks_map:
            continue
        candidates = weeks_map[week]
        best_idx, best_score = None, -10**9
        for idx, (_s, _e, header, body) in enumerate(candidates):
            sc = score_block(body)
            if sc > best_score or (sc == best_score and idx > (best_idx if best_idx is not None else -1)):
                best_idx, best_score = idx, sc
        _s, _e, header, body = candidates[best_idx]
        chosen_blocks[week] = (header, body)

    if not chosen_blocks:
        return text

    # 6) Split the text at the "Plan de 30 días" header, or at the first
    # week block when no header exists.
    plan_hdr = re.search(r"Plan\s+de\s+30\s*d[ií]as\s*:?", text, flags=re.IGNORECASE)
    if plan_hdr:
        insert_pos = plan_hdr.end()
        pre = text[:insert_pos]
        post = text[insert_pos:]
    else:
        first_week_start = min((arr[0][0] for arr in weeks_map.values()))
        pre = text[:first_week_start]
        post = text[first_week_start:]

    # 7) Strip every original week block from the tail, then re-emit only
    # the chosen version of each week, in order.
    post_clean = week_pattern.sub("", post)

    merged_lines = []
    if not plan_hdr:
        merged_lines.append("\nPlan de 30 días:\n")
    for w in [1, 2, 3, 4]:
        if w in chosen_blocks:
            header, body = chosen_blocks[w]
            merged_lines.append(f"\n{header}\n{body.strip()}\n")

    merged = "".join(merged_lines).rstrip() + "\n"

    return pre + "\n" + merged + post_clean
|
|
|
|
|
|
def _evaluate_internal(res_vals: List[int], style: str):
    """Run the full pipeline: score answers, plot, persist, generate plan.

    Args:
        res_vals: one 1-5 answer per item, in items_schema() order.
        style: plan style selector ("A"/"B"/"C") forwarded to build_prompt.

    Returns:
        Tuple of (radar PNG path, markdown summary, report JSON string,
        LLM plan text) — the four Gradio output components.
    """
    schema = items_schema()
    # Map positional slider values back onto item codes.
    responses = { schema[i]["code"]: int(res_vals[i]) for i in range(len(schema)) }
    scores = score_responses(responses)
    inter = interpret(scores)

    # Timestamped artifacts under mg360_resultados/.
    ts = datetime.now().strftime("%Y%m%d_%H%M%S")
    out_dir = _ensure_outdir()
    out_json = out_dir / f"mg360_reporte_{ts}.json"
    out_png = out_dir / f"mg360_radar_{ts}.png"
    radar_plot(scores, "Perfil MG360 (1–5)", str(out_png))

    # Persist the full report as JSON.
    with open(out_json, "w", encoding="utf-8") as f:
        json.dump({"responses": responses, "scores": scores, "interpretation": inter}, f, ensure_ascii=False, indent=2)

    # Markdown summary shown in the UI.
    md = [
        "**Resultados**",
        *(f"- {DIMENSION_LABELS[d]}: {scores[d]:.2f}/5" for d in ["COG","EMO","REL","EJE"]),
        f"- **BALANCE 360**: {scores['BALANCE_360']:.3f}",
        "",
        "**Interpretación**",
        f"- Equilibrio: {inter['equilibrio']}",
        f"- Eje dominante: {inter['eje_dominante']}",
    ]

    # LLM-generated development plan (may return an advisory/error string).
    plan_prompt = build_prompt(scores, inter, style)
    plan_text = generate_plan_with_phi(plan_prompt)

    return (
        str(out_png),
        "\n".join(md),
        json.dumps({"responses": responses, "scores": scores, "interpretation": inter}, ensure_ascii=False, indent=2),
        plan_text
    )
|
|
| |
import gradio as gr


# Build the Gradio UI: 24 item sliders, style/model controls and the four
# result panes (radar image, markdown summary, JSON report, plan text).
schema = items_schema()
with gr.Blocks() as demo:
    gr.Markdown("# Test MG360 (24 ítems) — Versión Avanzada + Plan IA")
    gr.Markdown("**Escala 1–5:** 1=**Nunca**, 2=**Rara vez**, 3=**A veces**, 4=**Frecuente**, 5=**Siempre**.")

    with gr.Accordion("Cuestionario (24 ítems)", open=True):
        gr.Markdown("### Guía de escala: 1=**Nunca** · 2=**Rara vez** · 3=**A veces** · 4=**Frecuente** · 5=**Siempre**")
        # One slider per item, defaulting to the scale midpoint.
        sliders = [
            gr.Slider(1, 5, step=1, value=3,
                      label=f"{it['code']} — {it['text']} (1 Nunca · 2 Rara vez · 3 A veces · 4 Frecuente · 5 Siempre)")
            for it in schema
        ]

    with gr.Row():
        btn = gr.Button("Evaluar", scale=1)
        style = gr.Dropdown(
            choices=["A", "B", "C"],
            value="A",
            label="Estilo del Plan (A=Ejecutivo, B=Coaching, C=Académico)",
            scale=2
        )
        model_id = gr.Textbox(value=HF_MODEL_ID, label="HF_MODEL_ID (opcional)", scale=3)

    img = gr.Image(type="filepath", label="Radar 4D (1–5)")
    md = gr.Markdown()
    js = gr.Code(language="json", label="Reporte (JSON)")
    plan= gr.Markdown(label="Plan de desarrollo (IA)")

    def evaluate(*vals):
        """Click callback: inputs are all sliders followed by style and
        model id, so the last two values are popped off first."""
        # NOTE(review): mutating module-level HF_MODEL_ID makes the model
        # choice shared across concurrent sessions — confirm acceptable.
        global HF_MODEL_ID
        vals = list(vals)
        HF_MODEL_ID = vals.pop() or HF_MODEL_ID
        sel_style = vals.pop() or "A"
        return _evaluate_internal(vals, sel_style)

    inputs = sliders + [style, model_id]
    btn.click(fn=evaluate, inputs=inputs, outputs=[img, md, js, plan])
|
|
|
|
| |
if __name__ == "__main__":
    # Launch the Gradio app when executed as a script.
    demo.launch()
|
|