# Source extracted from a Hugging Face Spaces page (revision f031a92); viewer chrome removed.
import os
import io
import re
import math
import random
import shutil
import traceback
from io import StringIO
import gradio as gr
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.ensemble import RandomForestClassifier
import chess, chess.pgn, chess.engine
# Graficos como imagen
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Title shown in the browser tab and the Gradio page header.
APP_TITLE = "DecodeChess-IA — Doctor Linux (gráficos en imagen)"
# Fallback Stockfish binary locations tried when `shutil.which` finds nothing on PATH.
ENGINE_CANDIDATES = ["stockfish","/usr/bin/stockfish","/usr/games/stockfish","/bin/stockfish","/usr/local/bin/stockfish"]
# ---------------- Motor ----------------
def load_engine():
    """Start a UCI Stockfish engine process.

    Prefers the binary found on PATH, then falls back to the hard-coded
    candidate locations in ENGINE_CANDIDATES.

    Raises:
        RuntimeError: if no candidate could be started (message carries the
            last underlying exception).
    """
    located = shutil.which("stockfish")
    if located:
        try:
            return chess.engine.SimpleEngine.popen_uci(located)
        except Exception:
            pass  # fall back to the static candidate list below
    failure = None
    for candidate in ENGINE_CANDIDATES:
        try:
            return chess.engine.SimpleEngine.popen_uci(candidate)
        except Exception as exc:
            failure = exc
    raise RuntimeError(f"No se pudo iniciar Stockfish. Último error: {failure}")
def score_cp(score_obj) -> float:
    """Convert an engine score object to centipawns from White's point of view.

    Mate scores collapse to +/-100000.0; a mate with unknown distance maps
    to 0.0. Objects without a `.pov` method are used as-is.
    """
    try:
        white_view = score_obj.pov(chess.WHITE)
    except Exception:
        white_view = score_obj
    if not white_view.is_mate():
        return float(white_view.score(mate_score=100000))
    distance = white_view.mate()
    if distance is None:
        return 0.0
    return 100000.0 if distance > 0 else -100000.0
# ---------------- PGN & features ----------------
def repair_pgn_text(text: str) -> str:
    """Normalize line endings and prepend a minimal PGN header when missing.

    Returns the repaired text stripped of surrounding whitespace, always
    ending with a single newline.
    """
    normalized = text.replace("\r\n", "\n").replace("\r", "\n")
    if "[Event " not in normalized:
        header_lines = [
            '[Event "?"]',
            '[Site "?"]',
            '[Date "????.??.??"]',
            '[Round "?"]',
            '[White "?"]',
            '[Black "?"]',
            '[Result "*"]',
            "",
            "",
        ]
        normalized = "\n".join(header_lines) + normalized
    return normalized.strip() + "\n"
# Classical material values in pawns (the king is omitted: it is never exchanged).
PIECE_VALUES = {chess.PAWN:1, chess.KNIGHT:3, chess.BISHOP:3, chess.ROOK:5, chess.QUEEN:9}
# The four central squares inspected by the center-pawn feature.
CENTER = [chess.D4, chess.E4, chess.D5, chess.E5]
def material_eval(board: chess.Board):
    """Material balance in pawn units: positive favors White, negative Black."""
    balance = 0
    for piece_type, value in PIECE_VALUES.items():
        balance += value * len(board.pieces(piece_type, chess.WHITE))
        balance -= value * len(board.pieces(piece_type, chess.BLACK))
    return balance
def mobility(board: chess.Board):
    """Number of legal moves available to the side to move."""
    return board.legal_moves.count()
def hanging_pieces(board: chess.Board, color=chess.WHITE):
    """Count pieces of `color` that are attacked by the opponent but undefended."""
    undefended = 0
    for square in chess.SquareSet(board.occupied_co[color]):
        attacked = board.is_attacked_by(not color, square)
        defended = board.is_attacked_by(color, square)
        if attacked and not defended:
            undefended += 1
    return undefended
def basic_features(board: chess.Board):
    """Extract cheap positional features used as inputs to the ML classifier.

    Keys match FEATURE_ORDER (minus "eval_before_cp", which the caller adds).
    """
    features = {}
    features["turn_white"] = 1 if board.turn else 0
    features["mat_cp"] = material_eval(board)
    features["mobility"] = mobility(board)
    features["hanging_w"] = hanging_pieces(board, chess.WHITE)
    features["hanging_b"] = hanging_pieces(board, chess.BLACK)
    # 1 when any pawn (of either color) occupies one of the four center squares.
    has_center_pawn = False
    for square in CENTER:
        piece = board.piece_at(square)
        if piece is not None and piece.piece_type == chess.PAWN:
            has_center_pawn = True
            break
    features["center_pawns"] = int(has_center_pawn)
    features["in_check"] = 1 if board.is_check() else 0
    # Crude game-phase proxy: number of half-moves played so far.
    features["phase"] = len(board.move_stack)
    return features
# Canonical feature-column order; must stay identical between training and inference.
FEATURE_ORDER = ["turn_white","mat_cp","mobility","hanging_w","hanging_b","center_pawns","in_check","phase","eval_before_cp"]
def delta_to_label(delta_cp: float) -> str:
    """Map an evaluation swing (cp, from the mover's view) to a quality label.

    A negative `delta_cp` means the move lost ground; larger losses get
    harsher labels.
    """
    loss = -delta_cp
    thresholds = (
        (20, "Best"),
        (60, "Good"),
        (120, "Inaccuracy"),
        (300, "Mistake"),
    )
    for limit, label in thresholds:
        if loss < limit:
            return label
    return "Blunder"
# ---------------- Entrenamiento rápido ----------------
def generate_training(engine, games=14, plies_per_game=24, time_per=0.05):
    """Build a labeled training set from random self-play.

    For each random move, records the positional features plus the engine
    evaluation before the move, and labels the move by the evaluation swing
    from the mover's perspective.

    Returns a DataFrame with FEATURE_ORDER columns plus "delta_cp" and "label".
    """
    samples = []
    for _ in range(games):
        position = chess.Board()
        for _ in range(plies_per_game):
            if position.is_game_over():
                break
            before = engine.analyse(position, chess.engine.Limit(time=time_per))
            eval_before = score_cp(before["score"])
            moves = list(position.legal_moves)
            if not moves:
                break
            chosen = random.choice(moves)
            feats = basic_features(position)
            feats["eval_before_cp"] = eval_before
            position.push(chosen)
            after = engine.analyse(position, chess.engine.Limit(time=time_per))
            eval_after = score_cp(after["score"])
            # Swing from the mover's point of view (negate for Black).
            swing = eval_after - eval_before
            if not feats["turn_white"]:
                swing = -swing
            sample = {key: feats.get(key, 0.0) for key in FEATURE_ORDER}
            sample["delta_cp"] = swing
            sample["label"] = delta_to_label(swing)
            samples.append(sample)
    return pd.DataFrame(samples)
def train_model_if_needed():
    """Return a RandomForest move classifier, loading a cached model if possible.

    When no readable cache exists, starts an engine, generates training data,
    fits a fresh model, and persists it to model_rf.joblib.
    """
    if os.path.exists("model_rf.joblib"):
        try:
            return load("model_rf.joblib")
        except Exception:
            pass  # unreadable cache: fall through and retrain
    engine = load_engine()
    try:
        df = generate_training(engine)
    finally:
        engine.quit()
    features = df[FEATURE_ORDER].astype(float)
    labels = df["label"]
    model = RandomForestClassifier(n_estimators=120, random_state=42, n_jobs=-1)
    model.fit(features, labels)
    dump(model, "model_rf.joblib")
    return model
# ---------------- Explicaciones ----------------
def explain(label, delta_cp, in_check, hanging_w, hanging_b):
    """Build a short Spanish explanation sentence for a classified move."""
    headline_by_label = {
        "Blunder": "Error grave; posible táctica o pieza colgando.",
        "Mistake": "Cede ventaja significativa.",
        "Inaccuracy": "Había opciones mejores.",
        "Good": "Jugada sólida.",
    }
    # Any other label (e.g. "Best") gets the default praise.
    parts = [headline_by_label.get(label, "Excelente jugada.")]
    if in_check:
        parts.append("Rey bajo ataque.")
    if hanging_w or hanging_b:
        parts.append("Piezas atacadas sin defensa.")
    parts.append(f"Δ={round(delta_cp,1)} cp.")
    return " ".join(parts)
# ---------------- Gráficos (guardar como imagen) ----------------
def save_eval_plot(eval_series, title):
    """Render the per-ply evaluation curve and save it as eval.png.

    Shows a "Sin datos" placeholder when the series is empty. Returns the
    output file path.
    """
    fig, ax = plt.subplots(figsize=(10, 4))
    if eval_series:
        plies = list(range(1, len(eval_series) + 1))
        ax.plot(plies, eval_series, linewidth=2)
        ax.axhline(0, linestyle="--", linewidth=1)  # equality baseline
        ax.set_xlabel("Ply")
        ax.set_ylabel("Centipawns")
        ax.set_title(title)
        ax.grid(True, alpha=0.3)
    else:
        ax.text(0.5, 0.5, "Sin datos", ha="center", va="center", transform=ax.transAxes)
        ax.axis("off")
    fig.tight_layout()
    path = "eval.png"
    fig.savefig(path, dpi=120)
    plt.close(fig)
    return path
def save_errbars(labels):
    """Bar chart of move counts per ML category, saved as errors.png.

    Returns the output file path.
    """
    fig, ax = plt.subplots(figsize=(6, 4))
    categories = ["Best", "Good", "Inaccuracy", "Mistake", "Blunder"]
    tally = [sum(1 for item in labels if item == cat) for cat in categories]
    ax.bar(categories, tally)
    ax.set_ylabel("Cantidad")
    ax.set_title("Distribución por categoría ML")
    # Annotate each bar with its count, nudged slightly above the bar top.
    nudge = max(tally) * 0.04 if tally else 0.2
    for idx, count in enumerate(tally):
        ax.text(idx, count + nudge, str(count), ha="center", va="bottom")
    fig.tight_layout()
    path = "errors.png"
    fig.savefig(path, dpi=120)
    plt.close(fig)
    return path
# ---------------- Análisis PGN ----------------
def analyze_pgn(pgn_text: str, time_per_move=0.2):
    """Analyze every mainline move of the first non-empty game in `pgn_text`.

    Each move is evaluated with Stockfish before/after, classified with the
    RandomForest model, and annotated with a Spanish explanation.

    Returns:
        (annotated_pgn, summary_md, csv_text, eval_png_path, errs_png_path)
        on success, or (None, None, None, None, error_message) when no game
        with moves could be parsed.
    """
    pgn_text = repair_pgn_text(pgn_text)
    stream = StringIO(pgn_text)
    game = chess.pgn.read_game(stream)
    # Skip header-only games (e.g. a repaired fragment with no moves).
    while game is not None and sum(1 for _ in game.mainline_moves()) == 0:
        game = chess.pgn.read_game(stream)
    if game is None:
        return None, None, None, None, "PGN vacío o inválido."
    engine = load_engine()
    # try/finally guarantees the engine process is terminated even when
    # training or analysis raises (the original leaked the process).
    try:
        model = train_model_if_needed()
        board = game.board()
        rows = []
        eval_series = []
        labels = []
        exporter = chess.pgn.StringExporter(headers=True, comments=True, variations=False)
        node = game
        ply = 0
        while node.variations:
            move = node.variation(0).move
            turn_white = board.turn
            info_b = engine.analyse(board, chess.engine.Limit(time=time_per_move))
            eval_b = score_cp(info_b["score"])
            feats = basic_features(board)
            feats["eval_before_cp"] = eval_b
            # np is imported at module level; no per-iteration import needed.
            X = np.array([[feats.get(k, 0.0) for k in FEATURE_ORDER]], dtype=float)
            label = model.predict(X)[0]
            # Guard: "pv" may be missing OR present-but-empty; fall back to the
            # played move (the original raised IndexError on an empty pv list).
            pv = info_b.get("pv") or [move]
            best_move = pv[0]
            best_san = board.san(best_move) if best_move else ""
            played_san = board.san(move)
            board.push(move)
            info_a = engine.analyse(board, chess.engine.Limit(time=time_per_move))
            eval_a = score_cp(info_a["score"])
            # Evaluation swing from the mover's perspective (negate for Black).
            delta = eval_a - eval_b if turn_white else -(eval_a - eval_b)
            labels.append(label)
            eval_series.append(eval_a)
            text = explain(label, delta, feats["in_check"], feats["hanging_w"], feats["hanging_b"])
            node = node.variation(0)
            node.comment = f"[{label}] Δ={round(delta,1)} | Mejor: {best_san}. {text}"
            ply += 1
            rows.append({
                "ply": ply,
                "turn": "White" if turn_white else "Black",
                "played": played_san,
                "best": best_san,
                "eval_before_cp": round(eval_b, 1),
                "eval_after_cp": round(eval_a, 1),
                "delta_cp": round(delta, 1),
                "ml_label": label,
                "explanation": text,
            })
    finally:
        engine.quit()
    annotated = game.accept(exporter)
    df = pd.DataFrame(rows)
    md = ["### Principales errores", "| Ply | Turno | Jugada | Δcp | ML |", "|---:|:---:|:---|---:|:---|"]
    # Worst moves first: the most negative delta_cp values.
    for r in sorted(rows, key=lambda r: r["delta_cp"])[:10]:
        md.append(f"| {r['ply']} | {r['turn']} | {r['played']} | {r['delta_cp']} | {r['ml_label']} |")
    summary_md = "\n".join(md)
    eval_img = save_eval_plot(eval_series, f"Evaluación — {game.headers.get('White','?')} vs {game.headers.get('Black','?')}")
    errs_img = save_errbars(labels)
    return annotated, summary_md, df.to_csv(index=False), eval_img, errs_img
# ---------------- UI ----------------
with gr.Blocks(title=APP_TITLE) as demo:
    gr.Markdown(f"# {APP_TITLE}\n**Motor + ML con visualizaciones** (renderizadas como imágenes)\n- Gráfico de evaluación\n- Gráfico de categorías\n- PGN anotado y CSV")
    eg = gr.Examples(
        examples=[["[Event \"Demo\"]\n[Site \"?\"]\n[Date \"2024.??.??\"]\n[Round \"?\"]\n[White \"Alice\"]\n[Black \"Bob\"]\n[Result \"1-0\"]\n\n1. e4 e5 2. Nf3 Nc6 3. Bb5 a6 4. Ba4 Nf6 5. O-O Be7 6. Re1 b5 7. Bb3 O-O 8. c3 d5 9. exd5 Nxd5 10. Nxe5 Nxe5 11. Rxe5 c6 12. d4 Bd6 13. Re1 Qh4 14. g3 Qh3 15. Qf3 Bg4 16. Qg2 Rae8 17. Be3 Qh5 18. Nd2 Bh3 19. Qf3 Bg4 20. Qg2 f5 21. Bxd5+ cxd5 22. Qxd5+ Kh8 23. Qxd6 f4 24. Bxf4 Bf3 25. Rxe8 Rxe8 26. Be5 Qh3 27. Nxf3 Qf5 28. Qc6 Rf8 29. Qb7 Qg6 30. Nh4 Qc2 31. Qxg7# 1-0"]],
        inputs=[gr.Textbox(visible=False)],
    )
    pgn_in = gr.Textbox(lines=18, label="PGN (pega tu partida aquí)")
    run = gr.Button("Analizar con IA")
    with gr.Row():
        eval_img = gr.Image(label="Evaluación (imagen)", interactive=False)
        errs_img = gr.Image(label="Errores (imagen)", interactive=False)
    annotated_out = gr.Textbox(lines=10, label="PGN anotado")
    summary_out = gr.Markdown(label="Resumen IA")
    files_out = gr.Files(label="Descargas (PGN + CSV)")

    def _analyze(pgn_text):
        """Run the full analysis pipeline and write the downloadable artifacts.

        Returns the five values expected by the click outputs; on failure the
        error (with a short traceback) is surfaced in the annotated-PGN box.
        """
        try:
            annotated, md, csv_text, eval_png, errs_png = analyze_pgn(pgn_text)
            # Context managers ensure the files are flushed and closed before
            # Gradio serves them (the original leaked open file handles).
            with open("anotado.pgn", "w", encoding="utf-8") as fh:
                fh.write(annotated if annotated else "")
            with open("jugadas.csv", "w", encoding="utf-8") as fh:
                fh.write(csv_text if csv_text else "")
            return eval_png, errs_png, annotated, md, ["anotado.pgn", "jugadas.csv"]
        except Exception as e:
            tb = traceback.format_exc(limit=2)
            return None, None, f"Error: {e}\n{tb}", "", []

    run.click(_analyze, inputs=[pgn_in], outputs=[eval_img, errs_img, annotated_out, summary_out, files_out])
if __name__ == "__main__":
    # Warm the model cache before serving so the first request is fast;
    # a training failure is reported but does not prevent the UI from launching.
    try:
        _ = train_model_if_needed()
    except Exception as e:
        print("⚠️ Error al entrenar:", e)
    demo.launch()