<!-- webXOS_chess_RL / generator/webXOS_chess_ANN_RL.html
     (renamed from webXOS_chess_ANN_RL.html to generator/webXOS_chess_ANN_RL.html, commit ea9c172, verified)
     Note: this header was stray repository-listing text sitting before the doctype,
     which forces browsers into quirks mode; it is preserved here as a comment. -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>ANN CHESS RL TRAINER v3.0</title>
<!-- TensorFlow.js for real ML -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@4.10.0/dist/tf.min.js"></script>
<!-- Chess.js for game logic -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/chess.js/0.12.0/chess.min.js"></script>
<!-- JSZip for export -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jszip/3.10.1/jszip.min.js"></script>
<!-- Font Awesome -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
<style>
:root {
--neon-green: #39FF14;
--neon-blue: #00F3FF;
--neon-purple: #aa00ff;
--neon-red: #ff073a;
--neon-yellow: #ffd300;
--dark-bg: #0a0a0a;
--panel-bg: rgba(15, 15, 15, 0.95);
--grid-bg: rgba(0, 20, 0, 0.3);
}
* {
margin: 0;
padding: 0;
box-sizing: border-box;
font-family: 'Courier New', monospace;
}
body {
background: var(--dark-bg);
color: var(--neon-green);
/* Allow vertical scrolling so the stacked-column layout from the
   max-width media queries stays reachable; the desktop layout fits
   within 100vh, so no scrollbar appears there. (Was overflow: hidden,
   which clipped the page on narrow viewports.) */
overflow-x: hidden;
overflow-y: auto;
height: 100vh;
position: relative;
}
/* Quantum Field Background */
#quantum-field {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
pointer-events: none;
z-index: -1;
opacity: 0.3;
}
/* Loading Screen */
#loading-screen {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background: var(--dark-bg);
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
z-index: 9999;
}
.glitch-text {
font-size: 3rem;
font-weight: bold;
text-transform: uppercase;
position: relative;
color: var(--neon-green);
text-shadow: 0.05em 0 0 var(--neon-green), -0.05em -0.025em 0 var(--neon-purple);
animation: glitch 1s infinite;
}
@keyframes glitch {
0% { transform: translate(0); }
20% { transform: translate(-2px, 2px); }
40% { transform: translate(-2px, -2px); }
60% { transform: translate(2px, 2px); }
80% { transform: translate(2px, -2px); }
100% { transform: translate(0); }
}
.loading-subtitle {
font-size: 1.2rem;
margin: 2rem 0;
color: var(--neon-blue);
}
.loading-progress {
width: 400px;
height: 8px;
background: rgba(57, 255, 20, 0.2);
border-radius: 4px;
overflow: hidden;
}
.loading-bar {
height: 100%;
background: linear-gradient(90deg, var(--neon-green), var(--neon-blue));
width: 0%;
transition: width 0.5s;
box-shadow: 0 0 10px var(--neon-green);
}
/* Main Container */
.container {
display: flex;
height: 100vh;
padding: 10px;
gap: 10px;
opacity: 0;
transition: opacity 1s;
}
/* Side Panels */
.panel {
flex: 1;
background: var(--panel-bg);
border: 1px solid var(--neon-green);
border-radius: 8px;
padding: 15px;
display: flex;
flex-direction: column;
min-width: 320px;
box-shadow: 0 0 20px rgba(0, 255, 0, 0.1);
}
.black-panel {
border-color: #fff;
}
.green-panel {
border-color: var(--neon-green);
}
.panel-title {
text-align: center;
font-size: 1.2rem;
margin-bottom: 15px;
color: var(--neon-green);
text-shadow: 0 0 10px currentColor;
padding-bottom: 8px;
border-bottom: 2px solid currentColor;
}
.black-panel .panel-title {
color: #fff;
}
.green-panel .panel-title {
color: var(--neon-green);
}
/* Metric Grids */
.metric-grid {
display: grid;
grid-template-columns: repeat(2, 1fr);
gap: 10px;
margin-bottom: 15px;
}
.metric-card {
background: rgba(0, 30, 0, 0.3);
border: 1px solid rgba(0, 255, 0, 0.2);
border-radius: 6px;
padding: 10px;
transition: all 0.3s;
}
.black-panel .metric-card {
border-color: rgba(255, 255, 255, 0.2);
}
.metric-card:hover {
border-color: currentColor;
box-shadow: 0 0 15px rgba(0, 255, 0, 0.3);
}
.metric-label {
font-size: 0.8rem;
color: #aaa;
margin-bottom: 5px;
}
.metric-value {
font-size: 1.2rem;
font-weight: bold;
color: var(--neon-green);
}
.black-panel .metric-value {
color: #fff;
}
.metric-unit {
font-size: 0.8rem;
color: var(--neon-blue);
}
/* Training Controllers */
.training-controller {
background: rgba(0, 20, 0, 0.3);
padding: 15px;
border-radius: 8px;
border: 1px solid rgba(0, 255, 0, 0.2);
margin-bottom: 15px;
}
.controller-title {
font-size: 0.9rem;
color: var(--neon-blue);
margin-bottom: 10px;
text-align: center;
}
.slider-group {
margin-bottom: 12px;
}
.slider-label {
display: block;
margin-bottom: 5px;
color: var(--neon-blue);
font-size: 0.85rem;
}
.black-panel .slider-label {
color: #aaa;
}
.slider-container {
display: flex;
align-items: center;
gap: 10px;
}
input[type="range"] {
/* Strip the native control chrome so the custom ::-webkit-slider-thumb
   rules below actually take effect — WebKit only honors thumb styling
   when the input itself has -webkit-appearance: none. */
-webkit-appearance: none;
appearance: none;
flex: 1;
height: 6px;
background: rgba(0, 255, 0, 0.1);
border-radius: 3px;
outline: none;
}
.black-panel input[type="range"] {
background: rgba(255, 255, 255, 0.1);
}
input[type="range"]::-webkit-slider-thumb {
appearance: none;
width: 16px;
height: 16px;
border-radius: 50%;
background: var(--neon-green);
cursor: pointer;
box-shadow: 0 0 8px var(--neon-green);
}
.black-panel input[type="range"]::-webkit-slider-thumb {
background: #fff;
box-shadow: 0 0 8px #fff;
}
.slider-value {
min-width: 60px;
text-align: right;
color: var(--neon-green);
font-weight: bold;
font-size: 0.9rem;
}
.black-panel .slider-value {
color: #fff;
}
/* Neural Network Visualization */
.nn-visualization {
height: 180px;
background: rgba(0, 0, 0, 0.5);
border: 1px solid var(--neon-green);
border-radius: 6px;
padding: 10px;
position: relative;
overflow: hidden;
margin-bottom: 15px;
}
.black-panel .nn-visualization {
border-color: #fff;
}
.nn-layer {
position: absolute;
top: 10px;
bottom: 10px;
display: flex;
flex-direction: column;
justify-content: space-around;
}
.neuron {
width: 10px;
height: 10px;
border-radius: 50%;
background: var(--neon-green);
margin: 5px auto;
opacity: 0.8;
transition: all 0.3s;
box-shadow: 0 0 5px currentColor;
}
.black-panel .neuron {
background: #fff;
box-shadow: 0 0 5px #fff;
}
.neuron.active {
opacity: 1;
background: var(--neon-red);
box-shadow: 0 0 12px var(--neon-red);
animation: pulse 0.8s infinite;
}
.black-panel .neuron.active {
background: var(--neon-red);
box-shadow: 0 0 12px var(--neon-red);
}
@keyframes pulse {
0%, 100% { transform: scale(1); }
50% { transform: scale(1.3); }
}
.connection {
position: absolute;
background: rgba(0, 255, 0, 0.2);
transform-origin: 0 0;
}
.black-panel .connection {
background: rgba(255, 255, 255, 0.2);
}
/* Agent Logs */
.agent-log {
background: rgba(0, 20, 0, 0.8);
border: 1px solid var(--neon-green);
border-radius: 6px;
padding: 10px;
font-family: 'Courier New', monospace;
font-size: 11px;
flex: 1;
min-height: 150px;
overflow-y: auto;
margin-top: auto;
}
.black-panel .agent-log {
border-color: #fff;
background: rgba(20, 20, 20, 0.8);
}
.log-header {
display: flex;
justify-content: space-between;
margin-bottom: 8px;
color: var(--neon-green);
font-weight: bold;
font-size: 0.9rem;
padding-bottom: 5px;
border-bottom: 1px solid rgba(0, 255, 0, 0.3);
}
.black-panel .log-header {
color: #fff;
border-bottom: 1px solid rgba(255, 255, 255, 0.3);
}
.log-line {
margin: 3px 0;
line-height: 1.3;
word-wrap: break-word;
}
.log-line.info { color: var(--neon-blue); }
.log-line.success { color: var(--neon-green); }
.log-line.warning { color: var(--neon-yellow); }
.log-line.error { color: var(--neon-red); }
.black-panel .log-line.info { color: #aaa; }
.black-panel .log-line.success { color: #fff; }
.black-panel .log-line.warning { color: #ffaa00; }
.black-panel .log-line.error { color: #ff5555; }
/* Center Arena */
.center-arena {
flex: 2;
display: flex;
flex-direction: column;
gap: 10px;
min-width: 500px;
}
/* Chess Arena */
.chess-arena {
background: var(--panel-bg);
border: 2px solid var(--neon-green);
border-radius: 8px;
padding: 20px;
display: flex;
flex-direction: column;
align-items: center;
box-shadow: 0 0 30px rgba(0, 255, 0, 0.2);
}
.arena-header {
width: 100%;
display: flex;
justify-content: space-between;
margin-bottom: 15px;
color: var(--neon-green);
font-size: 1.1rem;
}
.chess-board-container {
display: flex;
flex-direction: column;
align-items: center;
}
.chess-board {
display: grid;
grid-template-columns: repeat(8, 1fr);
grid-template-rows: repeat(8, 1fr);
width: 400px;
height: 400px;
border: 3px solid var(--neon-green);
box-shadow: 0 0 25px rgba(0, 255, 0, 0.4);
}
.chess-square {
display: flex;
align-items: center;
justify-content: center;
font-size: 28px;
position: relative;
cursor: default;
}
.chess-square.light {
background: rgba(30, 40, 30, 0.8);
}
.chess-square.dark {
background: rgba(10, 20, 10, 0.8);
}
.chess-square.highlight {
background: rgba(0, 255, 0, 0.3);
box-shadow: inset 0 0 10px rgba(0, 255, 0, 0.5);
}
/* Controls */
.controls {
display: grid;
grid-template-columns: repeat(5, 1fr);
gap: 10px;
padding: 15px;
background: var(--panel-bg);
border: 1px solid var(--neon-green);
border-radius: 8px;
}
.btn {
padding: 10px;
border-radius: 6px;
border: none;
font-weight: bold;
cursor: pointer;
transition: all 0.3s;
font-size: 0.9rem;
text-transform: uppercase;
letter-spacing: 1px;
}
.btn-primary {
background: rgba(0, 255, 0, 0.1);
color: var(--neon-green);
border: 2px solid var(--neon-green);
}
.btn-primary:hover {
background: rgba(0, 255, 0, 0.3);
box-shadow: 0 0 20px var(--neon-green);
transform: translateY(-2px);
}
.btn-blue {
background: rgba(0, 243, 255, 0.1);
color: var(--neon-blue);
border: 2px solid var(--neon-blue);
}
.btn-blue:hover {
background: rgba(0, 243, 255, 0.3);
box-shadow: 0 0 20px var(--neon-blue);
}
.btn-red {
background: rgba(255, 7, 58, 0.1);
color: var(--neon-red);
border: 2px solid var(--neon-red);
}
.btn-red:hover {
background: rgba(255, 7, 58, 0.3);
box-shadow: 0 0 20px var(--neon-red);
}
.btn-purple {
background: rgba(170, 0, 255, 0.1);
color: var(--neon-purple);
border: 2px solid var(--neon-purple);
}
.btn-purple:hover {
background: rgba(170, 0, 255, 0.3);
box-shadow: 0 0 20px var(--neon-purple);
}
/* Progress Section */
.progress-section {
display: flex;
gap: 10px;
align-items: center;
padding: 15px;
background: var(--panel-bg);
border: 1px solid var(--neon-green);
border-radius: 8px;
}
.progress-container {
flex: 1;
height: 8px;
background: rgba(0, 255, 0, 0.1);
border-radius: 4px;
overflow: hidden;
}
.progress-bar {
height: 100%;
background: linear-gradient(90deg, var(--neon-green), var(--neon-blue));
width: 0%;
transition: width 0.5s;
box-shadow: 0 0 10px currentColor;
}
/* Main Terminal */
.main-terminal {
flex: 1;
background: rgba(0, 20, 0, 0.8);
border: 1px solid var(--neon-green);
border-radius: 8px;
padding: 15px;
font-family: 'Courier New', monospace;
font-size: 12px;
display: flex;
flex-direction: column;
}
.terminal-header {
display: flex;
justify-content: space-between;
margin-bottom: 10px;
color: var(--neon-green);
font-weight: bold;
padding-bottom: 8px;
border-bottom: 1px solid rgba(0, 255, 0, 0.3);
}
.terminal-content {
flex: 1;
overflow-y: auto;
}
.terminal-line {
margin: 4px 0;
line-height: 1.4;
word-wrap: break-word;
}
.terminal-line.info { color: var(--neon-blue); }
.terminal-line.success { color: var(--neon-green); }
.terminal-line.warning { color: var(--neon-yellow); }
.terminal-line.error { color: var(--neon-red); }
/* Responsive Design */
@media (max-width: 1200px) {
.container {
flex-direction: column;
}
.panel, .center-arena {
min-width: unset;
}
.chess-board {
width: 300px;
height: 300px;
}
.controls {
grid-template-columns: repeat(3, 1fr);
}
}
@media (max-width: 768px) {
.chess-board {
width: 250px;
height: 250px;
}
.controls {
grid-template-columns: repeat(2, 1fr);
}
.metric-grid {
grid-template-columns: 1fr;
}
}
</style>
</head>
<body>
<!-- Loading Screen -->
<div id="loading-screen">
<div class="glitch-text">ANN CHESS RL TRAINER v3.0</div>
<div class="loading-subtitle">Initializing Neural Networks & Training Environment...</div>
<div class="loading-progress">
<div class="loading-bar"></div>
</div>
<div id="loading-details" style="margin-top: 20px; color: var(--neon-blue); font-size: 0.9rem;"></div>
</div>
<!-- Quantum Background -->
<canvas id="quantum-field"></canvas>
<!-- Main Interface -->
<div class="container">
<!-- Left Panel: Black Agent -->
<div class="panel black-panel">
<div class="panel-title">BLACK AGENT (Policy Network)</div>
<div class="metric-grid">
<div class="metric-card">
<div class="metric-label">Win Rate</div>
<div class="metric-value" id="black-win-rate">0.0<span class="metric-unit">%</span></div>
</div>
<div class="metric-card">
<div class="metric-label">Loss Value</div>
<div class="metric-value" id="black-loss">0.000</div>
</div>
<div class="metric-card">
<div class="metric-label">Learning Rate</div>
<div class="metric-value" id="black-lr">0.0010</div>
</div>
<div class="metric-card">
<div class="metric-label">Exploration</div>
<div class="metric-value" id="black-explore">0.30</div>
</div>
<div class="metric-card">
<div class="metric-label">Games Played</div>
<div class="metric-value" id="black-games">0</div>
</div>
<div class="metric-card">
<div class="metric-label">Moves Made</div>
<div class="metric-value" id="black-moves">0</div>
</div>
</div>
<div class="training-controller">
<div class="controller-title">NEURAL NETWORK CONTROLS</div>
<div class="slider-group">
<label class="slider-label">Learning Rate</label>
<div class="slider-container">
<input type="range" min="1" max="100" value="10"
oninput="updateAgentParam('black', 'learningRate', this.value/10000)">
<span class="slider-value" id="black-lr-value">0.0010</span>
</div>
</div>
<div class="slider-group">
<label class="slider-label">Exploration Rate</label>
<div class="slider-container">
<input type="range" min="0" max="100" value="30"
oninput="updateAgentParam('black', 'explorationRate', this.value/100)">
<span class="slider-value" id="black-explore-value">0.30</span>
</div>
</div>
<div class="slider-group">
<label class="slider-label">Discount Factor</label>
<div class="slider-container">
<input type="range" min="50" max="99" value="95"
oninput="updateAgentParam('black', 'discountFactor', this.value/100)">
<span class="slider-value" id="black-discount-value">0.95</span>
</div>
</div>
</div>
<div class="nn-visualization" id="black-nn-viz">
<!-- Neural network visualization -->
</div>
<div class="agent-log">
<div class="log-header">
<span>Black Agent Log</span>
<span id="black-log-count">0</span>
</div>
<div id="black-log-content"></div>
</div>
</div>
<!-- Center Arena -->
<div class="center-arena">
<div class="chess-arena">
<div class="arena-header">
<span>BLACK</span>
<span id="current-turn">White to move</span>
<span>GREEN</span>
</div>
<div class="chess-board-container">
<div class="chess-board" id="chess-board">
<!-- Chess board will be rendered here -->
</div>
<div style="margin-top: 15px; color: var(--neon-blue); font-size: 0.9rem;">
Game #<span id="current-game">1</span> | Moves: <span id="current-moves">0</span>
</div>
</div>
</div>
<div class="controls">
<button class="btn btn-primary" onclick="startTraining()" id="start-btn">
<i class="fas fa-play"></i> Start Training
</button>
<button class="btn btn-red" onclick="pauseTraining()" id="pause-btn" style="display: none;">
<i class="fas fa-pause"></i> Pause Training
</button>
<button class="btn btn-blue" onclick="exportEverything()" id="export-btn">
<i class="fas fa-file-export"></i> Export All
</button>
<button class="btn btn-purple" onclick="resetTraining()">
<i class="fas fa-redo"></i> Reset All
</button>
<button class="btn btn-primary" onclick="saveState()">
<i class="fas fa-save"></i> Save State
</button>
</div>
<div class="progress-section">
<div class="progress-container">
<div class="progress-bar" id="training-progress"></div>
</div>
<span style="color: var(--neon-green); font-weight: bold; min-width: 200px;">
Games: <span id="total-games">0</span> | Moves: <span id="total-moves">0</span>
</span>
</div>
<div class="main-terminal">
<div class="terminal-header">
<span>TRAINING CONSOLE</span>
<span id="training-timer">00:00:00</span>
</div>
<div class="terminal-content" id="main-terminal"></div>
</div>
</div>
<!-- Right Panel: Green Agent -->
<div class="panel green-panel">
<div class="panel-title">GREEN AGENT (Value Network)</div>
<div class="metric-grid">
<div class="metric-card">
<div class="metric-label">Win Rate</div>
<div class="metric-value" id="green-win-rate">0.0<span class="metric-unit">%</span></div>
</div>
<div class="metric-card">
<div class="metric-label">Loss Value</div>
<div class="metric-value" id="green-loss">0.000</div>
</div>
<div class="metric-card">
<div class="metric-label">Learning Rate</div>
<div class="metric-value" id="green-lr">0.0010</div>
</div>
<div class="metric-card">
<div class="metric-label">Exploration</div>
<div class="metric-value" id="green-explore">0.30</div>
</div>
<div class="metric-card">
<div class="metric-label">Games Played</div>
<div class="metric-value" id="green-games">0</div>
</div>
<div class="metric-card">
<div class="metric-label">Moves Made</div>
<div class="metric-value" id="green-moves">0</div>
</div>
</div>
<div class="training-controller">
<div class="controller-title">NEURAL NETWORK CONTROLS</div>
<div class="slider-group">
<label class="slider-label">Learning Rate</label>
<div class="slider-container">
<input type="range" min="1" max="100" value="10"
oninput="updateAgentParam('green', 'learningRate', this.value/10000)">
<span class="slider-value" id="green-lr-value">0.0010</span>
</div>
</div>
<div class="slider-group">
<label class="slider-label">Exploration Rate</label>
<div class="slider-container">
<input type="range" min="0" max="100" value="30"
oninput="updateAgentParam('green', 'explorationRate', this.value/100)">
<span class="slider-value" id="green-explore-value">0.30</span>
</div>
</div>
<div class="slider-group">
<label class="slider-label">Discount Factor</label>
<div class="slider-container">
<input type="range" min="50" max="99" value="95"
oninput="updateAgentParam('green', 'discountFactor', this.value/100)">
<span class="slider-value" id="green-discount-value">0.95</span>
</div>
</div>
</div>
<div class="nn-visualization" id="green-nn-viz">
<!-- Neural network visualization -->
</div>
<div class="agent-log">
<div class="log-header">
<span>Green Agent Log</span>
<span id="green-log-count">0</span>
</div>
<div id="green-log-content"></div>
</div>
</div>
</div>
<script>
// Global State Management
// Single shared mutable store for the whole trainer; every function below
// reads and writes this object directly (no framework or state library).
const state = {
trainingActive: false,
game: null,
currentGameNumber: 1,
trainingStartTime: null,
trainingTimer: null,
lastAutoSave: null,
realTimeInterval: null,
// Neural Networks
// One ChessNeuralNetwork per side, keyed by agent color.
neuralNetworks: {
black: null,
green: null
},
// RL Agents
// One RLAgent per side; each wraps the matching neural network above.
agents: {
black: null,
green: null
},
// Agent Statistics
// Live per-agent metrics mirrored into the side-panel metric cards;
// learningRate/explorationRate/discountFactor are driven by the sliders.
agentStats: {
black: {
wins: 0,
losses: 0,
draws: 0,
totalGames: 0,
movesMade: 0,
learningRate: 0.001,
explorationRate: 0.3,
discountFactor: 0.95,
lossHistory: []
},
green: {
wins: 0,
losses: 0,
draws: 0,
totalGames: 0,
movesMade: 0,
learningRate: 0.001,
explorationRate: 0.3,
discountFactor: 0.95,
lossHistory: []
}
},
// Training Data
// dataset accumulates normalized finished games for export.
dataset: [],
validGames: 0,
totalMoves: 0,
currentGameData: null
};
// Helper functions for reward calculation
// Sums standard piece values per side and returns the white-relative
// balance (white total minus black total); kings count as 0.
function calculateMaterialAdvantage(game) {
    const pieceValues = { p: 1, n: 3, b: 3, r: 5, q: 9, k: 0 };
    let balance = 0;
    for (const row of game.board()) {
        for (const occupant of row) {
            if (!occupant) continue;
            const worth = pieceValues[occupant.type] || 0;
            balance += occupant.color === 'w' ? worth : -worth;
        }
    }
    return balance;
}
// Net occupation of the four central squares: +1 for each white piece,
// -1 for each black piece sitting on d4/d5/e4/e5.
function calculateCenterControl(game) {
    const CENTER_SQUARES = ['d4', 'd5', 'e4', 'e5'];
    return CENTER_SQUARES.reduce((total, square) => {
        const occupant = game.get(square);
        if (!occupant) return total;
        return total + (occupant.color === 'w' ? 1 : -1);
    }, 0);
}
// Normalize game data for export
// Coerces a raw in-memory game record into a fully-typed, JSON-safe shape
// so the exported dataset never contains undefined/NaN fields. Missing
// fields get deterministic-type defaults; a missing id gets a random one.
function normalizeGameForExport(game) {
    const moves = Array.isArray(game.moves)
        ? game.moves.map(m => (typeof m === 'object' ? m : { san: String(m) }))
        : [];
    // Prefer the recorded integer count; otherwise fall back to the array
    // length (the old trailing isNaN guard was unreachable — both branches
    // already yield real numbers — so it is folded away here).
    const movesCount = Number.isInteger(game.moves_count) ? game.moves_count : moves.length;
    const nowIso = () => new Date().toISOString();
    return {
        // slice(2, 11) replaces the deprecated String.prototype.substr(2, 9).
        id: String(game.id || `game_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`),
        pgn: String(game.pgn || ''),
        moves: moves,
        result: String(game.result || 'draw'),
        positions: Array.isArray(game.positions) ? game.positions : [],
        metadata: typeof game.metadata === 'object' ? game.metadata : {},
        start_time: String(game.start_time || nowIso()),
        end_time: String(game.end_time || nowIso()),
        moves_count: movesCount,
        final_fen: String(game.final_fen || ''),
        agent_metadata: typeof game.agent_metadata === 'object' ? game.agent_metadata : {}
    };
}
// TensorFlow.js Neural Network
// Small dense value network: a 896-float board encoding in, one scalar in
// [-1, 1] out (tanh head). One instance is created per agent color.
class ChessNeuralNetwork {
constructor(name, color, learningRate = 0.001) {
this.name = name;
this.color = color;
this.learningRate = learningRate;
this.model = null;
this.optimizer = tf.train.adam(learningRate);
this.lossHistory = [];
this.initializeModel();
}
// Builds and compiles the sequential model:
// 896 -> 128 -> 64 -> dropout(0.2) -> 32 -> 1 (tanh), MSE loss, Adam.
initializeModel() {
this.model = tf.sequential();
this.model.add(tf.layers.dense({
units: 128,
activation: 'relu',
inputShape: [896]
}));
this.model.add(tf.layers.dense({ units: 64, activation: 'relu' }));
this.model.add(tf.layers.dropout({ rate: 0.2 }));
this.model.add(tf.layers.dense({ units: 32, activation: 'relu' }));
this.model.add(tf.layers.dense({ units: 1, activation: 'tanh' }));
this.model.compile({
optimizer: this.optimizer,
loss: 'meanSquaredError',
metrics: ['accuracy']
});
logAgent(this.color, `Neural network initialized (LR: ${this.learningRate})`, 'success');
}
// Returns the scalar value prediction for one encoded board state.
// Input/prediction tensors are disposed to avoid tf.js memory leaks.
async predict(boardState) {
const input = tf.tensor2d([boardState]);
const prediction = this.model.predict(input);
const value = await prediction.data();
tf.dispose([input, prediction]);
return value[0];
}
// Runs one epoch of fitting on a batch of (state, targetQ) pairs and
// returns the batch loss (also appended to lossHistory).
async train(states, targets) {
if (states.length === 0) return 0;
const xs = tf.tensor2d(states);
const ys = tf.tensor2d(targets, [targets.length, 1]);
const history = await this.model.fit(xs, ys, {
epochs: 1,
batchSize: Math.min(32, states.length),
verbose: 0
});
const loss = history.history.loss[0];
this.lossHistory.push(loss);
tf.dispose([xs, ys]);
return loss;
}
updateLearningRate(newRate) {
this.learningRate = newRate;
// NOTE(review): tf.js does not document a public setLearningRate() on
// its optimizers — confirm this works under tfjs 4.10; otherwise the
// optimizer must be recreated and the model recompiled.
this.optimizer.setLearningRate(newRate);
}
// Serializes model weights into plain {shape, data, dtype} objects so
// they can be JSON-exported (used by the export/save features).
async getWeightsData() {
if (!this.model) return null;
const weights = this.model.getWeights();
const weightsData = [];
for (const tensor of weights) {
const data = await tensor.data();
weightsData.push({
shape: Array.from(tensor.shape),
data: Array.from(data),
dtype: tensor.dtype
});
}
return weightsData;
}
}
// Reinforcement Learning Agent
// Q-learning-style agent: epsilon-greedy move selection via one-ply search
// over the value network, plus a bounded experience-replay buffer.
class RLAgent {
constructor(color, neuralNetwork) {
this.color = color;
this.nn = neuralNetwork;
this.memory = [];
this.experienceSize = 1000;
this.batchSize = 32;
// Fixed discount factor at construction time.
// NOTE(review): the discount-factor slider writes to
// state.agentStats[color].discountFactor — verify updateAgentParam also
// syncs this.gamma, otherwise the slider has no effect on training.
this.gamma = 0.95;
this.lastAction = null;
}
// Epsilon-greedy selection: with probability explorationRate pick a
// random legal move; otherwise evaluate every move one ply deep with the
// value net and pick the best (value negated for the black agent).
async selectMove(game, explorationRate = 0.3) {
const moves = game.moves();
if (moves.length === 0) return null;
if (Math.random() < explorationRate) {
const randomMove = moves[Math.floor(Math.random() * moves.length)];
logAgent(this.color, `Exploration: ${randomMove}`, 'info');
return randomMove;
}
let bestMove = null;
let bestValue = -Infinity;
for (const move of moves) {
// Probe each move on a scratch copy so the real game is untouched.
const testGame = new Chess(game.fen());
testGame.move(move);
const state = encodeBoardState(testGame);
const value = await this.nn.predict(state);
const adjustedValue = this.color === 'green' ? value : -value;
if (adjustedValue > bestValue) {
bestValue = adjustedValue;
bestMove = move;
}
}
if (bestMove) {
logAgent(this.color, `Best move: ${bestMove} (value: ${bestValue.toFixed(3)})`, 'info');
}
return bestMove || moves[0];
}
// Appends one (s, a, r, s', done) transition; oldest entries are evicted
// once the buffer exceeds experienceSize.
addExperience(state, action, reward, nextState, done) {
this.memory.push({
state,
action,
reward,
nextState,
done,
timestamp: new Date().toISOString()
});
if (this.memory.length > this.experienceSize) {
this.memory.shift();
}
}
// Samples a random batch (with replacement) from memory, builds TD
// targets r + gamma * V(s'), and fits the network once. Returns the loss.
async trainFromMemory() {
if (this.memory.length < this.batchSize) return 0;
const batch = [];
const batchSize = Math.min(this.batchSize, this.memory.length);
for (let i = 0; i < batchSize; i++) {
const index = Math.floor(Math.random() * this.memory.length);
batch.push(this.memory[index]);
}
const states = [];
const targets = [];
for (const exp of batch) {
// NOTE(review): currentQ is computed but never used below — likely
// leftover from an earlier target formulation; a candidate for removal.
const currentQ = await this.nn.predict(exp.state);
let targetQ = exp.reward;
if (!exp.done) {
const nextQ = await this.nn.predict(exp.nextState);
targetQ += this.gamma * nextQ;
}
states.push(exp.state);
targets.push(targetQ);
}
const loss = await this.nn.train(states, targets);
// `state` here is the GLOBAL app-state object (not a local); losses above
// an arbitrary 10.0 cutoff are treated as outliers and not recorded.
if (loss < 10) {
state.agentStats[this.color].lossHistory.push(loss);
if (state.agentStats[this.color].lossHistory.length > 100) {
state.agentStats[this.color].lossHistory.shift();
}
}
return loss;
}
}
// Board State Encoding
// Encodes a chess.js game into a flat 896-element 0/1 vector:
// 64 squares x 14 planes (12 piece-type one-hots, 1 white-color flag,
// 1 occupancy flag).
function encodeBoardState(game) {
    const board = game.board();
    const encoded = new Array(896).fill(0);
    // Plane index per piece letter: lowercase = black, uppercase = white.
    const PIECE_PLANES = {
        'p': 0, 'n': 1, 'b': 2, 'r': 3, 'q': 4, 'k': 5,
        'P': 6, 'N': 7, 'B': 8, 'R': 9, 'Q': 10, 'K': 11
    };
    for (let rank = 0; rank < 8; rank++) {
        for (let file = 0; file < 8; file++) {
            const piece = board[rank][file];
            const baseIndex = (rank * 8 + file) * 14;
            if (piece) {
                // Bug fix: the lookup key is the bare piece letter, uppercased
                // for white. Previously the key concatenated piece.type with the
                // cased letter (e.g. 'pP'/'pp'), which never matched the map, so
                // all 12 piece planes stayed zero and the network only ever saw
                // the color/occupancy flags.
                const key = piece.color === 'w' ? piece.type.toUpperCase() : piece.type;
                const pieceIndex = PIECE_PLANES[key];
                if (pieceIndex !== undefined) {
                    encoded[baseIndex + pieceIndex] = 1;
                }
                encoded[baseIndex + 12] = piece.color === 'w' ? 1 : 0;
                encoded[baseIndex + 13] = 1;
            }
        }
    }
    return encoded;
}
// Visualization Functions
// Starts the animated particle background on the fixed #quantum-field
// canvas: sinusoidally drifting, hue-cycling dots redrawn every frame via
// requestAnimationFrame over a translucent fade layer.
function updateQuantumBackground() {
const canvas = document.getElementById('quantum-field');
if (!canvas) return;
const ctx = canvas.getContext('2d');
// Canvas is sized once at startup.
// NOTE(review): there is no resize listener, so the field will not fill
// the viewport after a window resize — confirm whether that is intended.
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
function draw() {
// Translucent fill instead of clearRect gives trailing "motion blur".
ctx.fillStyle = 'rgba(10, 10, 10, 0.1)';
ctx.fillRect(0, 0, canvas.width, canvas.height);
const time = Date.now() * 0.001;
const particleCount = 50;
for (let i = 0; i < particleCount; i++) {
const x = (Math.sin(time + i * 0.1) * 0.5 + 0.5) * canvas.width;
const y = (Math.cos(time * 0.7 + i * 0.05) * 0.5 + 0.5) * canvas.height;
const size = 2 + Math.sin(time + i) * 1;
const hue = (i * 3 + time * 50) % 360;
ctx.beginPath();
ctx.arc(x, y, size, 0, Math.PI * 2);
ctx.fillStyle = `hsla(${hue}, 100%, 50%, 0.3)`;
ctx.fill();
}
requestAnimationFrame(draw);
}
draw();
}
// Renders a stylized feed-forward network diagram into `containerId` and
// starts a 1-second ticker that randomly toggles neuron "activity".
// `agentColor` is accepted for interface compatibility (per-panel styling
// comes from the .black-panel/.green-panel CSS, not from this argument).
function visualizeNeuralNetwork(containerId, agentColor) {
    const container = document.getElementById(containerId);
    if (!container) return;
    // Bug fix: clear any ticker left by a previous render so repeated calls
    // (e.g. after a reset) do not leak stacked setInterval timers, each
    // fighting over the same neurons.
    if (container.dataset.vizTimer) {
        clearInterval(Number(container.dataset.vizTimer));
    }
    container.innerHTML = '';
    const layerSizes = [896, 128, 64, 32, 1];
    const width = container.clientWidth;
    const height = container.clientHeight;
    const layerSpacing = width / (layerSizes.length + 1);
    for (let l = 0; l < layerSizes.length; l++) {
        const layerDiv = document.createElement('div');
        layerDiv.className = 'nn-layer';
        layerDiv.style.left = `${(l + 1) * layerSpacing}px`;
        // Cap drawn neurons so wide layers (896 inputs) stay readable.
        const neurons = Math.min(layerSizes[l], 12);
        const neuronSpacing = height / (neurons + 1);
        for (let n = 0; n < neurons; n++) {
            const neuron = document.createElement('div');
            neuron.className = 'neuron';
            neuron.style.top = `${neuronSpacing * (n + 1)}px`;
            if (Math.random() > 0.5) {
                neuron.classList.add('active');
                neuron.style.animationDelay = `${Math.random() * 2}s`;
            }
            layerDiv.appendChild(neuron);
        }
        container.appendChild(layerDiv);
    }
    // Periodically toggle ~30% of neurons to suggest ongoing activity;
    // remember the timer id on the container for cleanup next render.
    const timer = setInterval(() => {
        const neurons = container.querySelectorAll('.neuron');
        neurons.forEach(neuron => {
            if (Math.random() > 0.7) {
                neuron.classList.toggle('active');
                neuron.style.animationDelay = `${Math.random() * 2}s`;
            }
        });
    }, 1000);
    container.dataset.vizTimer = String(timer);
}
// Rebuilds the #chess-board grid from state.game and refreshes the move
// counter and turn indicator. Squares are laid out rank 8 -> 1 (top to
// bottom), file a -> h (left to right).
function renderChessBoard() {
const board = document.getElementById('chess-board');
if (!board || !state.game) return;
board.innerHTML = '';
const files = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'];
const ranks = [8, 7, 6, 5, 4, 3, 2, 1];
for (let rank = 0; rank < 8; rank++) {
for (let file = 0; file < 8; file++) {
const square = files[file] + ranks[rank];
const piece = state.game.get(square);
const div = document.createElement('div');
// Alternating light/dark coloring; (file + rank) even = light square.
div.className = `chess-square ${(file + rank) % 2 === 0 ? 'light' : 'dark'}`;
div.dataset.square = square;
if (piece) {
div.textContent = getPieceSymbol(piece.type, piece.color);
}
board.appendChild(div);
}
}
const history = state.game.history();
document.getElementById('current-moves').textContent = history.length;
// White pieces are driven by the "Green" agent, hence the label mapping.
const turn = state.game.turn() === 'w' ? 'Green' : 'Black';
document.getElementById('current-turn').textContent = `${turn} to move`;
}
// Maps a chess.js piece type ('p','n','b','r','q','k') and color ('w'/'b')
// to its Unicode chess glyph for board rendering.
function getPieceSymbol(type, color) {
    const glyphs = {
        p: ['♙', '♟'],
        n: ['♘', '♞'],
        b: ['♗', '♝'],
        r: ['♖', '♜'],
        q: ['♕', '♛'],
        k: ['♔', '♚']
    };
    return glyphs[type][color === 'w' ? 0 : 1];
}
// Training Functions
// Begins a self-play training session: flips the UI into running mode,
// starts the wall-clock timer and monitoring loop, and kicks off the first
// game (startNewGame schedules trainingStep when trainingActive is set).
async function startTraining() {
if (state.trainingActive) return;
state.trainingActive = true;
state.trainingStartTime = Date.now();
document.getElementById('start-btn').style.display = 'none';
document.getElementById('pause-btn').style.display = 'inline-block';
log('Starting neural network training session...', 'success');
log('Two RL agents will play chess against each other', 'info');
log('Left: Black Agent (Policy Network)', 'info');
log('Right: Green Agent (Value Network)', 'info');
startTrainingTimer();
startNewGame();
startRealTimeMonitoring();
}
// Plays one half-move of self-play: the agent to move picks a move
// (epsilon-greedy), makes it, receives a shaped reward, stores the
// transition in replay memory, and trains from memory. Reschedules itself
// every 100ms while training is active; on game end it saves the game and
// starts the next one.
async function trainingStep() {
    if (!state.trainingActive) return;
    try {
        if (state.game.game_over()) {
            saveCurrentGame();
            updateAllMetrics();
            startNewGame();
            return;
        }
        // White is driven by the green agent, black by the black agent.
        const currentColor = state.game.turn();
        const agentColor = currentColor === 'w' ? 'green' : 'black';
        const agent = state.agents[agentColor];
        const currentState = encodeBoardState(state.game);
        const explorationRate = state.agentStats[agentColor].explorationRate;
        const move = await agent.selectMove(state.game, explorationRate);
        if (!move) {
            log(`No valid moves for ${agentColor}`, 'warning');
            saveCurrentGame();
            startNewGame();
            return;
        }
        const result = state.game.move(move);
        if (!result) {
            log(`Invalid move ${move} from ${agentColor}`, 'error');
            saveCurrentGame();
            startNewGame();
            return;
        }
        state.agentStats[agentColor].movesMade++;
        state.totalMoves++;
        const nextState = encodeBoardState(state.game);
        const gameResult = evaluateGameResult(state.game);
        let reward = 0;
        if (state.game.game_over()) {
            // Terminal rewards: win +1, draw +0.1, loss -1.
            if (gameResult === agentColor) reward = 1;
            else if (gameResult === 'draw') reward = 0.1;
            else if (gameResult && gameResult !== agentColor) reward = -1;
        } else {
            // Shaped intermediate reward, expressed from the mover's side.
            // Bug fix: material and center terms are white-relative
            // (white minus black), so they must be negated for the black
            // agent — previously black was rewarded for WHITE's advantage.
            const perspective = agentColor === 'green' ? 1 : -1;
            reward = perspective * calculateMaterialAdvantage(state.game) * 0.01;
            reward += perspective * calculateCenterControl(state.game) * 0.005;
            // NOTE(review): after the agent's move, moves() lists the
            // OPPONENT's legal replies, so this term rewards opponent
            // mobility — kept for continuity, but worth revisiting.
            reward += state.game.moves().length * 0.001;
            // After our move, in_check() means the OPPONENT is in check:
            // a small bonus for the mover regardless of color. (Previously
            // the black agent was penalized for delivering check.)
            if (state.game.in_check()) {
                reward += 0.05;
            }
        }
        agent.addExperience(currentState, move, reward, nextState, state.game.game_over());
        const loss = await agent.trainFromMemory();
        if (loss > 0) {
            logAgent(agentColor, `Trained (loss: ${loss.toFixed(4)})`, 'info');
        }
        // Record the move into the current game's export record.
        if (state.currentGameData) {
            state.currentGameData.moves.push({
                san: move,
                color: currentColor,
                turn: state.currentGameData.moves.length + 1,
                timestamp: new Date().toISOString(),
                reward: reward,
                fen: state.game.fen()
            });
            state.currentGameData.positions.push(state.game.fen());
        }
        renderChessBoard();
        updateAllMetrics();
        setTimeout(trainingStep, 100);
    } catch (error) {
        console.error("Training error:", error);
        log(`Training error: ${error.message}`, 'error');
        pauseTraining();
    }
}
// Resets the board to the initial position, bumps the game counter, and
// seeds a fresh currentGameData record (with the hyperparameters in force
// at game start). Schedules the first trainingStep if training is active.
function startNewGame() {
state.game = new Chess();
// NOTE(review): currentGameNumber starts at 1 and is incremented BEFORE
// play, so the first game of a session is displayed as #2 — confirm
// whether that off-by-one is intended.
state.currentGameNumber++;
state.currentGameData = {
id: `game_${state.currentGameNumber}_${Date.now()}`,
pgn: '',
moves: [],
positions: [state.game.fen()],
start_time: new Date().toISOString(),
metadata: {
black_lr: state.agentStats.black.learningRate,
green_lr: state.agentStats.green.learningRate,
black_explore: state.agentStats.black.explorationRate,
green_explore: state.agentStats.green.explorationRate,
training_iteration: state.currentGameNumber
}
};
document.getElementById('current-game').textContent = state.currentGameNumber;
renderChessBoard();
log(`Starting game #${state.currentGameNumber}`, 'info');
if (state.trainingActive) {
setTimeout(trainingStep, 100);
}
}
function saveCurrentGame() {
  // Finalize the in-progress game record, score it, validate it, and (if valid)
  // append it to the dataset while updating both agents' win/loss tallies.
  if (!state.currentGameData || !state.game) return;

  // Repair path: if per-move logging drifted from the engine's history,
  // rebuild the move list from the engine.
  // NOTE(review): rebuilt moves all carry the FINAL fen, not their own
  // position — positions[] stays the authoritative per-ply record.
  const history = state.game.history();
  if (state.currentGameData.moves.length !== history.length) {
    state.currentGameData.moves = history.map((san, idx) => ({
      san,
      color: idx % 2 === 0 ? 'w' : 'b',
      turn: idx + 1,
      timestamp: new Date().toISOString(),
      fen: state.game.fen()
    }));
  }

  // Score from TERMINAL states only. The previous version also treated a mere
  // check (game not over) as a win, which inflated the win statistics.
  let result = 'draw';
  if (state.game.in_checkmate()) {
    // The side to move is the side that was mated; 'w' (white) is the green agent.
    result = state.game.turn() === 'w' ? 'black' : 'green';
  } else if (state.game.in_draw() || state.game.in_stalemate() ||
             state.game.in_threefold_repetition()) {
    result = 'draw';
  }

  state.currentGameData.final_fen = state.game.fen();
  state.currentGameData.result = result;
  state.currentGameData.end_time = new Date().toISOString();
  state.currentGameData.moves_count = state.currentGameData.moves.length;
  state.currentGameData.pgn = state.game.pgn();
  state.currentGameData.agent_metadata = {
    black: {
      learning_rate: state.agentStats.black.learningRate,
      exploration_rate: state.agentStats.black.explorationRate,
      loss: state.agentStats.black.lossHistory.slice(-1)[0] || 0,
      total_moves: state.agentStats.black.movesMade,
      total_games: state.agentStats.black.totalGames
    },
    green: {
      learning_rate: state.agentStats.green.learningRate,
      exploration_rate: state.agentStats.green.explorationRate,
      loss: state.agentStats.green.lossHistory.slice(-1)[0] || 0,
      total_moves: state.agentStats.green.movesMade,
      total_games: state.agentStats.green.totalGames
    }
  };

  if (validateGameData(state.currentGameData)) {
    const normalizedGame = normalizeGameForExport(state.currentGameData);
    state.dataset.push(normalizedGame);
    state.validGames++;
    // A green win is a black loss and vice versa; draws count for both.
    if (result === 'green') {
      state.agentStats.green.wins++;
      state.agentStats.black.losses++;
    } else if (result === 'black') {
      state.agentStats.black.wins++;
      state.agentStats.green.losses++;
    } else {
      state.agentStats.green.draws++;
      state.agentStats.black.draws++;
    }
    state.agentStats.green.totalGames++;
    state.agentStats.black.totalGames++;
    log(`Game ${state.currentGameNumber} completed: ${result} (${state.currentGameData.moves_count} moves)`, 'success');
    if (state.validGames % 10 === 0) {
      log(`Auto-collected ${state.validGames} games`, 'info');
    }
  } else {
    log(`Game ${state.currentGameNumber} invalid - discarded`, 'warning');
  }
}
function evaluateGameResult(game) {
  // Map a terminal position to a winner ('black'/'green'), 'draw', or null if ongoing.
  if (game.in_checkmate()) {
    // The side whose turn it is has been mated; white ('w') is the green agent.
    return game.turn() === 'w' ? 'black' : 'green';
  }
  const drawn = game.in_draw() || game.in_stalemate() || game.in_threefold_repetition();
  return drawn ? 'draw' : null;
}
function validateGameData(gameData) {
  // Sanity-check a finished game record before it enters the training dataset.
  // Returns true only for structurally sound games whose moves replay legally.
  try {
    if (!gameData || typeof gameData !== 'object') return false;
    if (!gameData.moves || !Array.isArray(gameData.moves)) return false;
    if (!gameData.result || !['black', 'green', 'draw'].includes(gameData.result)) return false;
    if (gameData.moves.length > 0) {
      // Replay every SAN move on a scratch board. Previously an illegal move
      // was only warned about and the corrupt game was still accepted.
      const testGame = new Chess();
      for (let i = 0; i < gameData.moves.length; i++) {
        const move = gameData.moves[i];
        if (!testGame.move(move.san)) {
          log(`Move ${i + 1} invalid: ${move.san}`, 'warning');
          return false;
        }
      }
    }
    // Boolean() so callers always get true/false, never a truthy string.
    return Boolean(gameData.moves_count === gameData.moves.length &&
      gameData.id &&
      gameData.start_time);
  } catch (error) {
    log(`Validation error: ${error.message}`, 'warning');
    return false;
  }
}
function pauseTraining() {
  // Halt the training loop, restore the start/pause buttons, and stop timers.
  state.trainingActive = false;
  document.getElementById('start-btn').style.display = 'inline-block';
  document.getElementById('pause-btn').style.display = 'none';

  const timer = state.trainingTimer;
  if (timer) {
    clearInterval(timer);
    state.trainingTimer = null;
  }

  stopRealTimeMonitoring();
  log('Training paused', 'warning');
}
function resetTraining() {
  // Stop any active run, then wipe every piece of training state back to defaults.
  if (state.trainingActive) {
    pauseTraining();
  }

  // Both agents start from identical default hyperparameters and empty records.
  const freshAgentStats = () => ({
    wins: 0,
    losses: 0,
    draws: 0,
    totalGames: 0,
    movesMade: 0,
    learningRate: 0.001,
    explorationRate: 0.3,
    discountFactor: 0.95,
    lossHistory: []
  });

  state.game = new Chess();
  state.currentGameNumber = 1;
  state.dataset = [];
  state.validGames = 0;
  state.totalMoves = 0;
  state.currentGameData = null;
  state.lastAutoSave = null;
  state.agentStats = { black: freshAgentStats(), green: freshAgentStats() };

  initializeNeuralNetworks();
  renderChessBoard();
  updateAllMetrics();

  document.getElementById('current-game').textContent = '1';
  document.getElementById('current-moves').textContent = '0';
  document.getElementById('training-timer').textContent = '00:00:00';
  log('Training completely reset - all data cleared', 'success');
}
// Real-time Monitoring Functions
function startRealTimeMonitoring() {
  // Poll once per second while training is active: refresh metrics,
  // auto-save every 5 minutes, and advance the visual progress bar.
  if (state.realTimeInterval) {
    clearInterval(state.realTimeInterval);
  }

  state.realTimeInterval = setInterval(() => {
    if (!state.trainingActive) return;

    updateAllMetrics();

    const AUTO_SAVE_MS = 5 * 60 * 1000;
    if (Date.now() - (state.lastAutoSave || 0) > AUTO_SAVE_MS) {
      saveState();
      state.lastAutoSave = Date.now();
      log('Auto-saved training state', 'info');
    }

    // Progress bar cycles every 1000 moves (0-100%).
    const bar = document.getElementById('training-progress');
    if (bar) {
      bar.style.width = `${Math.min(100, (state.totalMoves % 1000) / 10)}%`;
    }
  }, 1000);
}
function stopRealTimeMonitoring() {
  // Cancel the monitoring poller, if one is running.
  if (!state.realTimeInterval) return;
  clearInterval(state.realTimeInterval);
  state.realTimeInterval = null;
}
// UI Update Functions
function updateAgentParam(agent, param, value) {
  // Apply a parameter change (e.g. 'learningRate', 'explorationRate') to one
  // agent's stats, propagate it to the live network when relevant, and refresh
  // the matching UI readout.
  const stats = state.agentStats[agent];
  if (!stats) return;
  stats[param] = value;

  // Learning rate is the only parameter the network itself consumes.
  if (param === 'learningRate' && state.neuralNetworks[agent]) {
    state.neuralNetworks[agent].updateLearningRate(value);
  }

  const display = document.getElementById(`${agent}-${param}-value`);
  if (display) {
    display.textContent = param === 'learningRate' ? value.toFixed(4) : value.toFixed(2);
  }

  logAgent(agent, `${param} set to ${value}`, 'info');
  updateAllMetrics();
}
function updateAllMetrics() {
  // Refresh both agent dashboards, the global counters, and the progress bar.
  // The per-agent DOM updates were copy-pasted for black/green; factored into
  // renderAgentMetrics() so the two panels can never drift apart.
  renderAgentMetrics('black');
  renderAgentMetrics('green');

  // Overall training metrics
  document.getElementById('total-games').textContent = state.validGames;
  document.getElementById('total-moves').textContent = state.totalMoves;

  // Progress bar cycles every 1000 moves (0-100%).
  const progress = Math.min(100, (state.totalMoves % 1000) / 10);
  const progressBar = document.getElementById('training-progress');
  if (progressBar) {
    progressBar.style.width = `${progress}%`;
  }
}

// Write one agent's win rate, latest loss, hyperparameters, and counters into
// its dashboard panel. `agent` is 'black' or 'green' (matches element id prefixes).
function renderAgentMetrics(agent) {
  const stats = state.agentStats[agent];
  const total = Math.max(1, stats.totalGames); // avoid divide-by-zero before any game
  const winRate = (stats.wins / total * 100).toFixed(1);
  const latestLoss = stats.lossHistory.length > 0 ?
    stats.lossHistory.slice(-1)[0].toFixed(3) : '0.000';
  document.getElementById(`${agent}-win-rate`).innerHTML = `${winRate}<span class="metric-unit">%</span>`;
  document.getElementById(`${agent}-loss`).textContent = latestLoss;
  document.getElementById(`${agent}-lr`).textContent = stats.learningRate.toFixed(4);
  document.getElementById(`${agent}-explore`).textContent = stats.explorationRate.toFixed(2);
  document.getElementById(`${agent}-games`).textContent = stats.totalGames;
  document.getElementById(`${agent}-moves`).textContent = stats.movesMade;
}
function startTrainingTimer() {
  // Restart the elapsed-time display, ticking once per second from 00:00:00.
  // NOTE(review): elapsed time resets on every call, so pausing and resuming
  // restarts the clock — confirm whether cumulative time is intended.
  if (state.trainingTimer) {
    clearInterval(state.trainingTimer);
  }

  let elapsed = 0;
  const pad = (n) => n.toString().padStart(2, '0');

  state.trainingTimer = setInterval(() => {
    elapsed += 1;
    const h = Math.floor(elapsed / 3600);
    const m = Math.floor((elapsed % 3600) / 60);
    const s = elapsed % 60;
    document.getElementById('training-timer').textContent = `${pad(h)}:${pad(m)}:${pad(s)}`;
  }, 1000);
}
// Logging Functions
function log(message, type = 'info') {
  // Append a timestamped line to the main terminal panel, capped at 100 entries.
  const terminal = document.getElementById('main-terminal');
  if (!terminal) return;

  const entry = document.createElement('div');
  entry.className = `terminal-line ${type}`;
  entry.textContent = `[${new Date().toLocaleTimeString()}] ${message}`;
  terminal.appendChild(entry);

  // Drop the oldest line once the cap is exceeded, then pin the view to the bottom.
  if (terminal.children.length > 100) {
    terminal.removeChild(terminal.firstChild);
  }
  terminal.scrollTop = terminal.scrollHeight;
}
function logAgent(agent, message, type = 'info') {
  // Append a timestamped line to one agent's log panel (capped at 50 entries)
  // and keep the displayed entry count in sync with the panel's contents.
  const logContent = document.getElementById(`${agent}-log-content`);
  const logCount = document.getElementById(`${agent}-log-count`);
  if (!logContent || !logCount) return;

  const line = document.createElement('div');
  line.className = `log-line ${type}`;
  line.textContent = `[${new Date().toLocaleTimeString()}] ${message}`;
  logContent.appendChild(line);

  // Trim BEFORE reporting the count; previously the counter was set to the
  // pre-trim length, so it displayed 51 while the panel only held 50 lines.
  if (logContent.children.length > 50) {
    logContent.removeChild(logContent.firstChild);
  }
  logCount.textContent = logContent.children.length;
  logContent.scrollTop = logContent.scrollHeight;
}
// SIMPLIFIED EXPORT FUNCTION - ONE BUTTON EXPORTS EVERYTHING
async function exportEverything() {
  // Bundle training games, model descriptions, statistics, and a README into
  // a single ZIP archive and trigger a browser download.
  log('Starting export of all training data and models...', 'info');
  try {
    // Create ZIP file
    const zip = new JSZip();

    // 1. Export all training games (JSON + a flattened CSV view).
    if (state.dataset.length > 0) {
      log(`Exporting ${state.dataset.length} training games...`, 'info');
      const gamesData = JSON.stringify(state.dataset, null, 2);
      zip.file("training_games.json", gamesData);
      // Only the PGN field is quote-escaped; the remaining fields are
      // generated internally (ids/timestamps/FENs) and contain no quotes.
      const csvHeader = "id,pgn,result,moves_count,start_time,end_time,final_fen\n";
      const csvRows = state.dataset.map(g =>
        `"${g.id}","${g.pgn.replace(/"/g, '""')}","${g.result}",${g.moves_count},"${g.start_time}","${g.end_time}","${g.final_fen}"`
      ).join('\n');
      zip.file("training_games.csv", csvHeader + csvRows);
    } else {
      log('No training games to export', 'warning');
    }

    // 2. Export neural network model metadata, one file per agent.
    //    The two payloads were near-identical copy-paste; now built by
    //    buildAgentModelExport() so they cannot drift apart.
    log('Exporting neural network models...', 'info');
    if (state.neuralNetworks.black) {
      zip.file("black_agent_model.json",
        JSON.stringify(buildAgentModelExport('black'), null, 2));
    }
    if (state.neuralNetworks.green) {
      zip.file("green_agent_model.json",
        JSON.stringify(buildAgentModelExport('green'), null, 2));
    }

    // 3. Export overall training statistics.
    log('Exporting training statistics...', 'info');
    const statsData = {
      training_summary: {
        total_games: state.validGames,
        total_moves: state.totalMoves,
        training_time: document.getElementById('training-timer').textContent,
        current_game: state.currentGameNumber,
        training_active: state.trainingActive
      },
      agent_comparison: {
        black_win_rate: ((state.agentStats.black.wins / Math.max(1, state.agentStats.black.totalGames)) * 100).toFixed(1),
        green_win_rate: ((state.agentStats.green.wins / Math.max(1, state.agentStats.green.totalGames)) * 100).toFixed(1),
        draws_rate: ((state.agentStats.black.draws / Math.max(1, state.agentStats.black.totalGames)) * 100).toFixed(1)
      },
      export_timestamp: new Date().toISOString(),
      system_info: {
        user_agent: navigator.userAgent,
        platform: navigator.platform,
        screen_resolution: `${window.screen.width}x${window.screen.height}`
      }
    };
    zip.file("training_statistics.json", JSON.stringify(statsData, null, 2));

    // 4. Create README file documenting the archive contents.
    log('Creating documentation...', 'info');
    const readme = `# Chess RL Training Export
## Export Information
- Export Date: ${new Date().toISOString()}
- Total Training Games: ${state.validGames}
- Total Moves: ${state.totalMoves}
- Training Time: ${document.getElementById('training-timer').textContent}
## Files Included
### 1. training_games.json
Complete dataset of all chess games played during training. Each game includes:
- Full PGN notation
- Move-by-move records
- Game result and metadata
- Agent parameters for each game
### 2. training_games.csv
Same data as JSON but in CSV format for easy import into spreadsheets or databases.
### 3. black_agent_model.json
Black Agent (Policy Network) configuration and statistics:
- Neural network architecture
- Hyperparameters (learning rate, exploration rate, etc.)
- Training statistics (wins, losses, draws)
- Model metadata
### 4. green_agent_model.json
Green Agent (Value Network) configuration and statistics:
- Neural network architecture
- Hyperparameters
- Training statistics
- Model metadata
### 5. training_statistics.json
Overall training summary and statistics including:
- Training duration
- Win rates for both agents
- System information
- Export metadata
## Training System
Generated by ANN Chess RL Trainer v3.0 - A web-based reinforcement learning system for chess AI development.
## Usage
These files can be used to:
- Continue training from this point
- Analyze the learning progress
- Import into other machine learning frameworks
- Share with the research community
## Notes
- All data is in standard JSON/CSV formats
- Compatible with Hugging Face datasets
- Can be compressed with GZIP, ZSTD, BZ2, LZ4, or LZMA for upload
`;
    zip.file("README.md", readme);

    // 5. Generate the ZIP and trigger the download via a temporary anchor.
    log('Generating ZIP archive...', 'info');
    const content = await zip.generateAsync({
      type: "blob",
      compression: "DEFLATE",
      compressionOptions: {
        level: 6
      }
    });
    const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
    const filename = `chess_rl_training_export_${timestamp}.zip`;
    const url = URL.createObjectURL(content);
    const a = document.createElement('a');
    a.href = url;
    a.download = filename;
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);
    // BUG FIX: the success message previously printed the literal text
    // "$(unknown)" (shell-style interpolation typo) instead of the filename.
    log(`Export completed successfully! File: ${filename}`, 'success');
    log('The ZIP file contains all training data, models, and statistics.', 'info');
  } catch (error) {
    console.error('Export error:', error);
    log(`Export failed: ${error.message}`, 'error');
  }
}

// Build the exportable metadata object for one agent: architecture,
// hyperparameters, and win/loss record. 'black' is the policy network,
// 'green' the value network (per the exported model_type labels).
function buildAgentModelExport(color) {
  const stats = state.agentStats[color];
  return {
    model_type: color === 'black' ? "chess_policy_network" : "chess_value_network",
    color: color,
    architecture: {
      input_size: 896,
      hidden_layers: [128, 64, 32],
      output_size: 1,
      activation: ["relu", "relu", "relu", "tanh"],
      dropout: 0.2
    },
    hyperparameters: {
      learning_rate: stats.learningRate,
      exploration_rate: stats.explorationRate,
      discount_factor: stats.discountFactor
    },
    training_stats: {
      wins: stats.wins,
      losses: stats.losses,
      draws: stats.draws,
      total_games: stats.totalGames,
      moves_made: stats.movesMade
    },
    export_timestamp: new Date().toISOString()
  };
}
function saveState() {
  // Snapshot the current training state (dataset capped to the last 50 games
  // to bound file size) and download it as a JSON file.
  const stateData = {
    agentStats: state.agentStats,
    dataset: state.dataset.slice(-50),
    validGames: state.validGames,
    totalMoves: state.totalMoves,
    currentGame: {
      number: state.currentGameNumber,
      fen: state.game ? state.game.fen() : null
    },
    timestamp: new Date().toISOString(),
    training_time: document.getElementById('training-timer').textContent
  };

  const blob = new Blob([JSON.stringify(stateData, null, 2)], { type: 'application/json' });
  const url = URL.createObjectURL(blob);
  const a = document.createElement('a');
  a.href = url;
  a.download = `chess_rl_state_${Date.now()}.json`;
  // Attach the anchor before clicking — some browsers ignore clicks on
  // detached elements; this matches exportEverything's download path.
  document.body.appendChild(a);
  a.click();
  document.body.removeChild(a);
  URL.revokeObjectURL(url);

  state.lastAutoSave = Date.now();
  log('Training state saved locally', 'success');
}
// Initialization
async function initializeNeuralNetworks() {
  // Build fresh networks and RL agents for both colors, reset the board,
  // and render the network visualizations.
  log('Initializing neural networks...', 'info');

  const blackNet = new ChessNeuralNetwork('BlackAgent', 'black', state.agentStats.black.learningRate);
  const greenNet = new ChessNeuralNetwork('GreenAgent', 'green', state.agentStats.green.learningRate);
  state.neuralNetworks.black = blackNet;
  state.neuralNetworks.green = greenNet;
  state.agents.black = new RLAgent('black', blackNet);
  state.agents.green = new RLAgent('green', greenNet);
  state.game = new Chess();

  visualizeNeuralNetwork('black-nn-viz', 'black');
  visualizeNeuralNetwork('green-nn-viz', 'green');

  log('Neural networks initialized successfully', 'success');
  logAgent('black', 'Neural network ready', 'success');
  logAgent('green', 'Neural network ready', 'success');
}
async function init() {
  // Animate the loading screen through its steps, then bring the trainer online.
  const loadingBar = document.querySelector('.loading-bar');
  const loadingDetails = document.getElementById('loading-details');
  const steps = [
    'Loading TensorFlow.js...',
    'Initializing neural networks...',
    'Setting up RL agents...',
    'Preparing visualization...',
    'Starting training environment...'
  ];

  // Cosmetic progress animation: 300ms per step.
  let completed = 0;
  for (const step of steps) {
    completed += 1;
    if (loadingDetails) {
      loadingDetails.textContent = step;
    }
    if (loadingBar) {
      loadingBar.style.width = `${(completed / steps.length) * 100}%`;
    }
    await new Promise(resolve => setTimeout(resolve, 300));
  }

  try {
    await initializeNeuralNetworks();
    updateQuantumBackground();
    renderChessBoard();
    updateAllMetrics();

    document.getElementById('loading-screen').style.display = 'none';
    document.querySelector('.container').style.opacity = '1';

    log('ANN Chess RL Trainer v3.0 ready!', 'success');
    log('Left: Black Agent (Policy Network)', 'info');
    log('Right: Green Agent (Value Network)', 'info');
    log('Click "Start Training" to begin reinforcement learning', 'info');
    log('Click "Export All" to download everything as a ZIP file', 'info');
    log('ZIP includes: games, models, statistics in JSON/CSV formats', 'info');
  } catch (error) {
    log(`Initialization failed: ${error.message}`, 'error');
    console.error(error);
  }
}
// Initialize on load
// Boot the trainer once the DOM and all external scripts (tf.js, chess.js,
// JSZip) have finished loading.
window.addEventListener('load', init);
// Keep the quantum-field background canvas sized to the viewport.
window.addEventListener('resize', updateQuantumBackground);
</script>
</body>
</html>