diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..bc16abf3ee36e07f27029ae399b2855eb83be1f6
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,15 @@
+node_modules
+dist
+build
+.git
+.gitignore
+venv
+env
+__pycache__
+*.pyc
+*.pyo
+*.pyd
+.DS_Store
+.env
+site-packages
+.gemini
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..8c28b49dbdba5f5f6425cdbd3553f6f202e4d051
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,69 @@
+# syntax=docker/dockerfile:1
+# Multi-stage Dockerfile for Neural Network Quantizer
+# Builds the React frontend, then serves it with the FastAPI backend.
+
+# ============================================
+# Stage 1: Build Frontend
+# ============================================
+FROM node:20-alpine AS frontend-build
+
+WORKDIR /app/frontend
+
+# Copy dependency manifests first so the npm ci layer stays cached
+# until package*.json actually changes.
+COPY frontend/package*.json ./
+
+# Reproducible install from the lockfile
+RUN npm ci
+
+# Copy frontend source and build the production bundle
+COPY frontend/ ./
+RUN npm run build
+
+# ============================================
+# Stage 2: Python Backend + Frontend
+# ============================================
+FROM python:3.11-slim
+
+# Set environment variables:
+# PYTHONDONTWRITEBYTECODE: keep .pyc files out of the image
+# PYTHONUNBUFFERED: stream logs straight to stdout for `docker logs`
+# (GRADIO_* vars removed: the app runs uvicorn/FastAPI, not Gradio)
+ENV PYTHONDONTWRITEBYTECODE=1 \
+    PYTHONUNBUFFERED=1
+
+WORKDIR /app
+
+# build-essential: compile any wheels that lack prebuilt binaries
+# curl: required by the HEALTHCHECK probe below
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    build-essential \
+    curl \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install Python dependencies before copying source (cache-friendly)
+COPY backend/requirements.txt ./requirements.txt
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy backend code
+COPY backend/ ./backend/
+
+# Copy the built frontend out of the node stage
+COPY --from=frontend-build /app/frontend/dist ./frontend/dist
+
+# Copy HuggingFace Spaces entry point
+COPY app.py ./
+
+# Run as a non-root user (uid 1000 is the HF Spaces convention)
+RUN useradd -m -u 1000 user
+USER user
+
+# Documentation only; HF Spaces routes traffic to 7860 (app_port in README)
+EXPOSE 7860
+
+# Probe the API health endpoint so orchestrators detect wedged containers
+HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
+    CMD curl -fsS http://localhost:7860/api/health || exit 1
+
+# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM from `docker stop`
+CMD ["python", "-m", "uvicorn", "backend.api.main:app", "--host", "0.0.0.0", "--port", "7860"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1246373f132c02578c69cfb206ec6b48cdd406fb
--- /dev/null
+++ b/README.md
@@ -0,0 +1,59 @@
+---
+title: Neural Network Quantizer
+emoji: ⚡
+colorFrom: indigo
+colorTo: purple
+sdk: docker
+pinned: false
+license: mit
+app_port: 7860
+---
+
+# Neural Network Weight Quantizer
+
+Quantize neural network weights to lower precision formats (INT8, INT4, NF4) with interactive visualizations.
+
+## Features
+
+- 🔢 Multi-bit quantization (4-bit, 8-bit)
+- 📊 Interactive weight visualizations
+- 🤗 HuggingFace model support (optional)
+- ⚡ GPU acceleration (when available)
+- 📈 Quantization error analysis
+- 🔄 Method comparison (INT8 vs INT4 vs NF4)
+
+## Quick Start
+
+1. Use the **Quantizer** tab to test on random weights
+2. Compare different methods in the **Analysis** tab
+3. Optionally load a HuggingFace model in the **Models** tab
+
+## API
+
+The backend exposes a REST API at `/api`:
+
+- `GET /api/system/info` - System capabilities
+- `POST /api/quantize/weights` - Quantize custom weights
+- `POST /api/models/load` - Load HuggingFace model
+- `POST /api/analysis/compare` - Compare methods
+
+## 🚀 Deployment
+
+### Hugging Face Spaces
+This project is configured for **Hugging Face Spaces** using the Docker SDK.
+
+1. Create a new Space on [Hugging Face](https://huggingface.co/new-space).
+2. Select **Docker** as the SDK.
+3. Push this repository to your Space:
+ ```bash
+ git remote add space https://huggingface.co/spaces/YOUR_USERNAME/YOUR_SPACE_NAME
+ git push space main
+ ```
+
+### Docker
+Run locally with Docker:
+```bash
+docker build -t quantizer .
+docker run -p 7860:7860 quantizer
+```
+Open `http://localhost:7860`.
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..89e49d51e6c24b8dde8265eee3cbb022480161e4
--- /dev/null
+++ b/app.py
@@ -0,0 +1,16 @@
+"""
+HuggingFace Spaces Entry Point
+This file serves as the entry point for HuggingFace Spaces deployment.
+It starts the FastAPI application which serves both the API and the React frontend.
+"""
+
+import uvicorn
+from backend.api.main import app
+
+if __name__ == "__main__":
+ uvicorn.run(
+ app,
+ host="0.0.0.0",
+ port=7860,
+ log_level="info"
+ )
\ No newline at end of file
diff --git a/backend/__init__.py b/backend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..80250dd9514959c99820e4f2e5f7fd9d57e5e0a0
--- /dev/null
+++ b/backend/__init__.py
@@ -0,0 +1 @@
+"""Backend package init"""
diff --git a/backend/__pycache__/__init__.cpython-312.pyc b/backend/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e93521e23e04b7733c5ad292fea8df77459707b
Binary files /dev/null and b/backend/__pycache__/__init__.cpython-312.pyc differ
diff --git a/backend/api/__init__.py b/backend/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2752d72bfc2accd83e5762ba2c5efb2a9cc2cb4d
--- /dev/null
+++ b/backend/api/__init__.py
@@ -0,0 +1,6 @@
+"""
+API Package Init
+"""
+from .main import app
+
+__all__ = ["app"]
diff --git a/backend/api/__pycache__/__init__.cpython-312.pyc b/backend/api/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f57441b0a4750aad6e44ccb07e5f03116694e8e
Binary files /dev/null and b/backend/api/__pycache__/__init__.cpython-312.pyc differ
diff --git a/backend/api/__pycache__/main.cpython-312.pyc b/backend/api/__pycache__/main.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1642e978f9ff9ba094d95ea305897e62604ffe7b
Binary files /dev/null and b/backend/api/__pycache__/main.cpython-312.pyc differ
diff --git a/backend/api/main.py b/backend/api/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..fea438083efcbd3ce3fa5368adf6547cda1b4301
--- /dev/null
+++ b/backend/api/main.py
@@ -0,0 +1,61 @@
+"""
+FastAPI Main Application
+Neural Network Weight Quantizer API
+"""
+
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.staticfiles import StaticFiles
+from fastapi.responses import FileResponse
+from pathlib import Path
+import os
+
+from .routes import quantization, models, analysis, system
+
+# Create FastAPI app
+app = FastAPI(
+ title="Neural Network Quantizer API",
+ description="API for quantizing neural network weights to lower precision formats",
+ version="1.0.0",
+ docs_url="/api/docs",
+ openapi_url="/api/openapi.json"
+)
+
+# CORS configuration
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"], # Configure appropriately in production
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+# Include routers
+app.include_router(system.router, prefix="/api/system", tags=["System"])
+app.include_router(models.router, prefix="/api/models", tags=["Models"])
+app.include_router(quantization.router, prefix="/api/quantize", tags=["Quantization"])
+app.include_router(analysis.router, prefix="/api/analysis", tags=["Analysis"])
+
+# Health check
+@app.get("/api/health")
+async def health_check():
+ return {"status": "healthy", "service": "quantizer-api"}
+
+# Serve frontend in production
+FRONTEND_DIR = Path(__file__).parent.parent.parent / "frontend" / "dist"
+
+if FRONTEND_DIR.exists():
+ app.mount("/assets", StaticFiles(directory=FRONTEND_DIR / "assets"), name="assets")
+
+ @app.get("/{full_path:path}")
+ async def serve_frontend(full_path: str):
+ # Serve index.html for SPA routing
+ file_path = FRONTEND_DIR / full_path
+ if file_path.exists() and file_path.is_file():
+ return FileResponse(file_path)
+ return FileResponse(FRONTEND_DIR / "index.html")
+
+
+if __name__ == "__main__":
+ import uvicorn
+ uvicorn.run(app, host="0.0.0.0", port=8000)
diff --git a/backend/api/routes/__init__.py b/backend/api/routes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a52617ffb189b38b57704ced9cc0bcc7dd93449
--- /dev/null
+++ b/backend/api/routes/__init__.py
@@ -0,0 +1 @@
+"""Routes package"""
diff --git a/backend/api/routes/__pycache__/__init__.cpython-312.pyc b/backend/api/routes/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e4999b159609ec87ea58e41175108b42d1a434d
Binary files /dev/null and b/backend/api/routes/__pycache__/__init__.cpython-312.pyc differ
diff --git a/backend/api/routes/__pycache__/analysis.cpython-312.pyc b/backend/api/routes/__pycache__/analysis.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8ceb9a8d341db74ce75ea0b99db892095052b08
Binary files /dev/null and b/backend/api/routes/__pycache__/analysis.cpython-312.pyc differ
diff --git a/backend/api/routes/__pycache__/models.cpython-312.pyc b/backend/api/routes/__pycache__/models.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b5712762be523e40cd37498d68affe493541936
Binary files /dev/null and b/backend/api/routes/__pycache__/models.cpython-312.pyc differ
diff --git a/backend/api/routes/__pycache__/quantization.cpython-312.pyc b/backend/api/routes/__pycache__/quantization.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ea46c4b39888d9107a6c901857b5105e5dc01276
Binary files /dev/null and b/backend/api/routes/__pycache__/quantization.cpython-312.pyc differ
diff --git a/backend/api/routes/__pycache__/system.cpython-312.pyc b/backend/api/routes/__pycache__/system.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..993bddd15afbb91abacc8454456a60a5bf3a2aec
Binary files /dev/null and b/backend/api/routes/__pycache__/system.cpython-312.pyc differ
diff --git a/backend/api/routes/analysis.py b/backend/api/routes/analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..76d8e624407f5814317c8673db1583112ac87e38
--- /dev/null
+++ b/backend/api/routes/analysis.py
@@ -0,0 +1,249 @@
+"""
+Analysis Routes
+Weight analysis and visualization endpoints
+"""
+
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel
+from typing import Optional, Dict, Any, List
+import torch
+
+from backend.core.model_loader import model_loader
+from backend.core.visualization import visualizer
+from backend.core.quantizer import (
+ QuantizationConfig, QuantizationMethod, QuantizationMode,
+ get_quantizer
+)
+
+router = APIRouter()
+
+
+class AnalyzeLayerRequest(BaseModel):
+ """Request to analyze a specific layer"""
+ layer_name: str
+
+
+class CompareQuantizationRequest(BaseModel):
+ """Compare different quantization methods on same weights"""
+ layer_name: Optional[str] = None
+ in_features: int = 64
+ out_features: int = 128
+ methods: List[str] = ["int8", "int4", "nf4"]
+
+
+@router.get("/weights/{layer_name}")
+async def get_weight_analysis(layer_name: str) -> Dict[str, Any]:
+ """
+ Get detailed weight analysis for a specific layer.
+ """
+ if model_loader is None or model_loader.get_model() is None:
+ raise HTTPException(status_code=404, detail="No model loaded")
+
+ weights = model_loader.get_layer_weights(layer_name)
+ if weights is None:
+ raise HTTPException(status_code=404, detail=f"Layer not found: {layer_name}")
+
+ # Flatten for analysis
+ flat = weights.flatten()
+
+ # Statistics
+ stats = {
+ "shape": list(weights.shape),
+ "dtype": str(weights.dtype),
+ "num_params": int(weights.numel()),
+ "memory_mb": weights.numel() * weights.element_size() / (1024 * 1024),
+ "min": float(weights.min()),
+ "max": float(weights.max()),
+ "mean": float(weights.mean()),
+ "std": float(weights.std()),
+ "median": float(torch.median(flat)),
+ "sparsity": float((weights == 0).sum() / weights.numel()),
+ "abs_mean": float(weights.abs().mean()),
+ "percentiles": {
+ "1%": float(torch.quantile(flat.float(), 0.01)),
+ "5%": float(torch.quantile(flat.float(), 0.05)),
+ "25%": float(torch.quantile(flat.float(), 0.25)),
+ "50%": float(torch.quantile(flat.float(), 0.50)),
+ "75%": float(torch.quantile(flat.float(), 0.75)),
+ "95%": float(torch.quantile(flat.float(), 0.95)),
+ "99%": float(torch.quantile(flat.float(), 0.99))
+ }
+ }
+
+ # Visualizations
+ heatmap = visualizer.to_dict(
+ visualizer.weight_heatmap(weights, f"Weights: {layer_name}")
+ )
+ histogram = visualizer.to_dict(
+ visualizer.weight_histogram(weights, "Weight Distribution")
+ )
+
+ return {
+ "layer_name": layer_name,
+ "stats": stats,
+ "visualizations": {
+ "heatmap": heatmap,
+ "histogram": histogram
+ }
+ }
+
+
+@router.post("/compare")
+async def compare_quantization_methods(request: CompareQuantizationRequest) -> Dict[str, Any]:
+ """
+ Compare multiple quantization methods on the same weights.
+ """
+ # Get or generate weights
+ if request.layer_name and model_loader and model_loader.get_model():
+ weights = model_loader.get_layer_weights(request.layer_name)
+ if weights is None:
+ raise HTTPException(status_code=404, detail=f"Layer not found: {request.layer_name}")
+ source = f"layer:{request.layer_name}"
+ else:
+ weights = torch.randn(request.out_features, request.in_features)
+ source = "random"
+
+ # Ensure 2D
+ if len(weights.shape) == 1:
+ weights = weights.unsqueeze(0)
+ elif len(weights.shape) > 2:
+ weights = weights.reshape(weights.shape[0], -1)
+
+ # Compare methods
+ method_map = {
+ "int8": QuantizationMethod.INT8,
+ "int4": QuantizationMethod.INT4,
+ "nf4": QuantizationMethod.NF4
+ }
+
+ comparison = []
+
+ for method_name in request.methods:
+ if method_name not in method_map:
+ continue
+
+ config = QuantizationConfig(
+ bits=8 if method_name == "int8" else 4,
+ method=method_map[method_name],
+ group_size=128 if method_name in ["int4", "nf4"] else None
+ )
+
+ try:
+ quantizer = get_quantizer(config)
+ result = quantizer.quantize(weights)
+
+ comparison.append({
+ "method": method_name,
+ "bits": config.bits,
+ "max_error": result.max_error,
+ "mean_error": result.mean_error,
+ "memory_savings_percent": result.memory_savings_percent,
+ "histogram": visualizer.to_dict(
+ visualizer.weight_histogram(
+ result.quantized_weights.float(),
+ f"{method_name.upper()} Distribution"
+ )
+ )
+ })
+ except Exception as e:
+ comparison.append({
+ "method": method_name,
+ "error": str(e)
+ })
+
+ return {
+ "source": source,
+ "original_shape": list(weights.shape),
+ "original_stats": {
+ "min": float(weights.min()),
+ "max": float(weights.max()),
+ "mean": float(weights.mean()),
+ "std": float(weights.std())
+ },
+ "comparison": comparison
+ }
+
+
+@router.get("/model-summary")
+async def get_model_summary() -> Dict[str, Any]:
+ """
+ Get summary statistics for all layers in loaded model.
+ """
+ if model_loader is None or model_loader.get_model() is None:
+ raise HTTPException(status_code=404, detail="No model loaded")
+
+ model_info = model_loader.get_model_info()
+ if model_info is None:
+ raise HTTPException(status_code=500, detail="Failed to get model info")
+
+ # Analyze each layer
+ layer_stats = []
+ total_params = 0
+ quantizable_params = 0
+
+ for layer in model_info.layers:
+ total_params += layer.num_params
+ if layer.is_quantizable:
+ quantizable_params += layer.num_params
+
+ layer_stats.append({
+ "name": layer.name,
+ "type": layer.module_type,
+ "params": layer.num_params,
+ "params_mb": layer.num_params * 4 / (1024 * 1024), # Assuming FP32
+ "quantizable": layer.is_quantizable
+ })
+
+ # Sort by parameter count
+ layer_stats.sort(key=lambda x: x["params"], reverse=True)
+
+ return {
+ "model_name": model_info.name,
+ "architecture": model_info.architecture,
+ "total_params": total_params,
+ "total_params_billions": total_params / 1e9,
+ "quantizable_params": quantizable_params,
+ "quantizable_percent": quantizable_params / total_params * 100 if total_params > 0 else 0,
+ "memory_fp32_gb": total_params * 4 / (1024**3),
+ "memory_int8_estimate_gb": quantizable_params * 1 / (1024**3) + (total_params - quantizable_params) * 4 / (1024**3),
+ "memory_int4_estimate_gb": quantizable_params * 0.5 / (1024**3) + (total_params - quantizable_params) * 4 / (1024**3),
+ "top_layers": layer_stats[:20] # Top 20 largest layers
+ }
+
+
+@router.get("/outliers/{layer_name}")
+async def detect_outliers(layer_name: str, threshold: float = 3.0) -> Dict[str, Any]:
+ """
+ Detect outlier weights that may cause quantization issues.
+ """
+ if model_loader is None or model_loader.get_model() is None:
+ raise HTTPException(status_code=404, detail="No model loaded")
+
+ weights = model_loader.get_layer_weights(layer_name)
+ if weights is None:
+ raise HTTPException(status_code=404, detail=f"Layer not found: {layer_name}")
+
+ flat = weights.flatten()
+ mean = flat.mean()
+ std = flat.std()
+
+ # Find outliers (values beyond threshold * std from mean)
+ outlier_mask = (flat - mean).abs() > threshold * std
+ num_outliers = outlier_mask.sum().item()
+ outlier_values = flat[outlier_mask].tolist()[:100] # Limit to 100
+
+ return {
+ "layer_name": layer_name,
+ "threshold": threshold,
+ "total_weights": int(flat.numel()),
+ "num_outliers": num_outliers,
+ "outlier_percent": num_outliers / flat.numel() * 100,
+ "mean": float(mean),
+ "std": float(std),
+ "outlier_range": {
+ "below": float(mean - threshold * std),
+ "above": float(mean + threshold * std)
+ },
+ "sample_outliers": outlier_values,
+ "recommendation": "Consider clipping or mixed-precision for this layer" if num_outliers > flat.numel() * 0.01 else "Layer is suitable for quantization"
+ }
diff --git a/backend/api/routes/models.py b/backend/api/routes/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..437cdb73aaa66ae9d434ca2e4effb8b68f1e6b89
--- /dev/null
+++ b/backend/api/routes/models.py
@@ -0,0 +1,411 @@
+"""
+Model Routes with Download Progress Streaming
+Supports HuggingFace Spaces with proper cache management
+"""
+
+from fastapi import APIRouter, HTTPException, BackgroundTasks
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel
+from typing import Optional, Dict, Any, List
+import torch
+import asyncio
+import json
+import traceback
+import time
+from backend.core.model_loader import model_loader
+
+from backend.core.model_manager import (
+ get_download_progress, set_download_progress, clear_download_progress,
+ get_cached_models, cleanup_old_models, delete_model_cache,
+ get_cache_stats, ensure_sample_models, start_cleanup_scheduler,
+ SAMPLE_MODELS
+)
+
+router = APIRouter()
+
+
+class LoadModelRequest(BaseModel):
+ """Request to load a model"""
+ model_name: str
+ dtype: str = "auto"
+ device: str = "auto"
+ trust_remote_code: bool = True
+
+
+class DeleteModelRequest(BaseModel):
+ """Request to delete a cached model"""
+ model_name: str
+
+
+# In-memory state
+_loaded_model = None
+_loaded_tokenizer = None
+_model_name = None
+
+# Start cleanup scheduler on module load
+start_cleanup_scheduler()
+
+
+def _get_device():
+ """Get best available device"""
+ if torch.cuda.is_available():
+ return "cuda"
+ elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+ return "mps"
+ return "cpu"
+
+
+def _get_torch_dtype(dtype_str: str, device: str):
+ """Convert dtype string to torch dtype"""
+ if dtype_str == "auto":
+ if device == "cuda":
+ return torch.float16
+ return torch.float32
+
+ dtype_map = {
+ "fp32": torch.float32,
+ "float32": torch.float32,
+ "fp16": torch.float16,
+ "float16": torch.float16,
+ "bf16": torch.bfloat16,
+ "bfloat16": torch.bfloat16,
+ }
+ return dtype_map.get(dtype_str, torch.float32)
+
+
+async def _load_model_with_progress(model_name: str, dtype: str, device: str, trust_remote_code: bool):
+ """Load model and yield progress updates"""
+ global _loaded_model, _loaded_tokenizer, _model_name
+
+ try:
+ from transformers import AutoModel, AutoTokenizer, AutoConfig
+ except ImportError:
+ yield {"type": "error", "error": "transformers library not installed"}
+ return
+
+ try:
+ # Phase 1: Fetching config
+ yield {"type": "progress", "phase": "config", "percent": 5, "message": "Fetching model configuration..."}
+
+ try:
+ config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)
+ except Exception as e:
+ yield {"type": "error", "error": f"Model not found: {str(e)}", "suggestion": "Check the model ID is correct"}
+ return
+
+ # Phase 2: Determine device and dtype
+ actual_device = device if device != "auto" else _get_device()
+ torch_dtype = _get_torch_dtype(dtype, actual_device)
+
+ yield {"type": "progress", "phase": "download", "percent": 10, "message": f"Downloading model to {actual_device}..."}
+
+ # Set download progress for polling
+ set_download_progress(model_name, {
+ "status": "downloading",
+ "percent": 10,
+ "message": "Downloading model files..."
+ })
+
+ # Phase 3: Download and load model
+ try:
+ model = AutoModel.from_pretrained(
+ model_name,
+ torch_dtype=torch_dtype,
+ trust_remote_code=trust_remote_code,
+ low_cpu_mem_usage=True
+ )
+ yield {"type": "progress", "phase": "download", "percent": 70, "message": "Model downloaded successfully"}
+ except Exception as e:
+ # Try without low_cpu_mem_usage
+ try:
+ model = AutoModel.from_pretrained(
+ model_name,
+ torch_dtype=torch_dtype,
+ trust_remote_code=trust_remote_code
+ )
+ yield {"type": "progress", "phase": "download", "percent": 70, "message": "Model downloaded (fallback mode)"}
+ except Exception as e2:
+ yield {"type": "error", "error": f"Failed to load model: {str(e2)}"}
+ clear_download_progress(model_name)
+ return
+
+ # Phase 4: Move to device
+ yield {"type": "progress", "phase": "device", "percent": 80, "message": f"Moving model to {actual_device}..."}
+
+ if actual_device != "cpu" and not hasattr(model, 'hf_device_map'):
+ try:
+ model = model.to(actual_device)
+ except Exception:
+ actual_device = "cpu"
+ model = model.to("cpu")
+
+ model.eval()
+
+ # Phase 5: Load tokenizer
+ yield {"type": "progress", "phase": "tokenizer", "percent": 90, "message": "Loading tokenizer..."}
+
+ try:
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=trust_remote_code)
+ except Exception:
+ tokenizer = None
+
+ # Store in memory
+ _loaded_model = model
+ _loaded_tokenizer = tokenizer
+ _model_name = model_name
+
+ # Sync with global model loader
+ if model_loader:
+ model_loader.register_model(model, model_name, tokenizer)
+
+ # Compute model info
+ num_params = sum(p.numel() for p in model.parameters())
+ memory_mb = sum(p.numel() * p.element_size() for p in model.parameters()) / (1024 * 1024)
+
+ quantizable_layers = []
+ for name, module in model.named_modules():
+ if any(t in module.__class__.__name__ for t in ["Linear", "Conv1d", "Conv2d"]):
+ quantizable_layers.append(name)
+
+ # Phase 6: Complete
+ clear_download_progress(model_name)
+
+ yield {
+ "type": "complete",
+ "percent": 100,
+ "model_info": {
+ "name": model_name,
+ "architecture": model.config.architectures[0] if hasattr(model.config, 'architectures') and model.config.architectures else "Unknown",
+ "num_params": num_params,
+ "num_params_millions": round(num_params / 1e6, 2),
+ "memory_mb": round(memory_mb, 2),
+ "device": str(next(model.parameters()).device),
+ "dtype": str(next(model.parameters()).dtype),
+ "num_quantizable_layers": len(quantizable_layers),
+ "has_tokenizer": tokenizer is not None,
+ "is_sample": model_name in SAMPLE_MODELS
+ }
+ }
+
+ except Exception as e:
+ clear_download_progress(model_name)
+ yield {"type": "error", "error": str(e), "traceback": traceback.format_exc()}
+
+
+@router.post("/load")
+async def load_model(request: LoadModelRequest) -> Dict[str, Any]:
+ """Load a model (non-streaming version for simple requests)"""
+ result = None
+ async for update in _load_model_with_progress(
+ request.model_name, request.dtype, request.device, request.trust_remote_code
+ ):
+ result = update
+
+ if result and result.get("type") == "complete":
+ return {"success": True, "model_info": result["model_info"]}
+ elif result and result.get("type") == "error":
+ return {"success": False, "error": result.get("error"), "suggestion": result.get("suggestion")}
+ else:
+ return {"success": False, "error": "Unknown error"}
+
+
+@router.post("/load/stream")
+async def load_model_stream(request: LoadModelRequest):
+ """Load a model with Server-Sent Events for progress updates"""
+
+ async def event_generator():
+ async for update in _load_model_with_progress(
+ request.model_name, request.dtype, request.device, request.trust_remote_code
+ ):
+ yield f"data: {json.dumps(update)}\n\n"
+ await asyncio.sleep(0.1) # Small delay between events
+
+ return StreamingResponse(
+ event_generator(),
+ media_type="text/event-stream",
+ headers={
+ "Cache-Control": "no-cache",
+ "Connection": "keep-alive",
+ }
+ )
+
+
+@router.get("/progress/{model_name}")
+async def get_model_progress(model_name: str) -> Dict[str, Any]:
+ """Get download progress for a model (polling endpoint)"""
+ progress = get_download_progress(model_name)
+ if progress:
+ return {"downloading": True, **progress}
+ return {"downloading": False}
+
+
+@router.get("/status")
+async def get_loading_status() -> Dict[str, Any]:
+ """Get current model loading status"""
+ return {
+ "model_loaded": _loaded_model is not None,
+ "model_name": _model_name,
+ "has_tokenizer": _loaded_tokenizer is not None
+ }
+
+
+@router.get("/info")
+async def get_model_info() -> Dict[str, Any]:
+ """Get information about the currently loaded model"""
+ if _loaded_model is None:
+ return {"loaded": False, "message": "No model loaded"}
+
+ num_params = sum(p.numel() for p in _loaded_model.parameters())
+ memory_mb = sum(p.numel() * p.element_size() for p in _loaded_model.parameters()) / (1024 * 1024)
+
+ return {
+ "loaded": True,
+ "name": _model_name,
+ "num_params": num_params,
+ "num_params_millions": round(num_params / 1e6, 2),
+ "memory_mb": round(memory_mb, 2),
+ "device": str(next(_loaded_model.parameters()).device),
+ "dtype": str(next(_loaded_model.parameters()).dtype)
+ }
+
+
+@router.get("/layers")
+async def get_layers() -> Dict[str, Any]:
+ """Get list of layers in the loaded model"""
+ if _loaded_model is None:
+ return {"error": "No model loaded", "layers": []}
+
+ layers = []
+ quantizable_names = []
+
+ for name, module in _loaded_model.named_modules():
+ if not name:
+ continue
+
+ module_type = module.__class__.__name__
+ is_quantizable = any(t in module_type for t in ["Linear", "Conv1d", "Conv2d", "Embedding"])
+
+ shape = None
+ num_params = 0
+ if hasattr(module, 'weight') and module.weight is not None:
+ shape = list(module.weight.shape)
+ num_params = module.weight.numel()
+
+ if num_params > 0:
+ layers.append({
+ "name": name,
+ "type": module_type,
+ "shape": shape,
+ "params": num_params,
+ "quantizable": is_quantizable
+ })
+
+ if is_quantizable:
+ quantizable_names.append(name)
+
+ return {
+ "total_layers": len(layers),
+ "quantizable_count": len(quantizable_names),
+ "quantizable_layers": quantizable_names,
+ "layers": layers
+ }
+
+
+@router.post("/unload")
+async def unload_model() -> Dict[str, Any]:
+ """Unload the current model and free memory"""
+ global _loaded_model, _loaded_tokenizer, _model_name
+
+ if _loaded_model is not None:
+ del _loaded_model
+ _loaded_model = None
+
+ if _loaded_tokenizer is not None:
+ del _loaded_tokenizer
+ _loaded_tokenizer = None
+
+ _model_name = None
+
+ # Sync with global module loader
+ if model_loader:
+ model_loader.unload()
+
+ import gc
+ gc.collect()
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+
+ return {"success": True, "message": "Model unloaded"}
+
+
+# ============================================
+# Cache Management Endpoints
+# ============================================
+
+@router.get("/cache")
+async def get_cache_info() -> Dict[str, Any]:
+ """Get information about cached models"""
+ return get_cache_stats()
+
+
+@router.post("/cache/cleanup")
+async def trigger_cleanup(hours: float = 4.0) -> Dict[str, Any]:
+ """Manually trigger cache cleanup"""
+ result = cleanup_old_models(hours)
+ return {
+ "success": True,
+ "deleted_count": len(result["deleted"]),
+ "kept_count": len(result["kept"]),
+ **result
+ }
+
+
+@router.delete("/cache/{model_name:path}")
+async def delete_cached_model(model_name: str) -> Dict[str, Any]:
+ """Delete a specific model from cache"""
+ if model_name in SAMPLE_MODELS:
+ return {"success": False, "error": "Cannot delete sample models"}
+
+ success = delete_model_cache(model_name)
+ return {"success": success, "model_name": model_name}
+
+
+# ============================================
+# Example Models
+# ============================================
+
+@router.get("/examples")
+async def get_example_models() -> Dict[str, Any]:
+ """Get list of example models for testing"""
+ return {
+ "sample_models": [
+ {"id": model, "is_default": True, "description": "Pre-cached for quick testing"}
+ for model in SAMPLE_MODELS
+ ],
+ "small_models": [
+ {"id": "gpt2", "size": "124M", "description": "GPT-2 base model"},
+ {"id": "distilbert-base-uncased", "size": "66M", "description": "DistilBERT for NLP"},
+ {"id": "prajjwal1/bert-tiny", "size": "4.4M", "description": "Tiny BERT for testing"},
+ {"id": "microsoft/DialoGPT-small", "size": "124M", "description": "Small conversational model"},
+ ],
+ "medium_models": [
+ {"id": "gpt2-medium", "size": "355M", "description": "GPT-2 medium"},
+ {"id": "bert-base-uncased", "size": "110M", "description": "BERT base model"},
+ ],
+ "cleanup_policy": f"Non-sample models are deleted after {4} hours of inactivity",
+ "note": "Sample models are always available for quick testing"
+ }
+
+
+# Helper functions for other routes
+def get_loaded_model():
+ return _loaded_model
+
+
+def get_layer_weights_tensor(layer_name: str):
+ if _loaded_model is None:
+ return None
+ for name, module in _loaded_model.named_modules():
+ if name == layer_name and hasattr(module, 'weight'):
+ return module.weight.data.clone()
+ return None
diff --git a/backend/api/routes/quantization.py b/backend/api/routes/quantization.py
new file mode 100644
index 0000000000000000000000000000000000000000..21fe775a6123ba6e3c5a621607545f56ce353bd8
--- /dev/null
+++ b/backend/api/routes/quantization.py
@@ -0,0 +1,366 @@
+"""
+Quantization Routes
+Core quantization API endpoints
+"""
+
+from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect
+from pydantic import BaseModel
+from typing import Optional, Dict, Any, List
+import torch
+import asyncio
+import json
+
+from backend.core.quantizer import (
+ QuantizationConfig, QuantizationMethod, QuantizationMode,
+ INT8Quantizer, INT4Quantizer, NF4Quantizer, get_quantizer
+)
+from backend.core.model_loader import model_loader
+from backend.core.visualization import visualizer
+
+router = APIRouter()
+
+
class QuantizeWeightsRequest(BaseModel):
    """Request to quantize custom (synthetically generated) weights."""
    in_features: int = 64   # columns of the generated weight matrix
    out_features: int = 128  # rows of the generated weight matrix
    bits: int = 8  # 4 or 8
    method: str = "int8"  # int8, int4, nf4
    mode: str = "symmetric"  # symmetric, asymmetric
    group_size: Optional[int] = None  # None = per-channel quantization
    weight_pattern: str = "random"  # random, eye, ones, alternating, gradient
    dtype: str = "float32"  # float32, float16, bfloat16
+
+
class QuantizeLayerRequest(BaseModel):
    """Request to quantize a specific layer from loaded model."""
    layer_name: str  # dotted module path as reported by the model info routes
    bits: int = 8  # 4 or 8
    method: str = "int8"  # int8, int4, nf4
    mode: str = "symmetric"  # symmetric, asymmetric
    group_size: Optional[int] = None  # None = per-channel quantization
+
+
class QuantizeModelRequest(BaseModel):
    """Request to quantize entire model."""
    bits: int = 8  # 4 or 8
    method: str = "int8"  # int8, int4, nf4
    mode: str = "symmetric"  # symmetric, asymmetric
    group_size: Optional[int] = None  # None = per-channel quantization
    # Pydantic deep-copies field defaults, so the mutable [] default is safe.
    layers_to_skip: List[str] = []
    layers_to_include: Optional[List[str]] = None  # None = all quantizable
+
+
+def _generate_weights(pattern: str, out_features: int, in_features: int,
+ dtype: torch.dtype) -> torch.Tensor:
+ """Generate weights based on pattern"""
+ if pattern == "random":
+ return torch.randn((out_features, in_features), dtype=dtype)
+ elif pattern == "eye":
+ weights = torch.zeros((out_features, in_features), dtype=dtype)
+ min_dim = min(out_features, in_features)
+ weights[:min_dim, :min_dim] = torch.eye(min_dim, dtype=dtype)
+ return weights
+ elif pattern == "ones":
+ return torch.ones((out_features, in_features), dtype=dtype)
+ elif pattern == "alternating":
+ weights = torch.ones((out_features, in_features), dtype=dtype)
+ for i in range(out_features):
+ for j in range(in_features):
+ if (i + j) % 2 == 1:
+ weights[i, j] = -1.0
+ return weights
+ elif pattern == "gradient":
+ x = torch.linspace(-1, 1, in_features)
+ y = torch.linspace(-1, 1, out_features)
+ xx, yy = torch.meshgrid(x, y, indexing='ij')
+ return (xx + yy).t().to(dtype)
+ else:
+ return torch.randn((out_features, in_features), dtype=dtype)
+
+
def _get_quantizer_from_config(request) -> tuple:
    """Build (quantizer, config) from a request's quantization fields.

    Unrecognized method/mode strings silently fall back to INT8/symmetric.
    """
    method = {
        "int8": QuantizationMethod.INT8,
        "int4": QuantizationMethod.INT4,
        "nf4": QuantizationMethod.NF4,
    }.get(request.method, QuantizationMethod.INT8)
    mode = {
        "symmetric": QuantizationMode.SYMMETRIC,
        "asymmetric": QuantizationMode.ASYMMETRIC,
    }.get(request.mode, QuantizationMode.SYMMETRIC)

    config = QuantizationConfig(
        bits=request.bits,
        method=method,
        mode=mode,
        group_size=request.group_size,
    )
    return get_quantizer(config), config
+
+
+@router.post("/weights")
+async def quantize_custom_weights(request: QuantizeWeightsRequest) -> Dict[str, Any]:
+ """
+ Quantize custom generated weights.
+ This endpoint works without loading a real model.
+ """
+ # Map dtype
+ dtype_map = {
+ "float32": torch.float32,
+ "float16": torch.float16,
+ "bfloat16": torch.bfloat16
+ }
+ dtype = dtype_map.get(request.dtype, torch.float32)
+
+ # Generate weights
+ weights = _generate_weights(
+ request.weight_pattern,
+ request.out_features,
+ request.in_features,
+ dtype
+ )
+
+ # Get quantizer
+ quantizer, config = _get_quantizer_from_config(request)
+
+ # Quantize
+ result = quantizer.quantize(weights)
+
+ # Dequantize for visualization
+ dequantized = quantizer.dequantize(result)
+
+ # Generate visualizations
+ original_heatmap = visualizer.to_dict(
+ visualizer.weight_heatmap(weights, "Original Weights")
+ )
+ quantized_heatmap = visualizer.to_dict(
+ visualizer.weight_heatmap(result.quantized_weights.float(), f"Quantized Weights ({request.bits}-bit)")
+ )
+ dequantized_heatmap = visualizer.to_dict(
+ visualizer.weight_heatmap(dequantized, "Dequantized Weights")
+ )
+ error_heatmap = visualizer.to_dict(
+ visualizer.weight_heatmap((weights - dequantized).abs(), "Quantization Error")
+ )
+ original_hist = visualizer.to_dict(
+ visualizer.weight_histogram(weights, "Original Distribution")
+ )
+ quantized_hist = visualizer.to_dict(
+ visualizer.weight_histogram(result.quantized_weights.float(), "Quantized Distribution")
+ )
+ scales_hist = visualizer.to_dict(
+ visualizer.scales_histogram(result.scales)
+ )
+
+ return {
+ "success": True,
+ "config": config.to_dict(),
+ "stats": {
+ "original_shape": list(weights.shape),
+ "quantized_shape": list(result.quantized_weights.shape),
+ "scales_shape": list(result.scales.shape),
+ "max_error": result.max_error,
+ "mean_error": result.mean_error,
+ "memory_savings_percent": result.memory_savings_percent,
+ "original_dtype": str(weights.dtype),
+ "quantized_dtype": str(result.quantized_weights.dtype)
+ },
+ "visualizations": {
+ "original_heatmap": original_heatmap,
+ "quantized_heatmap": quantized_heatmap,
+ "dequantized_heatmap": dequantized_heatmap,
+ "error_heatmap": error_heatmap,
+ "original_histogram": original_hist,
+ "quantized_histogram": quantized_hist,
+ "scales_histogram": scales_hist
+ }
+ }
+
+
+@router.post("/layer")
+async def quantize_layer(request: QuantizeLayerRequest) -> Dict[str, Any]:
+ """
+ Quantize a specific layer from the loaded model.
+ Requires a model to be loaded first.
+ """
+ if model_loader is None or model_loader.get_model() is None:
+ raise HTTPException(
+ status_code=400,
+ detail="No model loaded. Load a model first or use /quantize/weights for custom weights."
+ )
+
+ # Get layer weights
+ weights = model_loader.get_layer_weights(request.layer_name)
+ if weights is None:
+ raise HTTPException(status_code=404, detail=f"Layer not found: {request.layer_name}")
+
+ # Ensure 2D
+ original_shape = weights.shape
+ if len(weights.shape) == 1:
+ weights = weights.unsqueeze(0)
+ elif len(weights.shape) > 2:
+ weights = weights.reshape(weights.shape[0], -1)
+
+ # Get quantizer
+ quantizer, config = _get_quantizer_from_config(request)
+
+ # Quantize
+ result = quantizer.quantize(weights)
+ dequantized = quantizer.dequantize(result)
+
+ # Generate Visualizations
+ original_hist = visualizer.to_dict(visualizer.weight_histogram(weights, "Original Distribution"))
+ quantized_hist = visualizer.to_dict(visualizer.weight_histogram(result.quantized_weights.float(), "Quantized Distribution"))
+ scales_hist = visualizer.to_dict(visualizer.scales_histogram(result.scales))
+
+ return {
+ "success": True,
+ "layer_name": request.layer_name,
+ "config": config.to_dict(),
+ "stats": {
+ "original_shape": list(original_shape),
+ "quantized_shape": list(result.quantized_weights.shape),
+ "scales_shape": list(result.scales.shape),
+ "max_error": result.max_error,
+ "mean_error": result.mean_error,
+ "memory_savings_percent": result.memory_savings_percent,
+ "original_dtype": str(weights.dtype),
+ "quantized_dtype": str(result.quantized_weights.dtype)
+ },
+ "visualizations": {
+ "original_heatmap": visualizer.to_dict(
+ visualizer.weight_heatmap(weights, f"Original: {request.layer_name}")
+ ),
+ "quantized_heatmap": visualizer.to_dict(
+ visualizer.weight_heatmap(result.quantized_weights.float(), f"Quantized ({request.bits}-bit)")
+ ),
+ "dequantized_heatmap": visualizer.to_dict(
+ visualizer.weight_heatmap(dequantized, "Dequantized Weights")
+ ),
+ "error_heatmap": visualizer.to_dict(
+ visualizer.weight_heatmap((weights - dequantized).abs(), "Error")
+ ),
+ "original_histogram": original_hist,
+ "quantized_histogram": quantized_hist,
+ "scales_histogram": scales_hist
+ }
+ }
+
+
+@router.post("/model")
+async def quantize_model(request: QuantizeModelRequest) -> Dict[str, Any]:
+ """
+ Quantize all quantizable layers in the loaded model.
+ Returns summary statistics for all layers.
+ """
+ if model_loader is None or model_loader.get_model() is None:
+ raise HTTPException(
+ status_code=400,
+ detail="No model loaded. This feature requires a loaded model."
+ )
+
+ model_info = model_loader.get_model_info()
+ if model_info is None:
+ raise HTTPException(status_code=500, detail="Failed to get model info")
+
+ # Determine layers to quantize
+ if request.layers_to_include:
+ layers_to_quantize = request.layers_to_include
+ else:
+ layers_to_quantize = model_info.quantizable_layers
+
+ # Remove skipped layers
+ layers_to_quantize = [l for l in layers_to_quantize if l not in request.layers_to_skip]
+
+ # Get quantizer
+ quantizer, config = _get_quantizer_from_config(request)
+
+ # Quantize each layer
+ results = []
+ total_memory_saved = 0
+ total_original_size = 0
+
+ for layer_name in layers_to_quantize:
+ weights = model_loader.get_layer_weights(layer_name)
+ if weights is None:
+ continue
+
+ # Handle non-2D weights
+ original_shape = weights.shape
+ if len(weights.shape) == 1:
+ weights = weights.unsqueeze(0)
+ elif len(weights.shape) > 2:
+ weights = weights.reshape(weights.shape[0], -1)
+
+ try:
+ result = quantizer.quantize(weights)
+
+ original_bytes = weights.numel() * weights.element_size()
+ total_original_size += original_bytes
+ total_memory_saved += original_bytes * (result.memory_savings_percent / 100)
+
+ results.append({
+ "layer": layer_name,
+ "shape": list(original_shape),
+ "max_error": result.max_error,
+ "mean_error": result.mean_error,
+ "memory_savings_percent": result.memory_savings_percent
+ })
+ except Exception as e:
+ results.append({
+ "layer": layer_name,
+ "error": str(e)
+ })
+
+ return {
+ "success": True,
+ "config": config.to_dict(),
+ "summary": {
+ "layers_quantized": len([r for r in results if "error" not in r]),
+ "layers_failed": len([r for r in results if "error" in r]),
+ "total_memory_saved_mb": total_memory_saved / (1024 * 1024),
+ "average_memory_savings_percent": (total_memory_saved / total_original_size * 100) if total_original_size > 0 else 0
+ },
+ "layers": results
+ }
+
+
# WebSocket for real-time progress
@router.websocket("/stream")
async def quantization_stream(websocket: WebSocket):
    """WebSocket endpoint for streaming quantization progress.

    NOTE(review): this is currently a stub — the received request is
    parsed but never acted on, and progress is simulated on a timer.
    Wire it to the real quantization pipeline before relying on it.
    """
    await websocket.accept()

    try:
        while True:
            # Receive quantization request
            data = await websocket.receive_text()
            request_data = json.loads(data)  # parsed but unused by the stub

            # Process and send updates
            await websocket.send_json({
                "type": "progress",
                "progress": 0,
                "message": "Starting quantization..."
            })

            # Simulate progress (in real implementation, this would be actual quantization)
            for i in range(0, 101, 10):
                await asyncio.sleep(0.1)
                await websocket.send_json({
                    "type": "progress",
                    "progress": i,
                    "message": f"Processing... {i}%"
                })

            await websocket.send_json({
                "type": "complete",
                "message": "Quantization complete"
            })

    except WebSocketDisconnect:
        # Client disconnected; nothing to clean up.
        pass
diff --git a/backend/api/routes/system.py b/backend/api/routes/system.py
new file mode 100644
index 0000000000000000000000000000000000000000..13a070a79fac09d555a673712f6709e047ac33e3
--- /dev/null
+++ b/backend/api/routes/system.py
@@ -0,0 +1,64 @@
+"""
+System Routes
+Hardware detection and system information
+"""
+
+from fastapi import APIRouter
+from typing import Dict, Any
+
+from backend.core.system_checker import system_checker, check_model_requirements
+
+router = APIRouter()
+
+
+@router.get("/info")
+async def get_system_info() -> Dict[str, Any]:
+ """
+ Get complete system information including GPU, RAM, and capabilities.
+ """
+ return system_checker.to_dict()
+
+
+@router.get("/capabilities")
+async def get_capabilities() -> Dict[str, Any]:
+ """
+ Get system capabilities for quantization tasks.
+ """
+ info = system_checker.check()
+ return {
+ "capability": info.capability.value,
+ "recommended_batch_size": info.recommended_batch_size,
+ "max_model_size": info.max_model_size,
+ "cuda_available": info.cuda_available,
+ "mps_available": info.mps_available,
+ "gpus": [
+ {
+ "name": gpu.name,
+ "memory_gb": gpu.total_memory_gb
+ }
+ for gpu in info.gpus
+ ]
+ }
+
+
+@router.post("/check-model")
+async def check_model_requirements_endpoint(
+ model_params_billions: float,
+ dtype: str = "fp16"
+) -> Dict[str, Any]:
+ """
+ Check if system can handle a model of specified size.
+
+ Args:
+ model_params_billions: Model size in billions of parameters
+ dtype: Data type (fp32, fp16, int8, int4)
+ """
+ return check_model_requirements(model_params_billions, dtype)
+
+
+@router.get("/refresh")
+async def refresh_system_info() -> Dict[str, Any]:
+ """
+ Force refresh system information.
+ """
+ return system_checker.check(force_refresh=True).__dict__
diff --git a/backend/core/__init__.py b/backend/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..45a1edbf4ad2b8b21cc55d5b784b420951808b45
--- /dev/null
+++ b/backend/core/__init__.py
@@ -0,0 +1,6 @@
+"""
+PyTorch Neural Network Quantizer - Backend Core Package
+Multi-bit quantization engine supporting 4-bit, 8-bit, NF4, and GPTQ methods.
+"""
+
+__version__ = "1.0.0"
diff --git a/backend/core/__pycache__/__init__.cpython-312.pyc b/backend/core/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c31af216559b969a5ce1cde47fd494452a9bfa81
Binary files /dev/null and b/backend/core/__pycache__/__init__.cpython-312.pyc differ
diff --git a/backend/core/__pycache__/model_loader.cpython-312.pyc b/backend/core/__pycache__/model_loader.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..818b40adbe52a45f65c4986b0d03aafa0731eec6
Binary files /dev/null and b/backend/core/__pycache__/model_loader.cpython-312.pyc differ
diff --git a/backend/core/__pycache__/model_manager.cpython-312.pyc b/backend/core/__pycache__/model_manager.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8792b8f757d5bc4343c8b545692119d553e68f1e
Binary files /dev/null and b/backend/core/__pycache__/model_manager.cpython-312.pyc differ
diff --git a/backend/core/__pycache__/quantizer.cpython-312.pyc b/backend/core/__pycache__/quantizer.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..15c709b1e27b0824581a2fc93a7dd8e2b26e0ae9
Binary files /dev/null and b/backend/core/__pycache__/quantizer.cpython-312.pyc differ
diff --git a/backend/core/__pycache__/system_checker.cpython-312.pyc b/backend/core/__pycache__/system_checker.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7fb646cb501a99306c50b96e593d73a745c2a51
Binary files /dev/null and b/backend/core/__pycache__/system_checker.cpython-312.pyc differ
diff --git a/backend/core/__pycache__/visualization.cpython-312.pyc b/backend/core/__pycache__/visualization.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aca7287fa8692956cccd101b3edb97377aadaf22
Binary files /dev/null and b/backend/core/__pycache__/visualization.cpython-312.pyc differ
diff --git a/backend/core/model_loader.py b/backend/core/model_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7b3bbb453c9c344bf09874fcd5535b016347652
--- /dev/null
+++ b/backend/core/model_loader.py
@@ -0,0 +1,411 @@
+"""
+HuggingFace Model Loader
+Loads models from HuggingFace Hub or local files with memory-efficient options.
+"""
+
+import torch
+import gc
+from pathlib import Path
+from typing import Optional, Dict, Any, List, Tuple, Union, TYPE_CHECKING
+from dataclasses import dataclass
+from enum import Enum
+
+try:
+ from transformers import (
+ AutoModel, AutoModelForCausalLM, AutoModelForSequenceClassification,
+ AutoTokenizer, AutoConfig
+ )
+ HAS_TRANSFORMERS = True
+except ImportError:
+ HAS_TRANSFORMERS = False
+
+if TYPE_CHECKING:
+ from transformers import PreTrainedModel
+
+from .system_checker import system_checker, check_model_requirements
+
+
class ModelType(Enum):
    """Supported model types (selects the AutoModel* class used in load())."""
    CAUSAL_LM = "causal_lm"  # AutoModelForCausalLM
    SEQUENCE_CLASSIFICATION = "sequence_classification"  # AutoModelForSequenceClassification
    GENERIC = "generic"  # AutoModel (base encoder)
+
+
@dataclass
class LayerInfo:
    """Information about a single parameter-owning module inside a model."""
    name: str  # dotted module path from named_modules()
    module_type: str  # module class name, e.g. "Linear"
    shape: Optional[Tuple[int, ...]]  # weight shape; None if module has no weight
    num_params: int  # weight + bias element count
    dtype: str  # str() of the weight dtype, "N/A" when there is no weight
    is_quantizable: bool  # class name matched ModelLoader.QUANTIZABLE_TYPES
+
+
@dataclass
class ModelInfo:
    """Complete model information gathered by ModelLoader._analyze_model."""
    name: str  # model id or local path as given to load()
    model_type: ModelType  # loader class family used
    architecture: str  # config.architectures[0] or "Unknown"
    num_params: int  # summed over parameter-owning modules
    num_params_billions: float  # num_params / 1e9
    hidden_size: int  # from config (default 768)
    num_layers: int  # from config num_hidden_layers (default 12)
    vocab_size: Optional[int]  # None when config lacks vocab_size
    dtype: str  # dtype of the model's first parameter
    memory_footprint_gb: float  # parameter bytes / 1024**3, rounded
    layers: List[LayerInfo]  # every named module that owns parameters
    quantizable_layers: List[str]  # names whose type matches QUANTIZABLE_TYPES
+
+
class ModelLoader:
    """
    Load and inspect HuggingFace models with memory-efficient options.
    Provides layer-by-layer analysis for selective quantization.

    Holds at most one model at a time; load() implicitly unloads the
    previous model before loading the next.
    """

    # Layer types that can be quantized (substring match against the
    # module's class name — see _analyze_model)
    QUANTIZABLE_TYPES = (
        "Linear",
        "Conv1d",
        "Conv2d",
        "Embedding"
    )

    def __init__(self):
        # Fail fast with an actionable message when transformers is absent.
        if not HAS_TRANSFORMERS:
            raise ImportError(
                "transformers library not installed. "
                "Install with: pip install transformers"
            )
        self._loaded_model = None  # Optional[PreTrainedModel]
        self._model_info: Optional[ModelInfo] = None
        self._tokenizer = None

    def check_requirements(self, model_name: str, dtype: str = "fp16") -> Dict[str, Any]:
        """Check if system can load the model before attempting"""
        try:
            config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)

            # Estimate parameters
            if hasattr(config, 'num_parameters'):
                num_params = config.num_parameters
            else:
                # Estimate from config
                hidden = getattr(config, 'hidden_size', 768)
                layers = getattr(config, 'num_hidden_layers', 12)
                vocab = getattr(config, 'vocab_size', 30000)
                num_params = self._estimate_params(hidden, layers, vocab)

            params_billions = num_params / 1e9
            return check_model_requirements(params_billions, dtype)

        except Exception as e:
            # Unknown repo / network failure: report instead of raising so
            # the API can surface the problem to the client.
            return {
                "can_load": False,
                "error": str(e),
                "warnings": [f"Failed to fetch model config: {str(e)}"]
            }

    def _estimate_params(self, hidden: int, layers: int, vocab: int) -> int:
        """Estimate parameter count from config"""
        # Rough estimate: embeddings + transformer layers
        embedding_params = vocab * hidden
        # Each layer: attention (4 * hidden^2) + FFN (8 * hidden^2)
        layer_params = layers * (12 * hidden * hidden)
        return embedding_params + layer_params

    def load(self, model_name: str,
             model_type: ModelType = ModelType.GENERIC,
             dtype: str = "auto",
             device: str = "auto",
             trust_remote_code: bool = True,
             low_memory: bool = False) -> Tuple[Any, Optional[Any]]:
        """
        Load a model from HuggingFace Hub or local path.

        Args:
            model_name: HuggingFace model ID or local path
            model_type: Type of model to load
            dtype: Data type ("auto", "fp32", "fp16", "bf16")
            device: Device to load to ("auto", "cuda", "cpu", "mps")
            trust_remote_code: Allow custom code from model repos
            low_memory: Use memory-efficient loading

        Returns:
            Tuple of (model, tokenizer); tokenizer may be None if the
            checkpoint ships without one.
        """
        # Clear previous model
        self.unload()

        # Determine device (prefer CUDA, then Apple MPS, else CPU)
        if device == "auto":
            sys_info = system_checker.check()
            if sys_info.cuda_available:
                device = "cuda"
            elif sys_info.mps_available:
                device = "mps"
            else:
                device = "cpu"

        # Determine dtype
        if dtype == "auto":
            if device == "cuda":
                dtype = "fp16"
            elif device == "mps":
                dtype = "fp32"  # MPS has limited bf16 support
            else:
                dtype = "fp32"

        torch_dtype = {
            "fp32": torch.float32,
            "fp16": torch.float16,
            "bf16": torch.bfloat16
        }.get(dtype, torch.float32)

        # Load config first (also validates the repo id before the heavy
        # weight download)
        config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)

        # Select model class
        if model_type == ModelType.CAUSAL_LM:
            model_class = AutoModelForCausalLM
        elif model_type == ModelType.SEQUENCE_CLASSIFICATION:
            model_class = AutoModelForSequenceClassification
        else:
            model_class = AutoModel

        # Load model
        load_kwargs = {
            "pretrained_model_name_or_path": model_name,
            "torch_dtype": torch_dtype,
            "trust_remote_code": trust_remote_code,
        }

        if low_memory:
            load_kwargs["low_cpu_mem_usage"] = True
            if device == "cuda":
                load_kwargs["device_map"] = "auto"

        model = model_class.from_pretrained(**load_kwargs)

        # device_map="auto" (low_memory + cuda) already placed the
        # weights; only move manually in the other cases.
        if not low_memory and device != "cpu":
            model = model.to(device)

        model.eval()

        # Load tokenizer (best-effort: some checkpoints have none)
        try:
            tokenizer = AutoTokenizer.from_pretrained(
                model_name, trust_remote_code=trust_remote_code
            )
        except Exception:
            tokenizer = None

        self._loaded_model = model
        self._tokenizer = tokenizer
        self._model_info = self._analyze_model(model, model_name, model_type)

        return model, tokenizer

    def load_weights_only(self, model_name: str) -> Dict[str, torch.Tensor]:
        """
        Load only the state dict without instantiating the model.
        More memory efficient for inspection.

        Raises:
            RuntimeError: when neither safetensors nor the torch pickle
                checkpoint could be fetched/loaded.
        """
        from safetensors import safe_open
        from huggingface_hub import hf_hub_download

        try:
            # Try safetensors first
            path = hf_hub_download(model_name, "model.safetensors")
            weights = {}
            with safe_open(path, framework="pt") as f:
                for key in f.keys():
                    weights[key] = f.get_tensor(key)
            return weights
        except Exception:
            # Fallback to torch
            # NOTE(review): torch.load unpickles arbitrary objects from
            # the downloaded file; consider weights_only=True for
            # untrusted checkpoints — confirm the minimum torch version.
            try:
                path = hf_hub_download(model_name, "pytorch_model.bin")
                return torch.load(path, map_location="cpu")
            except Exception as e:
                raise RuntimeError(f"Failed to load weights: {str(e)}")

    def _analyze_model(self, model: Any, name: str,
                       model_type: ModelType) -> ModelInfo:
        """Analyze model structure and extract layer information"""
        layers = []
        quantizable_layers = []
        total_params = 0

        for layer_name, module in model.named_modules():
            # Skip the unnamed root module.
            if not layer_name:
                continue

            # Get module info
            module_type = module.__class__.__name__

            # Check if quantizable
            is_quantizable = any(
                qt in module_type for qt in self.QUANTIZABLE_TYPES
            )

            # Get shape and params for leaf modules
            shape = None
            num_params = 0
            dtype = "N/A"

            if hasattr(module, 'weight') and module.weight is not None:
                shape = tuple(module.weight.shape)
                num_params = module.weight.numel()
                dtype = str(module.weight.dtype)
                if hasattr(module, 'bias') and module.bias is not None:
                    num_params += module.bias.numel()

            # Only record modules that actually own parameters.
            if num_params > 0:
                total_params += num_params
                layers.append(LayerInfo(
                    name=layer_name,
                    module_type=module_type,
                    shape=shape,
                    num_params=num_params,
                    dtype=dtype,
                    is_quantizable=is_quantizable
                ))

                if is_quantizable:
                    quantizable_layers.append(layer_name)

        # Get config info
        config = model.config
        hidden_size = getattr(config, 'hidden_size', 768)
        num_layers = getattr(config, 'num_hidden_layers', 12)
        vocab_size = getattr(config, 'vocab_size', None)

        # Calculate memory (parameter bytes only, in GiB)
        memory_gb = sum(p.numel() * p.element_size() for p in model.parameters()) / (1024**3)

        return ModelInfo(
            name=name,
            model_type=model_type,
            architecture=config.architectures[0] if hasattr(config, 'architectures') and config.architectures else "Unknown",
            num_params=total_params,
            num_params_billions=total_params / 1e9,
            hidden_size=hidden_size,
            num_layers=num_layers,
            vocab_size=vocab_size,
            dtype=str(next(model.parameters()).dtype),
            memory_footprint_gb=round(memory_gb, 2),
            layers=layers,
            quantizable_layers=quantizable_layers
        )

    def register_model(self, model: Any, name: str, tokenizer: Any = None):
        """Register an externally loaded model"""
        self._loaded_model = model
        self._tokenizer = tokenizer
        self._model_info = self._analyze_model(model, name, ModelType.GENERIC)

    def get_layer_weights(self, layer_name: str) -> Optional[torch.Tensor]:
        """Get weights from a specific layer.

        Returns a clone so callers can mutate freely; None when the layer
        is not found. Raises RuntimeError when no model is loaded.
        """
        if self._loaded_model is None:
            raise RuntimeError("No model loaded")

        for name, module in self._loaded_model.named_modules():
            if name == layer_name:
                if hasattr(module, 'weight'):
                    return module.weight.data.clone()
        return None

    def set_layer_weights(self, layer_name: str, weights: torch.Tensor):
        """Set weights for a specific layer.

        Raises RuntimeError when no model is loaded and ValueError when
        the named layer does not exist.
        """
        if self._loaded_model is None:
            raise RuntimeError("No model loaded")

        for name, module in self._loaded_model.named_modules():
            if name == layer_name:
                if hasattr(module, 'weight'):
                    # Move to the layer's device before assignment.
                    module.weight.data = weights.to(module.weight.device)
                    return
        raise ValueError(f"Layer not found: {layer_name}")

    def get_model_info(self) -> Optional[ModelInfo]:
        """Get current model information"""
        return self._model_info

    def get_model(self) -> Optional[Any]:
        """Get loaded model"""
        return self._loaded_model

    def get_tokenizer(self):
        """Get loaded tokenizer"""
        return self._tokenizer

    def unload(self):
        """Unload model and free memory"""
        if self._loaded_model is not None:
            del self._loaded_model
            self._loaded_model = None

        if self._tokenizer is not None:
            del self._tokenizer
            self._tokenizer = None

        self._model_info = None

        # Force garbage collection
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    def to_dict(self) -> Optional[Dict[str, Any]]:
        """Convert model info to dictionary.

        JSON-friendly snapshot of the analysis; None when nothing loaded.
        """
        if self._model_info is None:
            return None

        info = self._model_info
        return {
            "name": info.name,
            "model_type": info.model_type.value,
            "architecture": info.architecture,
            "num_params": info.num_params,
            "num_params_billions": round(info.num_params_billions, 3),
            "hidden_size": info.hidden_size,
            "num_layers": info.num_layers,
            "vocab_size": info.vocab_size,
            "dtype": info.dtype,
            "memory_footprint_gb": info.memory_footprint_gb,
            "num_quantizable_layers": len(info.quantizable_layers),
            "quantizable_layers": info.quantizable_layers,
            "layers": [
                {
                    "name": layer.name,
                    "module_type": layer.module_type,
                    "shape": layer.shape,
                    "num_params": layer.num_params,
                    "dtype": layer.dtype,
                    "is_quantizable": layer.is_quantizable
                }
                for layer in info.layers
            ]
        }
+
+
# Global instance (None when transformers is unavailable so importing this
# module degrades gracefully instead of failing)
model_loader = ModelLoader() if HAS_TRANSFORMERS else None
+
+
def load_model(model_name: str, **kwargs) -> Tuple[Any, Any]:
    """Convenience function to load a model via the global loader.

    Raises ImportError when transformers is not installed.
    """
    if model_loader is None:
        raise ImportError("transformers not available")
    return model_loader.load(model_name, **kwargs)
+
+
def get_model_info() -> Optional[Dict[str, Any]]:
    """Get current model information as a dict (None when unavailable)."""
    if model_loader is None:
        return None
    return model_loader.to_dict()
diff --git a/backend/core/model_manager.py b/backend/core/model_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..78247523f435a48691bfff133a5b177e0532ef07
--- /dev/null
+++ b/backend/core/model_manager.py
@@ -0,0 +1,247 @@
+"""
+Model Manager with Download Progress, Caching, and Auto-Cleanup
+Designed to work with HuggingFace Spaces disk storage
+"""
+
+import os
+import time
+import shutil
+import asyncio
+import threading
+from pathlib import Path
+from typing import Optional, Dict, Any, Callable
+from dataclasses import dataclass
+from datetime import datetime, timedelta
+
# HuggingFace cache directory - works on Spaces (HF_HOME wins when set)
HF_CACHE_DIR = os.environ.get("HF_HOME", Path.home() / ".cache" / "huggingface")
MODEL_CACHE_DIR = Path(HF_CACHE_DIR) / "hub"

# Sample models that should always be available (tiny models for quick testing)
SAMPLE_MODELS = [
    "prajjwal1/bert-tiny",  # 4.4MB - Perfect for testing
]

# Auto-cleanup interval (4 hours)
CLEANUP_INTERVAL_HOURS = 4

# Track download progress
# (module-level mutable state, keyed by model name; written by the
# setters below and by DownloadProgressCallback)
_download_progress: Dict[str, Dict[str, Any]] = {}
_cleanup_thread: Optional[threading.Thread] = None
+
+
@dataclass
class DownloadProgress:
    """Track download progress for a model"""
    model_name: str  # HF model id, e.g. "org/repo"
    status: str  # "pending", "downloading", "extracting", "complete", "error"
    current_file: str  # file currently being fetched
    files_completed: int
    total_files: int
    bytes_downloaded: int
    total_bytes: int
    speed_mbps: float  # average MB/s since start (see DownloadProgressCallback)
    eta_seconds: int  # 0 when speed is unknown
    error: Optional[str] = None  # presumably set only when status == "error"
+
+
def get_download_progress(model_name: str) -> Optional[Dict[str, Any]]:
    """Get current download progress for a model (None if none recorded)."""
    return _download_progress.get(model_name)
+
+
def set_download_progress(model_name: str, progress: Dict[str, Any]):
    """Record progress for model_name, stamping it with the current time."""
    # Copy the caller's dict so later mutations don't leak into our state,
    # then overlay the timestamp.
    entry = dict(progress)
    entry["timestamp"] = time.time()
    _download_progress[model_name] = entry
+
+
def clear_download_progress(model_name: str):
    """Drop any recorded download progress for model_name (no-op if absent).

    Uses dict.pop with a default: a single atomic step instead of the
    membership-check-then-delete pair, which also avoids a check/delete
    race if another thread clears the same key.
    """
    _download_progress.pop(model_name, None)
+
+
def get_cached_models() -> list:
    """Get list of models currently in the HuggingFace hub cache.

    Returns one dict per "models--*" directory with name, path, size_mb,
    last_access (ISO timestamp) and is_sample.
    """
    cached = []

    if not MODEL_CACHE_DIR.exists():
        return cached

    for item in MODEL_CACHE_DIR.iterdir():
        if item.is_dir() and item.name.startswith("models--"):
            # Decode "models--org--repo" back to "org/repo". Strip only the
            # leading marker (replace() also hit mid-string occurrences)
            # and re-join the tail with "--" so repo names containing "--"
            # are not truncated to the first segment.
            parts = item.name[len("models--"):].split("--")
            if len(parts) >= 2:
                model_name = f"{parts[0]}/{'--'.join(parts[1:])}"
            else:
                model_name = parts[0]

            # Total on-disk size in MB (blobs + snapshots + refs)
            size_mb = sum(f.stat().st_size for f in item.rglob("*") if f.is_file()) / (1024 * 1024)

            # atime may be unavailable (e.g. noatime mounts); fall back to
            # "now" rather than failing the listing.
            try:
                last_access = item.stat().st_atime
            except OSError:
                last_access = time.time()

            cached.append({
                "name": model_name,
                "path": str(item),
                "size_mb": round(size_mb, 2),
                "last_access": datetime.fromtimestamp(last_access).isoformat(),
                "is_sample": model_name in SAMPLE_MODELS
            })

    return cached
+
+
def cleanup_old_models(max_age_hours: float = CLEANUP_INTERVAL_HOURS):
    """
    Remove models that haven't been accessed in max_age_hours.
    Sample models are never deleted.

    Returns:
        {"deleted": [...], "kept": [...]} of decoded model names; stat or
        rmtree failures keep the entry with an "(error: ...)" suffix.
    """
    if not MODEL_CACHE_DIR.exists():
        return {"deleted": [], "kept": []}

    deleted = []
    kept = []
    cutoff_time = time.time() - (max_age_hours * 3600)

    for item in MODEL_CACHE_DIR.iterdir():
        if item.is_dir() and item.name.startswith("models--"):
            # Decode "models--org--repo" -> "org/repo"; keep any extra "--"
            # segments inside the repo part instead of dropping them
            # (matches the decoding in get_cached_models).
            parts = item.name[len("models--"):].split("--")
            if len(parts) >= 2:
                model_name = f"{parts[0]}/{'--'.join(parts[1:])}"
            else:
                model_name = parts[0]

            # Never delete sample models
            if model_name in SAMPLE_MODELS:
                kept.append(model_name)
                continue

            # Evict only when the last access predates the cutoff; atime
            # granularity depends on the filesystem mount options.
            try:
                last_access = item.stat().st_atime
                if last_access < cutoff_time:
                    shutil.rmtree(item)
                    deleted.append(model_name)
                else:
                    kept.append(model_name)
            except Exception as e:
                kept.append(f"{model_name} (error: {str(e)})")

    return {"deleted": deleted, "kept": kept}
+
+
def delete_model_cache(model_name: str) -> bool:
    """Delete a specific model from cache.

    Returns True only when the cache directory existed and was fully
    removed. Sample models are protected and always return False.
    """
    if model_name in SAMPLE_MODELS:
        return False  # Don't delete sample models

    # Convert "org/repo" to the HF hub cache directory name "models--org--repo"
    cache_name = f"models--{model_name.replace('/', '--')}"
    cache_path = MODEL_CACHE_DIR / cache_name

    if cache_path.exists():
        try:
            shutil.rmtree(cache_path)
            return True
        except OSError:
            # Narrowed from a bare except: permission/partial-delete
            # failures are reported as False instead of swallowing
            # everything (including KeyboardInterrupt).
            return False
    return False
+
+
+def ensure_sample_models():
+ """
+ Ensure sample models are downloaded.
+ Called on startup to pre-download tiny test models.
+ """
+ try:
+ from transformers import AutoModel, AutoConfig
+
+ for model_name in SAMPLE_MODELS:
+ try:
+ # Just load config first (fast)
+ AutoConfig.from_pretrained(model_name)
+ print(f"[ModelManager] Sample model '{model_name}' is available")
+ except Exception as e:
+ print(f"[ModelManager] Sample model '{model_name}' not cached: {e}")
+ except ImportError:
+ print("[ModelManager] transformers not installed, skipping sample model check")
+
+
def start_cleanup_scheduler():
    """Start background thread for periodic cleanup.

    Idempotent: a second call while the daemon thread is alive is a
    no-op. The loop sleeps first, so the initial cleanup happens one
    interval after startup, then repeats forever.
    """
    global _cleanup_thread

    if _cleanup_thread is not None and _cleanup_thread.is_alive():
        return

    def cleanup_loop():
        # Daemon loop: sleep one interval, then evict stale models.
        while True:
            time.sleep(CLEANUP_INTERVAL_HOURS * 3600)  # Wait 4 hours
            try:
                result = cleanup_old_models()
                if result["deleted"]:
                    print(f"[ModelManager] Cleaned up models: {result['deleted']}")
            except Exception as e:
                # Cleanup is best-effort; never kill the scheduler thread.
                print(f"[ModelManager] Cleanup error: {e}")

    _cleanup_thread = threading.Thread(target=cleanup_loop, daemon=True)
    _cleanup_thread.start()
    print(f"[ModelManager] Cleanup scheduler started (every {CLEANUP_INTERVAL_HOURS} hours)")
+
+
def get_cache_stats() -> Dict[str, Any]:
    """Summarize the model cache: counts, total size, and per-model details."""
    models = get_cached_models()
    return {
        "cache_dir": str(MODEL_CACHE_DIR),
        "total_models": len(models),
        "sample_models": len([m for m in models if m["is_sample"]]),
        "total_size_mb": round(sum(m["size_mb"] for m in models), 2),
        "cleanup_interval_hours": CLEANUP_INTERVAL_HOURS,
        "models": models,
    }
+
+
# Progress callback for HuggingFace downloads
class DownloadProgressCallback:
    """Callback to track HuggingFace download progress.

    Instances are callable; each call records throttled progress into the
    module-level _download_progress map via set_download_progress().
    """

    def __init__(self, model_name: str):
        self.model_name = model_name
        self.start_time = time.time()  # basis for the average-speed estimate
        self.last_update = 0           # timestamp of the last recorded update

    def __call__(self, current: int, total: int, filename: str = ""):
        """Record progress; current/total are byte counts for `filename`."""
        now = time.time()

        # Throttle updates to every 0.5 seconds
        if now - self.last_update < 0.5:
            return

        self.last_update = now
        elapsed = now - self.start_time
        # Average speed since start (bytes/sec); ETA in whole seconds.
        speed = current / elapsed if elapsed > 0 else 0
        eta = int((total - current) / speed) if speed > 0 else 0

        set_download_progress(self.model_name, {
            "status": "downloading",
            "current_file": filename,
            "bytes_downloaded": current,
            "total_bytes": total,
            "percent": round(100 * current / total, 1) if total > 0 else 0,
            # NOTE(review): the stored value is MB/s (bytes / 2^20)
            # despite the "mbps" key name — confirm consumers expect
            # megabytes per second, not megabits.
            "speed_mbps": round(speed / (1024 * 1024), 2),
            "eta_seconds": eta
        })
diff --git a/backend/core/quantizer.py b/backend/core/quantizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7654f34bf310971980d885367c1c879376599752
--- /dev/null
+++ b/backend/core/quantizer.py
@@ -0,0 +1,605 @@
+"""
+Multi-bit Weight Quantization Engine
+Supports INT8, INT4, NF4, and GPTQ-style quantization methods.
+"""
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+from typing import Optional, Tuple, Dict, Any, Literal
+from dataclasses import dataclass
+from enum import Enum
+
+
class QuantizationMethod(Enum):
    """Supported quantization methods (each maps to a quantizer class via get_quantizer)."""
    INT8 = "int8"  # 8-bit integer quantization
    INT4 = "int4"  # 4-bit integer quantization
    NF4 = "nf4"    # Normal Float 4-bit (QLoRA style)
    GPTQ = "gptq"  # GPTQ reconstruction-based (get_quantizer raises ValueError for this one)
+
+
class QuantizationMode(Enum):
    """Quantization modes"""
    SYMMETRIC = "symmetric"    # Range: [-max, max], zero point implicitly 0
    ASYMMETRIC = "asymmetric"  # Range: [min, max], uses an explicit zero point
+
+
@dataclass
class QuantizationConfig:
    """Configuration for quantization process"""
    bits: int = 8
    method: QuantizationMethod = QuantizationMethod.INT8
    mode: QuantizationMode = QuantizationMode.SYMMETRIC
    group_size: Optional[int] = None  # None = per-channel, else group quantization
    use_double_quant: bool = False  # Double quantization for scales
    compute_dtype: torch.dtype = torch.float32

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (enums and dtype become strings)."""
        serialized: Dict[str, Any] = {}
        serialized["bits"] = self.bits
        serialized["method"] = self.method.value
        serialized["mode"] = self.mode.value
        serialized["group_size"] = self.group_size
        serialized["use_double_quant"] = self.use_double_quant
        serialized["compute_dtype"] = str(self.compute_dtype)
        return serialized
+
+
@dataclass
class QuantizationResult:
    """Result of quantization operation"""
    quantized_weights: torch.Tensor      # Quantized (possibly nibble-packed) weight tensor
    scales: torch.Tensor                 # Per-channel or per-group scale factors
    zero_points: Optional[torch.Tensor]  # Zero points; None for symmetric quantization
    original_shape: Tuple[int, ...]      # Weight shape before quantization/packing
    config: QuantizationConfig           # Configuration used to produce this result
    max_error: float                     # Maximum absolute reconstruction error
    mean_error: float                    # Mean absolute reconstruction error
    memory_savings_percent: float        # Percent reduction in storage vs. original weights
+
+
class BaseQuantizer:
    """Abstract base for quantizers; subclasses implement quantize/dequantize."""

    def __init__(self, config: QuantizationConfig):
        self.config = config

    def quantize(self, weights: torch.Tensor) -> QuantizationResult:
        raise NotImplementedError

    def dequantize(self, result: QuantizationResult) -> torch.Tensor:
        raise NotImplementedError

    def _calculate_error(self, original: torch.Tensor, dequantized: torch.Tensor) -> Tuple[float, float]:
        """Return (max_abs_error, mean_abs_error) between original and reconstruction."""
        diff = (original - dequantized).abs()
        return diff.max().item(), diff.mean().item()

    def _calculate_memory_savings(self, original: torch.Tensor, quantized: torch.Tensor,
                                  scales: torch.Tensor) -> float:
        """Percent byte reduction of (quantized weights + scales) vs. the original tensor.

        NOTE(review): zero-point storage is not counted here — confirm that is
        acceptable for asymmetric modes.
        """
        def nbytes(t: torch.Tensor) -> int:
            return t.numel() * t.element_size()

        return 100 * (1 - (nbytes(quantized) + nbytes(scales)) / nbytes(original))
+
+
class INT8Quantizer(BaseQuantizer):
    """8-bit integer quantization (W8A16).

    Weights are stored as int8 with float scales, per output channel or per
    group; they are dequantized back to floating point on use.
    """

    def __init__(self, config: Optional[QuantizationConfig] = None):
        if config is None:
            config = QuantizationConfig(bits=8, method=QuantizationMethod.INT8)
        super().__init__(config)

    def quantize(self, weights: torch.Tensor) -> QuantizationResult:
        """
        Quantize weights to INT8 precision.

        Args:
            weights: Tensor of shape (out_features, in_features)

        Returns:
            QuantizationResult with int8 weights and scales
        """
        original_shape = weights.shape
        w_fp32 = weights.clone().to(torch.float32)

        if self.config.group_size is not None:
            # Group quantization
            return self._quantize_grouped(w_fp32, original_shape)

        # Per-channel quantization
        if self.config.mode == QuantizationMode.SYMMETRIC:
            # Symmetric: scale = max(|w|) / 127, zero point implicitly 0
            scales = w_fp32.abs().max(dim=-1).values / 127
            scales = scales.clamp(min=1e-8)  # Avoid division by zero
            zero_points = None

            # Quantize
            int8_weights = torch.round(w_fp32 / scales.unsqueeze(1)).clamp(-128, 127).to(torch.int8)

        else:
            # Asymmetric: map [w_min, w_max] onto the full signed range [-128, 127]
            w_min = w_fp32.min(dim=-1).values
            w_max = w_fp32.max(dim=-1).values

            scales = (w_max - w_min) / 255
            scales = scales.clamp(min=1e-8)
            # BUG FIX: the zero point must live in the *signed* int8 range.
            # It was previously computed in [0, 255]; adding it before the
            # clamp to [-128, 127] clipped every weight in the upper half of
            # the range. Shifting by -128 keeps q = round(w/scale + zp) in
            # [-128, 127] (w_min -> -128, w_max -> 127) with no clipping.
            zero_points = (torch.round(-w_min / scales) - 128).clamp(-128, 127).to(torch.int32)

            # Quantize
            int8_weights = torch.round(w_fp32 / scales.unsqueeze(1) + zero_points.unsqueeze(1))
            int8_weights = int8_weights.clamp(-128, 127).to(torch.int8)

        # Dequantize for error calculation
        dequantized = self.dequantize_weights(int8_weights, scales, zero_points)
        max_error, mean_error = self._calculate_error(weights, dequantized)
        # NOTE(review): savings ignore zero-point storage in asymmetric mode.
        memory_savings = self._calculate_memory_savings(weights, int8_weights, scales)

        return QuantizationResult(
            quantized_weights=int8_weights,
            scales=scales,
            zero_points=zero_points,
            original_shape=original_shape,
            config=self.config,
            max_error=max_error,
            mean_error=mean_error,
            memory_savings_percent=memory_savings
        )

    def _quantize_grouped(self, weights: torch.Tensor, original_shape: Tuple[int, ...]) -> QuantizationResult:
        """Quantize with group-wise scaling (symmetric only).

        Args:
            weights: fp32 tensor (out_features, in_features)
            original_shape: shape before any padding, used to trim the result
        """
        out_features, in_features = weights.shape
        group_size = self.config.group_size

        # Pad the input dimension so it divides evenly into groups.
        if in_features % group_size != 0:
            pad_size = group_size - (in_features % group_size)
            weights = F.pad(weights, (0, pad_size))
            in_features = weights.shape[1]

        # Reshape for group quantization
        num_groups = in_features // group_size
        weights_grouped = weights.reshape(out_features, num_groups, group_size)

        # Calculate scales per group (symmetric absmax)
        scales = weights_grouped.abs().max(dim=-1).values / 127
        scales = scales.clamp(min=1e-8)

        # Quantize
        int8_weights = torch.round(weights_grouped / scales.unsqueeze(-1))
        int8_weights = int8_weights.clamp(-128, 127).to(torch.int8)
        int8_weights = int8_weights.reshape(out_features, in_features)

        # Trim padding back to the original width
        int8_weights = int8_weights[:, :original_shape[1]]
        scales = scales.reshape(out_features, num_groups)

        # Dequantize for error calculation (against the unpadded weights)
        dequantized = self.dequantize_weights(int8_weights, scales, None, group_size)
        max_error, mean_error = self._calculate_error(
            weights[:, :original_shape[1]], dequantized
        )
        memory_savings = self._calculate_memory_savings(
            weights[:, :original_shape[1]], int8_weights, scales
        )

        return QuantizationResult(
            quantized_weights=int8_weights,
            scales=scales,
            zero_points=None,
            original_shape=original_shape,
            config=self.config,
            max_error=max_error,
            mean_error=mean_error,
            memory_savings_percent=memory_savings
        )

    def dequantize_weights(self, int8_weights: torch.Tensor, scales: torch.Tensor,
                           zero_points: Optional[torch.Tensor] = None,
                           group_size: Optional[int] = None) -> torch.Tensor:
        """Dequantize INT8 weights back to floating point.

        Args:
            int8_weights: quantized weights (out_features, in_features)
            scales: per-channel scales, or (out_features, num_groups) when group_size is set
            zero_points: per-channel zero points (asymmetric mode only)
            group_size: group width used at quantization time, if any
        """
        if group_size is not None:
            # Group dequantization: broadcast each group's scale across its columns
            out_features, in_features = int8_weights.shape
            num_groups = scales.shape[1]

            # Expand scales to match weight shape (trim in case of padding)
            scales_expanded = scales.unsqueeze(-1).expand(-1, -1, group_size)
            scales_expanded = scales_expanded.reshape(out_features, -1)[:, :in_features]

            return int8_weights.float() * scales_expanded

        if zero_points is not None:
            # Asymmetric: w ≈ (q - zero_point) * scale
            return (int8_weights.float() - zero_points.unsqueeze(1).float()) * scales.unsqueeze(1)

        # Symmetric: w ≈ q * scale
        return int8_weights.float() * scales.unsqueeze(1)

    def dequantize(self, result: QuantizationResult) -> torch.Tensor:
        """Dequantize from QuantizationResult"""
        return self.dequantize_weights(
            result.quantized_weights,
            result.scales,
            result.zero_points,
            result.config.group_size
        )
+
+
class INT4Quantizer(BaseQuantizer):
    """4-bit integer quantization (W4A16).

    Uses group-wise scales (default group size 128) and packs two 4-bit
    values into each stored int8 byte.
    """

    def __init__(self, config: Optional[QuantizationConfig] = None):
        if config is None:
            config = QuantizationConfig(bits=4, method=QuantizationMethod.INT4, group_size=128)
        super().__init__(config)

    def quantize(self, weights: torch.Tensor) -> QuantizationResult:
        """
        Quantize weights to INT4 precision.
        Uses group quantization for better accuracy.

        Args:
            weights: Tensor of shape (out_features, in_features)

        Returns:
            QuantizationResult with packed int4 weights and scales
        """
        original_shape = weights.shape
        w_fp32 = weights.clone().to(torch.float32)
        out_features, in_features = w_fp32.shape

        group_size = self.config.group_size or 128

        # Pad the input dimension so it divides evenly into groups.
        if in_features % group_size != 0:
            pad_size = group_size - (in_features % group_size)
            w_fp32 = F.pad(w_fp32, (0, pad_size))
            in_features = w_fp32.shape[1]

        # Reshape for group quantization
        num_groups = in_features // group_size
        weights_grouped = w_fp32.reshape(out_features, num_groups, group_size)

        if self.config.mode == QuantizationMode.SYMMETRIC:
            # Symmetric: signed int4 range [-8, 7], zero point implicitly 0
            scales = weights_grouped.abs().max(dim=-1).values / 7
            scales = scales.clamp(min=1e-8)
            zero_points = None

            # Quantize to int4 range
            int4_weights = torch.round(weights_grouped / scales.unsqueeze(-1))
            int4_weights = int4_weights.clamp(-8, 7).to(torch.int8)  # Store as int8 until packing
        else:
            # Asymmetric: unsigned int4 range [0, 15] with per-group zero point
            w_min = weights_grouped.min(dim=-1).values
            w_max = weights_grouped.max(dim=-1).values

            scales = (w_max - w_min) / 15
            scales = scales.clamp(min=1e-8)
            zero_points = torch.round(-w_min / scales).clamp(0, 15).to(torch.int8)

            int4_weights = torch.round(weights_grouped / scales.unsqueeze(-1) + zero_points.unsqueeze(-1))
            int4_weights = int4_weights.clamp(0, 15).to(torch.int8)

        # Back to 2-D and trim any padding columns
        int4_weights = int4_weights.reshape(out_features, in_features)
        int4_weights = int4_weights[:, :original_shape[1]]

        # Pack two int4 values into one int8 (for memory efficiency)
        packed_weights = self._pack_int4(int4_weights)

        # Dequantize (from the unpacked values) for error calculation
        dequantized = self.dequantize_weights(int4_weights, scales, zero_points, group_size)
        dequantized = dequantized[:, :original_shape[1]]

        max_error, mean_error = self._calculate_error(weights, dequantized)

        # Memory savings: packed int4 plus scale storage vs. original weights
        original_bytes = weights.numel() * weights.element_size()
        packed_bytes = packed_weights.numel() * packed_weights.element_size()
        scales_bytes = scales.numel() * scales.element_size()
        memory_savings = 100 * (1 - (packed_bytes + scales_bytes) / original_bytes)

        return QuantizationResult(
            quantized_weights=packed_weights,
            scales=scales.reshape(out_features, num_groups),
            zero_points=zero_points.reshape(out_features, num_groups) if zero_points is not None else None,
            original_shape=original_shape,
            config=self.config,
            max_error=max_error,
            mean_error=mean_error,
            memory_savings_percent=memory_savings
        )

    def _pack_int4(self, int4_weights: torch.Tensor) -> torch.Tensor:
        """Pack two int4 values into one int8 (column 0 -> low nibble, column 1 -> high)."""
        out_features, in_features = int4_weights.shape

        # Pad to an even number of columns so values pair up
        if in_features % 2 != 0:
            int4_weights = F.pad(int4_weights, (0, 1))
            in_features += 1

        # Reshape and pack
        reshaped = int4_weights.reshape(out_features, in_features // 2, 2)
        # Pack: low 4 bits + high 4 bits
        packed = (reshaped[:, :, 0] & 0x0F) | ((reshaped[:, :, 1] & 0x0F) << 4)
        return packed.to(torch.int8)

    def _unpack_int4(self, packed_weights: torch.Tensor, original_in_features: int,
                     signed: bool = True) -> torch.Tensor:
        """Unpack each int8 byte into two int4 values.

        Args:
            packed_weights: tensor produced by _pack_int4
            original_in_features: column count before pack-padding
            signed: interpret nibbles as signed [-8, 7] (symmetric mode).
                BUG FIX: asymmetric mode stores *unsigned* [0, 15] values plus a
                zero point; the old code sign-converted them unconditionally,
                corrupting every stored value above 7. Pass signed=False for
                asymmetric results. Default True preserves prior behavior.
        """
        out_features = packed_weights.shape[0]

        # Unpack low/high nibbles
        low = packed_weights & 0x0F
        high = (packed_weights >> 4) & 0x0F

        if signed:
            # Map [8, 15] back to [-8, -1] (two's-complement nibbles)
            low = torch.where(low > 7, low - 16, low)
            high = torch.where(high > 7, high - 16, high)

        # Interleave nibbles back into column order
        unpacked = torch.stack([low, high], dim=-1).reshape(out_features, -1)
        return unpacked[:, :original_in_features]

    def dequantize_weights(self, int4_weights: torch.Tensor, scales: torch.Tensor,
                           zero_points: Optional[torch.Tensor] = None,
                           group_size: Optional[int] = None) -> torch.Tensor:
        """Dequantize unpacked INT4 weights back to floating point.

        Args:
            int4_weights: *unpacked* quantized values (out_features, in_features)
            scales: (out_features, num_groups) group scales
            zero_points: group zero points (asymmetric mode only)
            group_size: group width; falls back to config, then 128
        """
        out_features, in_features = int4_weights.shape
        group_size = group_size or self.config.group_size or 128
        num_groups = scales.shape[1] if scales.dim() > 1 else 1

        # Expand scales across each group's columns (trim in case of padding)
        scales_flat = scales.reshape(out_features, num_groups)
        scales_expanded = scales_flat.unsqueeze(-1).expand(-1, -1, group_size)
        scales_expanded = scales_expanded.reshape(out_features, -1)[:, :in_features]

        if zero_points is not None:
            zp_flat = zero_points.reshape(out_features, num_groups)
            zp_expanded = zp_flat.unsqueeze(-1).expand(-1, -1, group_size)
            zp_expanded = zp_expanded.reshape(out_features, -1)[:, :in_features]
            return (int4_weights.float() - zp_expanded.float()) * scales_expanded

        return int4_weights.float() * scales_expanded

    def dequantize(self, result: QuantizationResult) -> torch.Tensor:
        """Dequantize from QuantizationResult (handles packed weights)."""
        # Symmetric results (no zero point) hold signed nibbles; asymmetric
        # results hold unsigned nibbles offset by the stored zero points.
        unpacked = self._unpack_int4(
            result.quantized_weights,
            result.original_shape[1],
            signed=result.zero_points is None
        )
        return self.dequantize_weights(
            unpacked,
            result.scales,
            result.zero_points,
            result.config.group_size
        )
+
+
class NF4Quantizer(BaseQuantizer):
    """
    Normal Float 4-bit quantization (NF4).
    Uses a fixed codebook optimized for normally distributed weights.
    """

    # NF4 codebook: the 16 canonical values (QLoRA/bitsandbytes), optimized
    # for weights drawn from a normal distribution, normalized to [-1, 1].
    NF4_CODEBOOK = torch.tensor([
        -1.0, -0.6961928009986877, -0.5250730514526367, -0.39491748809814453,
        -0.28444138169288635, -0.18477343022823334, -0.09105003625154495, 0.0,
        0.07958029955625534, 0.16093020141124725, 0.24611008348274231,
        0.33791524171829224, 0.44070982933044434, 0.5626170039176941,
        0.7229568362236023, 1.0
    ])

    def __init__(self, config: Optional[QuantizationConfig] = None):
        if config is None:
            config = QuantizationConfig(bits=4, method=QuantizationMethod.NF4, group_size=64)
        super().__init__(config)

        # BUG FIX: this previously shadowed NF4_CODEBOOK with a copy rounded
        # to 4 decimal places, so quantization/dequantization silently used a
        # less precise table than the canonical NF4 values defined above.
        self.codebook = self.NF4_CODEBOOK.clone()

    def quantize(self, weights: torch.Tensor) -> QuantizationResult:
        """Quantize weights using the NF4 codebook.

        Each group is normalized by its absmax, then every value is mapped to
        the index of the nearest codebook entry; indices are nibble-packed.
        """
        original_shape = weights.shape
        w_fp32 = weights.clone().to(torch.float32)
        out_features, in_features = w_fp32.shape

        group_size = self.config.group_size or 64

        # Pad the input dimension so it divides evenly into groups
        if in_features % group_size != 0:
            pad_size = group_size - (in_features % group_size)
            w_fp32 = F.pad(w_fp32, (0, pad_size))
            in_features = w_fp32.shape[1]

        # Reshape for group quantization
        num_groups = in_features // group_size
        weights_grouped = w_fp32.reshape(out_features, num_groups, group_size)

        # Calculate absmax scales per group
        scales = weights_grouped.abs().max(dim=-1).values
        scales = scales.clamp(min=1e-8)

        # Normalize weights into the codebook's [-1, 1] range
        normalized = weights_grouped / scales.unsqueeze(-1)

        # Find nearest codebook entry for each weight
        codebook = self.codebook.to(weights.device)
        distances = torch.abs(normalized.unsqueeze(-1) - codebook)
        indices = distances.argmin(dim=-1).to(torch.int8)

        # Back to 2-D and trim any padding columns
        indices = indices.reshape(out_features, in_features)[:, :original_shape[1]]

        # Pack two 4-bit indices per byte
        packed = self._pack_int4(indices)

        # Dequantize for error calculation
        dequantized = self.dequantize_weights(indices, scales.reshape(out_features, num_groups), group_size)
        dequantized = dequantized[:, :original_shape[1]]

        max_error, mean_error = self._calculate_error(weights, dequantized)

        # Memory savings: packed indices plus scale storage vs. original weights
        original_bytes = weights.numel() * weights.element_size()
        packed_bytes = packed.numel() * packed.element_size()
        scales_bytes = scales.numel() * scales.element_size()
        memory_savings = 100 * (1 - (packed_bytes + scales_bytes) / original_bytes)

        return QuantizationResult(
            quantized_weights=packed,
            scales=scales.reshape(out_features, num_groups),
            zero_points=None,
            original_shape=original_shape,
            config=self.config,
            max_error=max_error,
            mean_error=mean_error,
            memory_savings_percent=memory_savings
        )

    def _pack_int4(self, indices: torch.Tensor) -> torch.Tensor:
        """Pack two 4-bit codebook indices into one int8."""
        out_features, in_features = indices.shape
        # Pad to an even number of columns so indices pair up
        if in_features % 2 != 0:
            indices = F.pad(indices, (0, 1))
            in_features += 1

        reshaped = indices.reshape(out_features, in_features // 2, 2)
        packed = (reshaped[:, :, 0] & 0x0F) | ((reshaped[:, :, 1] & 0x0F) << 4)
        return packed.to(torch.int8)

    def _unpack_int4(self, packed: torch.Tensor, original_in_features: int) -> torch.Tensor:
        """Unpack int8 bytes into two unsigned 4-bit indices each (no sign conversion)."""
        out_features = packed.shape[0]
        low = packed & 0x0F
        high = (packed >> 4) & 0x0F
        unpacked = torch.stack([low, high], dim=-1).reshape(out_features, -1)
        return unpacked[:, :original_in_features]

    def dequantize_weights(self, indices: torch.Tensor, scales: torch.Tensor,
                           group_size: Optional[int] = None) -> torch.Tensor:
        """Dequantize NF4 indices back to floating point.

        Args:
            indices: unpacked codebook indices (out_features, in_features)
            scales: (out_features, num_groups) absmax scales
            group_size: group width; falls back to config, then 64
        """
        codebook = self.codebook.to(indices.device)

        # Look up codebook values
        dequantized = codebook[indices.long()]

        # Apply per-group scales (trim in case of padding)
        out_features, in_features = indices.shape
        group_size = group_size or self.config.group_size or 64
        num_groups = scales.shape[1]

        scales_expanded = scales.unsqueeze(-1).expand(-1, -1, group_size)
        scales_expanded = scales_expanded.reshape(out_features, -1)[:, :in_features]

        return dequantized * scales_expanded

    def dequantize(self, result: QuantizationResult) -> torch.Tensor:
        """Dequantize from QuantizationResult"""
        unpacked = self._unpack_int4(result.quantized_weights, result.original_shape[1])
        return self.dequantize_weights(
            unpacked,
            result.scales,
            result.config.group_size
        )
+
+
class QuantizedLinear(nn.Module):
    """
    Quantized Linear layer supporting multiple quantization methods.
    Compatible with W8A16, W4A16, NF4, and GPTQ quantization.

    Weights are stored quantized (as buffers) and reconstructed to floating
    point inside forward(); the bias, if present, stays in full precision.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool = True,
                 config: Optional[QuantizationConfig] = None):
        super().__init__()

        self.in_features = in_features
        self.out_features = out_features
        self.config = config or QuantizationConfig()

        # Quantizer implementation selected from the config's method.
        self.quantizer = self._get_quantizer()

        # Buffers for the quantized representation; filled by quantize_weights().
        self.register_buffer("quantized_weights", None)
        self.register_buffer("scales", None)
        self.register_buffer("zero_points", None)

        # Bias (kept in full precision)
        if bias:
            self.register_buffer("bias", torch.zeros(out_features))
        else:
            self.bias = None

        self._quantized = False

    def _get_quantizer(self) -> BaseQuantizer:
        """Get appropriate quantizer based on config"""
        method = self.config.method
        if method == QuantizationMethod.INT8:
            return INT8Quantizer(self.config)
        if method == QuantizationMethod.INT4:
            return INT4Quantizer(self.config)
        if method == QuantizationMethod.NF4:
            return NF4Quantizer(self.config)
        raise ValueError(f"Unsupported quantization method: {self.config.method}")

    def quantize_weights(self, weights: torch.Tensor, bias: Optional[torch.Tensor] = None) -> QuantizationResult:
        """Quantize weights (and optionally bias) and store them in this layer."""
        result = self.quantizer.quantize(weights)

        self.quantized_weights = result.quantized_weights
        self.scales = result.scales
        self.zero_points = result.zero_points

        if bias is not None:
            self.bias = bias.clone()

        self._quantized = True
        return result

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass; weights are dequantized on-the-fly each call."""
        if not self._quantized:
            raise RuntimeError("Layer has not been quantized. Call quantize_weights first.")

        # Rebuild a QuantizationResult so the quantizer can reconstruct the
        # weight matrix (error/savings fields are irrelevant here).
        snapshot = QuantizationResult(
            quantized_weights=self.quantized_weights,
            scales=self.scales,
            zero_points=self.zero_points,
            original_shape=(self.out_features, self.in_features),
            config=self.config,
            max_error=0, mean_error=0, memory_savings_percent=0
        )
        weights = self.quantizer.dequantize(snapshot)

        # Linear operation in the input's dtype
        output = F.linear(x, weights.to(x.dtype))
        return output if self.bias is None else output + self.bias.to(x.dtype)
+
+
def get_quantizer(config: QuantizationConfig) -> BaseQuantizer:
    """Factory function to get appropriate quantizer"""
    method = config.method
    if method == QuantizationMethod.INT8:
        return INT8Quantizer(config)
    if method == QuantizationMethod.INT4:
        return INT4Quantizer(config)
    if method == QuantizationMethod.NF4:
        return NF4Quantizer(config)
    # GPTQ (and anything else) has no quantizer implementation here.
    raise ValueError(f"Unsupported method: {config.method}")
diff --git a/backend/core/system_checker.py b/backend/core/system_checker.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3c51ec3ccec2ef6fdf81ced50415fc7a4176d00
--- /dev/null
+++ b/backend/core/system_checker.py
@@ -0,0 +1,299 @@
+"""
+System Requirements Checker
+Detects GPU availability, memory, and provides hardware recommendations.
+"""
+
+import torch
+import psutil
+import platform
+from dataclasses import dataclass
+from typing import Optional, List, Dict, Any
+from enum import Enum
+
+
class HardwareCapability(Enum):
    """Hardware capability levels"""
    FULL_GPU = "full_gpu"            # CUDA GPU with sufficient VRAM (>= 8GB)
    LIMITED_GPU = "limited_gpu"      # CUDA GPU with limited VRAM (< 8GB)
    CPU_ONLY = "cpu_only"            # No GPU available
    APPLE_SILICON = "apple_silicon"  # M1/M2/M3 with MPS (unified memory)
+
+
@dataclass
class GPUInfo:
    """Information about a GPU device"""
    index: int                                # CUDA device index
    name: str                                 # Device name reported by the driver
    total_memory_gb: float                    # Total VRAM in GiB (1024**3 bytes)
    free_memory_gb: float                     # Estimated free VRAM in GiB
    compute_capability: Optional[str] = None  # e.g. "8.6" (major.minor)
+
+
@dataclass
class SystemInfo:
    """Complete system information"""
    platform: str                    # OS name, e.g. "Linux" / "Darwin" / "Windows"
    python_version: str
    torch_version: str
    cuda_available: bool
    cuda_version: Optional[str]      # None when CUDA is unavailable
    mps_available: bool              # Apple Metal (MPS) backend availability
    cpu_cores: int                   # Physical core count (falls back to 1)
    ram_total_gb: float
    ram_available_gb: float
    gpus: List[GPUInfo]
    capability: HardwareCapability   # Overall classification of this machine
    recommended_batch_size: int
    max_model_size: str              # Human-readable size class, e.g. "large (7B)"
    warnings: List[str]              # Human-readable caveats found during the check
+
+
class SystemChecker:
    """Check system capabilities for quantization tasks.

    Probes CPU, RAM and GPU resources and caches the result on the instance;
    pass check(force_refresh=True) to re-probe.
    """

    # Model size thresholds (in billions of parameters)
    MODEL_SIZES = {
        "tiny": 0.1,      # ~100M params
        "small": 0.5,     # ~500M params
        "medium": 1.0,    # ~1B params
        "large": 7.0,     # ~7B params
        "xlarge": 13.0,   # ~13B params
        "xxlarge": 70.0   # ~70B params
    }

    # Approximate weight-memory requirements per billion parameters (GB)
    MEMORY_PER_BILLION_PARAMS = {
        "fp32": 4.0,
        "fp16": 2.0,
        "int8": 1.0,
        "int4": 0.5
    }

    def __init__(self):
        # Cached probe result; populated lazily by check().
        self._system_info: Optional[SystemInfo] = None

    def check(self, force_refresh: bool = False) -> SystemInfo:
        """Perform full system check and return (cached) SystemInfo.

        Args:
            force_refresh: when True, ignore the cached result and re-probe.
        """
        if self._system_info is not None and not force_refresh:
            return self._system_info

        warnings = []
        gpus = []

        # Basic info
        cuda_available = torch.cuda.is_available()
        mps_available = hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()

        # CUDA version
        cuda_version = None
        if cuda_available:
            cuda_version = torch.version.cuda

        # GPU detection
        if cuda_available:
            try:
                for i in range(torch.cuda.device_count()):
                    props = torch.cuda.get_device_properties(i)
                    total_mem = props.total_memory / (1024**3)
                    # NOTE(review): only subtracts this process's PyTorch
                    # reservations, so "free" is optimistic if other processes
                    # use the GPU — confirm acceptable.
                    free_mem = (props.total_memory - torch.cuda.memory_reserved(i)) / (1024**3)

                    gpus.append(GPUInfo(
                        index=i,
                        name=props.name,
                        total_memory_gb=round(total_mem, 2),
                        free_memory_gb=round(free_mem, 2),
                        compute_capability=f"{props.major}.{props.minor}"
                    ))
            except Exception as e:
                warnings.append(f"Error detecting GPU: {str(e)}")

        # RAM info
        ram = psutil.virtual_memory()
        ram_total_gb = ram.total / (1024**3)
        ram_available_gb = ram.available / (1024**3)

        # Determine capability
        capability = self._determine_capability(gpus, mps_available, ram_total_gb)

        # Recommendations
        recommended_batch_size = self._get_recommended_batch_size(capability, gpus)
        max_model_size = self._get_max_model_size(capability, gpus, ram_total_gb)

        # Add warnings
        if not cuda_available and not mps_available:
            warnings.append("No GPU detected. Quantization will run on CPU (slower).")

        if ram_available_gb < 8:
            warnings.append(f"Low RAM available ({ram_available_gb:.1f}GB). Large models may fail.")

        if gpus and gpus[0].free_memory_gb < 4:
            warnings.append(f"Low GPU memory ({gpus[0].free_memory_gb:.1f}GB free). Consider smaller models.")

        self._system_info = SystemInfo(
            platform=platform.system(),
            python_version=platform.python_version(),
            torch_version=torch.__version__,
            cuda_available=cuda_available,
            cuda_version=cuda_version,
            mps_available=mps_available,
            cpu_cores=psutil.cpu_count(logical=False) or 1,
            ram_total_gb=round(ram_total_gb, 2),
            ram_available_gb=round(ram_available_gb, 2),
            gpus=gpus,
            capability=capability,
            recommended_batch_size=recommended_batch_size,
            max_model_size=max_model_size,
            warnings=warnings
        )

        return self._system_info

    def _determine_capability(self, gpus: List[GPUInfo], mps_available: bool,
                              ram_total_gb: float) -> HardwareCapability:
        """Classify hardware: MPS first, then CUDA by VRAM, else CPU-only."""
        if mps_available:
            return HardwareCapability.APPLE_SILICON

        if not gpus:
            return HardwareCapability.CPU_ONLY

        # Check if any GPU has >= 8GB VRAM
        max_vram = max(gpu.total_memory_gb for gpu in gpus)

        if max_vram >= 8:
            return HardwareCapability.FULL_GPU
        else:
            return HardwareCapability.LIMITED_GPU

    def _get_recommended_batch_size(self, capability: HardwareCapability,
                                    gpus: List[GPUInfo]) -> int:
        """Get recommended batch size based on hardware."""
        if capability == HardwareCapability.CPU_ONLY:
            return 1
        elif capability == HardwareCapability.LIMITED_GPU:
            return 4
        elif capability == HardwareCapability.APPLE_SILICON:
            return 8
        else:
            # Full GPU - scale with VRAM of the first device
            if gpus:
                vram = gpus[0].total_memory_gb
                if vram >= 24:
                    return 32
                elif vram >= 16:
                    return 16
                elif vram >= 8:
                    return 8
            return 8

    def _get_max_model_size(self, capability: HardwareCapability,
                            gpus: List[GPUInfo], ram_gb: float) -> str:
        """Get maximum recommended model size as a human-readable label."""
        if capability == HardwareCapability.CPU_ONLY:
            # CPU-only: limited by RAM, very slow for large models
            if ram_gb >= 32:
                return "medium (1B)"
            elif ram_gb >= 16:
                return "small (500M)"
            else:
                return "tiny (100M)"

        elif capability == HardwareCapability.LIMITED_GPU:
            return "small (500M)"

        elif capability == HardwareCapability.APPLE_SILICON:
            # Apple Silicon: depends on unified memory
            if ram_gb >= 32:
                return "large (7B)"
            elif ram_gb >= 16:
                return "medium (1B)"
            else:
                return "small (500M)"

        else:  # FULL_GPU
            if gpus:
                vram = gpus[0].total_memory_gb
                if vram >= 48:
                    return "xxlarge (70B)"
                elif vram >= 24:
                    return "xlarge (13B)"
                elif vram >= 16:
                    return "large (7B)"
                elif vram >= 8:
                    return "medium (1B)"
            return "medium (1B)"

    def can_load_model(self, model_params_billions: float,
                       dtype: str = "fp16") -> Dict[str, Any]:
        """Check if a specific model can be loaded on this system.

        Args:
            model_params_billions: parameter count in billions (e.g. 7.0).
            dtype: "fp32" | "fp16" | "int8" | "int4"; unknown values fall
                back to the fp16 cost (2.0 GB per billion parameters).

        Returns:
            Dict with a can_load flag, memory estimates, a recommended
            device, and warnings when the model does not fit.
        """
        info = self.check()

        memory_required = model_params_billions * self.MEMORY_PER_BILLION_PARAMS.get(dtype, 2.0)
        memory_required *= 1.3  # 30% overhead for activations, optimizer, etc.

        # Check GPU memory
        gpu_ok = False
        gpu_memory = 0
        if info.gpus:
            gpu_memory = info.gpus[0].free_memory_gb
            gpu_ok = gpu_memory >= memory_required

        # Check RAM
        ram_ok = info.ram_available_gb >= memory_required

        # BUG FIX: Apple Silicon was previously excluded here (only CPU_ONLY
        # could satisfy the RAM path), so MPS machines always reported
        # can_load=False even while recommended_device said "mps". MPS uses
        # unified memory, so system RAM is the correct budget for it too.
        ram_backed = info.capability in (HardwareCapability.CPU_ONLY,
                                         HardwareCapability.APPLE_SILICON)
        can_load = gpu_ok or (ram_backed and ram_ok)

        return {
            "can_load": can_load,
            "memory_required_gb": round(memory_required, 2),
            "gpu_available_gb": round(gpu_memory, 2) if info.gpus else 0,
            "ram_available_gb": round(info.ram_available_gb, 2),
            "recommended_device": "cuda" if gpu_ok else ("mps" if info.mps_available else "cpu"),
            "warnings": [] if can_load else [
                f"Model requires ~{memory_required:.1f}GB memory. " +
                f"Available: GPU={gpu_memory:.1f}GB, RAM={info.ram_available_gb:.1f}GB"
            ]
        }

    def to_dict(self) -> Dict[str, Any]:
        """Convert system info to a JSON-friendly dictionary."""
        info = self.check()
        return {
            "platform": info.platform,
            "python_version": info.python_version,
            "torch_version": info.torch_version,
            "cuda_available": info.cuda_available,
            "cuda_version": info.cuda_version,
            "mps_available": info.mps_available,
            "cpu_cores": info.cpu_cores,
            "ram_total_gb": info.ram_total_gb,
            "ram_available_gb": info.ram_available_gb,
            "gpus": [
                {
                    "index": gpu.index,
                    "name": gpu.name,
                    "total_memory_gb": gpu.total_memory_gb,
                    "free_memory_gb": gpu.free_memory_gb,
                    "compute_capability": gpu.compute_capability
                }
                for gpu in info.gpus
            ],
            "capability": info.capability.value,
            "recommended_batch_size": info.recommended_batch_size,
            "max_model_size": info.max_model_size,
            "warnings": info.warnings
        }
+
+
# Global instance shared by the module-level helpers below so repeated calls
# reuse the SystemInfo cached inside SystemChecker.
system_checker = SystemChecker()
+
+
def get_system_info() -> Dict[str, Any]:
    """Get system information as a dictionary (delegates to the shared SystemChecker)."""
    return system_checker.to_dict()
+
+
def check_model_requirements(model_params_billions: float, dtype: str = "fp16") -> Dict[str, Any]:
    """Check if the system can handle a specific model.

    Args:
        model_params_billions: Parameter count in billions (e.g. 7.0 for a 7B model).
        dtype: "fp32" | "fp16" | "int8" | "int4" (unknown values fall back to the fp16 cost).
    """
    return system_checker.can_load_model(model_params_billions, dtype)
diff --git a/backend/core/visualization.py b/backend/core/visualization.py
new file mode 100644
index 0000000000000000000000000000000000000000..411c76c7234dc22e146d4ec1cf999ee949baff10
--- /dev/null
+++ b/backend/core/visualization.py
@@ -0,0 +1,277 @@
+"""
+Visualization utilities for weight matrices and quantization analysis.
+Generates chart data for frontend consumption.
+"""
+
+import torch
+import numpy as np
+from typing import Dict, Any, List, Tuple, Optional
+from dataclasses import dataclass
+import base64
+import io
+
+# Import matplotlib with non-interactive backend
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from matplotlib.colors import TwoSlopeNorm
+
+
+@dataclass
+class ChartData:
+    """Data structure for chart rendering (Plotly-style payload)."""
+    # Plotly trace type, e.g. "heatmap", "bar", "scatter"
+    chart_type: str
+    # Trace payload: z/x/y arrays, colorscale, marker options, ...
+    data: Dict[str, Any]
+    # Layout options: title, axis labels, shapes, ...
+    layout: Dict[str, Any]
+
+
+class Visualizer:
+ """Generate visualization data for weight matrices and quantization analysis"""
+
+ def __init__(self, max_display_size: int = 128):
+ """
+ Args:
+ max_display_size: Maximum dimension for heatmap display (downsampled if larger)
+ """
+ self.max_display_size = max_display_size
+
+ def weight_heatmap(self, weights: torch.Tensor, title: str = "Weight Matrix",
+ downsample: bool = True) -> ChartData:
+ """
+ Generate heatmap data for weight matrix visualization.
+ Returns Plotly-compatible data structure.
+ """
+ w = weights.detach().cpu().float().numpy()
+
+ # Downsample if too large
+ if downsample and (w.shape[0] > self.max_display_size or w.shape[1] > self.max_display_size):
+ w = self._downsample_2d(w, self.max_display_size)
+
+ # Calculate symmetric colorscale bounds - convert to Python float for JSON
+ vmax = float(max(abs(w.min()), abs(w.max())))
+
+ return ChartData(
+ chart_type="heatmap",
+ data={
+ "z": w.tolist(),
+ "colorscale": "RdBu_r",
+ "zmin": -vmax,
+ "zmax": vmax,
+ "zmid": 0
+ },
+ layout={
+ "title": title,
+ "xaxis": {"title": "Input Features"},
+ "yaxis": {"title": "Output Features"}
+ }
+ )
+
+ def weight_histogram(self, weights: torch.Tensor, title: str = "Weight Distribution",
+ bins: int = 50) -> ChartData:
+ """Generate histogram data for weight distribution"""
+ w = weights.detach().cpu().float().numpy().flatten()
+
+ hist, bin_edges = np.histogram(w, bins=bins)
+ bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
+
+ return ChartData(
+ chart_type="bar",
+ data={
+ "x": bin_centers.tolist(),
+ "y": hist.tolist(),
+ "type": "bar"
+ },
+ layout={
+ "title": title,
+ "xaxis": {"title": "Weight Value"},
+ "yaxis": {"title": "Frequency"},
+ "bargap": 0.05
+ }
+ )
+
+ def error_heatmap(self, original: torch.Tensor, quantized: torch.Tensor,
+ scales: torch.Tensor, title: str = "Quantization Error") -> ChartData:
+ """Generate error heatmap between original and dequantized weights"""
+ orig = original.detach().cpu().float()
+ quant = quantized.detach().cpu().float()
+ sc = scales.detach().cpu().float()
+
+ # Dequantize
+ if sc.dim() == 1:
+ dequant = quant * sc.unsqueeze(1)
+ else:
+ # Group quantization - expand scales
+ dequant = quant * sc.unsqueeze(-1)
+ dequant = dequant.reshape(orig.shape)
+
+ error = (orig - dequant).abs().numpy()
+
+ # Downsample if needed
+ if error.shape[0] > self.max_display_size or error.shape[1] > self.max_display_size:
+ error = self._downsample_2d(error, self.max_display_size)
+
+ return ChartData(
+ chart_type="heatmap",
+ data={
+ "z": error.tolist(),
+ "colorscale": "Reds",
+ "zmin": 0
+ },
+ layout={
+ "title": title,
+ "xaxis": {"title": "Input Features"},
+ "yaxis": {"title": "Output Features"}
+ }
+ )
+
+ def comparison_overlay(self, original: torch.Tensor, dequantized: torch.Tensor,
+ sample_size: int = 1000) -> ChartData:
+ """Generate scatter plot comparing original vs dequantized values"""
+ orig = original.detach().cpu().float().numpy().flatten()
+ deq = dequantized.detach().cpu().float().numpy().flatten()
+
+ # Sample if too large
+ if len(orig) > sample_size:
+ indices = np.random.choice(len(orig), sample_size, replace=False)
+ orig = orig[indices]
+ deq = deq[indices]
+
+ return ChartData(
+ chart_type="scatter",
+ data={
+ "x": orig.tolist(),
+ "y": deq.tolist(),
+ "mode": "markers",
+ "marker": {"size": 3, "opacity": 0.5}
+ },
+ layout={
+ "title": "Original vs Dequantized Weights",
+ "xaxis": {"title": "Original Value"},
+ "yaxis": {"title": "Dequantized Value"},
+ "shapes": [{
+ "type": "line",
+ "x0": float(orig.min()),
+ "x1": float(orig.max()),
+ "y0": float(orig.min()),
+ "y1": float(orig.max()),
+ "line": {"color": "red", "dash": "dash"}
+ }]
+ }
+ )
+
+ def scales_histogram(self, scales: torch.Tensor,
+ title: str = "Quantization Scales Distribution") -> ChartData:
+ """Generate histogram of quantization scales"""
+ s = scales.detach().cpu().float().numpy().flatten()
+
+ hist, bin_edges = np.histogram(s, bins=30)
+ bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
+
+ return ChartData(
+ chart_type="bar",
+ data={
+ "x": bin_centers.tolist(),
+ "y": hist.tolist(),
+ "marker": {"color": "green"}
+ },
+ layout={
+ "title": title,
+ "xaxis": {"title": "Scale Value"},
+ "yaxis": {"title": "Frequency"}
+ }
+ )
+
+ def layer_error_bar(self, layer_errors: Dict[str, float],
+ title: str = "Quantization Error by Layer") -> ChartData:
+ """Generate bar chart of errors per layer"""
+ layers = list(layer_errors.keys())
+ errors = list(layer_errors.values())
+
+ return ChartData(
+ chart_type="bar",
+ data={
+ "x": layers,
+ "y": errors,
+ "marker": {"color": "coral"}
+ },
+ layout={
+ "title": title,
+ "xaxis": {"title": "Layer", "tickangle": 45},
+ "yaxis": {"title": "Mean Absolute Error"}
+ }
+ )
+
+ def memory_comparison(self, original_mb: float, quantized_mb: float,
+ overhead_mb: float = 0) -> ChartData:
+ """Generate memory comparison chart"""
+ return ChartData(
+ chart_type="bar",
+ data={
+ "x": ["Original (FP32)", "Quantized + Scales", "Savings"],
+ "y": [original_mb, quantized_mb + overhead_mb, original_mb - quantized_mb - overhead_mb],
+ "marker": {"color": ["#3498db", "#2ecc71", "#e74c3c"]}
+ },
+ layout={
+ "title": "Memory Usage Comparison",
+ "yaxis": {"title": "Memory (MB)"}
+ }
+ )
+
+ def _downsample_2d(self, arr: np.ndarray, max_size: int) -> np.ndarray:
+ """Downsample 2D array to max_size x max_size"""
+ h, w = arr.shape
+
+ if h > max_size:
+ step_h = h // max_size
+ arr = arr[::step_h, :][:max_size, :]
+
+ if w > max_size:
+ step_w = w // max_size
+ arr = arr[:, ::step_w][:, :max_size]
+
+ return arr
+
+ def generate_png(self, weights: torch.Tensor, title: str = "Weights") -> bytes:
+ """Generate PNG image bytes (for backward compatibility)"""
+ w = weights.detach().cpu().float().numpy()
+
+ if w.shape[0] > self.max_display_size or w.shape[1] > self.max_display_size:
+ w = self._downsample_2d(w, self.max_display_size)
+
+ fig, ax = plt.subplots(figsize=(10, 8))
+
+ vmax = max(abs(w.min()), abs(w.max()))
+ norm = TwoSlopeNorm(vmin=-vmax, vcenter=0, vmax=vmax)
+
+ im = ax.imshow(w, cmap='RdBu_r', norm=norm)
+ plt.colorbar(im, label='Weight Value')
+ ax.set_title(title)
+
+ buf = io.BytesIO()
+ plt.savefig(buf, format='png', bbox_inches='tight')
+ plt.close(fig)
+ buf.seek(0)
+
+ return buf.getvalue()
+
+ def to_dict(self, chart: ChartData) -> Dict[str, Any]:
+ """Convert ChartData to dictionary"""
+ return {
+ "type": chart.chart_type,
+ "data": chart.data,
+ "layout": chart.layout
+ }
+
+
+# Global instance
+visualizer = Visualizer()
+
+
+def get_weight_heatmap(weights: torch.Tensor, title: str = "Weights") -> Dict[str, Any]:
+    """Generate weight heatmap data.
+
+    Module-level convenience wrapper around the shared ``visualizer``
+    instance; returns the Plotly-style dict form of the heatmap.
+    """
+    return visualizer.to_dict(visualizer.weight_heatmap(weights, title))
+
+
+def get_weight_histogram(weights: torch.Tensor, title: str = "Distribution") -> Dict[str, Any]:
+    """Generate weight histogram data.
+
+    Module-level convenience wrapper around the shared ``visualizer``
+    instance; returns the Plotly-style dict form of the histogram.
+    """
+    return visualizer.to_dict(visualizer.weight_histogram(weights, title))
diff --git a/backend/requirements.txt b/backend/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..03a1689d8b8f37d1ef3f3f40767eee66a4381223
--- /dev/null
+++ b/backend/requirements.txt
@@ -0,0 +1,11 @@
+fastapi>=0.100.0
+uvicorn>=0.23.0
+python-multipart>=0.0.6
+torch>=2.0.0
+transformers>=4.31.0
+accelerate>=0.21.0
+bitsandbytes>=0.40.0
+scipy>=1.11.0
+numpy>=1.24.0
+pydantic>=2.0.0
+jinja2>=3.1.2
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3b009839a6e1e024f876fd0d541ccdcca8a6fd64
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,78 @@
+# Docker Compose for local development with GPU support
+
+services:
+  # Full application (frontend + backend), GPU-enabled
+  app:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    ports:
+      - "7860:7860"
+    environment:
+      - CUDA_VISIBLE_DEVICES=0
+    volumes:
+      - ./models:/app/models
+      - ./cache:/app/cache
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [ gpu ]
+    restart: unless-stopped
+
+  # Development mode: separate frontend and backend
+  backend-dev:
+    build:
+      context: .
+      dockerfile: Dockerfile
+      # NOTE(review): the Dockerfile's Python stage is unnamed (its second
+      # stage is `FROM python:3.11-slim` with no `AS python-base`), so this
+      # target does not exist and the build will fail. Add `AS python-base`
+      # to that stage, or remove this `target:` line.
+      target: python-base
+    command: python -m uvicorn backend.api.main:app --host 0.0.0.0 --port 8000 --reload
+    ports:
+      - "8000:8000"
+    volumes:
+      - ./backend:/app/backend
+      - ./models:/app/models
+    environment:
+      - CUDA_VISIBLE_DEVICES=0
+    profiles:
+      - dev
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [ gpu ]
+
+  # Vite dev server with HMR; anonymous volume shields container node_modules
+  frontend-dev:
+    image: node:20-alpine
+    working_dir: /app
+    command: sh -c "npm install && npm run dev -- --host"
+    ports:
+      - "5173:5173"
+    volumes:
+      - ./frontend:/app
+      - /app/node_modules
+    environment:
+      - VITE_API_URL=http://localhost:8000/api
+    profiles:
+      - dev
+
+  # CPU-only version (no GPU)
+  app-cpu:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    ports:
+      - "7860:7860"
+    volumes:
+      - ./models:/app/models
+    profiles:
+      - cpu
+    restart: unless-stopped
+
+networks:
+  default:
+    name: quantizer-network
diff --git a/frontend/.gitignore b/frontend/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..a547bf36d8d11a4f89c59c144f24795749086dd1
--- /dev/null
+++ b/frontend/.gitignore
@@ -0,0 +1,24 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/frontend/README.md b/frontend/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..18bc70ebe277fbfe6e55e6f9a0ae7e2c3e4bdd83
--- /dev/null
+++ b/frontend/README.md
@@ -0,0 +1,16 @@
+# React + Vite
+
+This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
+
+Currently, two official plugins are available:
+
+- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) (or [oxc](https://oxc.rs) when used in [rolldown-vite](https://vite.dev/guide/rolldown)) for Fast Refresh
+- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
+
+## React Compiler
+
+The React Compiler is not enabled on this template because of its impact on dev & build performance. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
+
+## Expanding the ESLint configuration
+
+If you are developing a production application, we recommend using TypeScript with type-aware lint rules enabled. Check out the [TS template](https://github.com/vitejs/vite/tree/main/packages/create-vite/template-react-ts) for information on how to integrate TypeScript and [`typescript-eslint`](https://typescript-eslint.io) in your project.
diff --git a/frontend/eslint.config.js b/frontend/eslint.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..4fa125da29e01fa85529cfa06a83a7c0ce240d55
--- /dev/null
+++ b/frontend/eslint.config.js
@@ -0,0 +1,29 @@
+import js from '@eslint/js'
+import globals from 'globals'
+import reactHooks from 'eslint-plugin-react-hooks'
+import reactRefresh from 'eslint-plugin-react-refresh'
+import { defineConfig, globalIgnores } from 'eslint/config'
+
+// ESLint flat config for the React/Vite frontend (JS + JSX only)
+export default defineConfig([
+  // Never lint build output
+  globalIgnores(['dist']),
+  {
+    files: ['**/*.{js,jsx}'],
+    extends: [
+      js.configs.recommended,
+      reactHooks.configs.flat.recommended,
+      reactRefresh.configs.vite,
+    ],
+    languageOptions: {
+      ecmaVersion: 2020,
+      globals: globals.browser,
+      parserOptions: {
+        ecmaVersion: 'latest',
+        ecmaFeatures: { jsx: true },
+        sourceType: 'module',
+      },
+    },
+    rules: {
+      // Allow intentionally-unused bindings that start with an uppercase
+      // letter or underscore (e.g. imported components, CONSTANTS)
+      'no-unused-vars': ['error', { varsIgnorePattern: '^[A-Z_]' }],
+    },
+  },
+])
diff --git a/frontend/index.html b/frontend/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..c20fbd3a70cc5c113cd0498fbd28c49abfe0bf7d
--- /dev/null
+++ b/frontend/index.html
@@ -0,0 +1,13 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>frontend</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/main.jsx"></script>
+  </body>
+</html>
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
new file mode 100644
index 0000000000000000000000000000000000000000..d37dee9870c7afc54fdde5799a7f4785bcec7d47
--- /dev/null
+++ b/frontend/package-lock.json
@@ -0,0 +1,3858 @@
+{
+ "name": "frontend",
+ "version": "0.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "frontend",
+ "version": "0.0.0",
+ "dependencies": {
+ "axios": "^1.13.2",
+ "framer-motion": "^12.26.1",
+ "lucide-react": "^0.562.0",
+ "react": "^19.2.0",
+ "react-dom": "^19.2.0",
+ "react-hot-toast": "^2.6.0",
+ "react-router-dom": "^7.12.0",
+ "recharts": "^3.6.0",
+ "zustand": "^5.0.10"
+ },
+ "devDependencies": {
+ "@eslint/js": "^9.39.1",
+ "@types/react": "^19.2.5",
+ "@types/react-dom": "^19.2.3",
+ "@vitejs/plugin-react": "^5.1.1",
+ "buffer": "^6.0.3",
+ "eslint": "^9.39.1",
+ "eslint-plugin-react-hooks": "^7.0.1",
+ "eslint-plugin-react-refresh": "^0.4.24",
+ "globals": "^16.5.0",
+ "vite": "^7.2.4"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.28.6.tgz",
+ "integrity": "sha512-JYgintcMjRiCvS8mMECzaEn+m3PfoQiyqukOMCCVQtoJGYJw8j/8LBJEiqkHLkfwCcs74E3pbAUFNg7d9VNJ+Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.28.5",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.1.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.6.tgz",
+ "integrity": "sha512-2lfu57JtzctfIrcGMz992hyLlByuzgIk58+hhGCxjKZ3rWI82NnVLjXcaTqkI2NvlcvOskZaiZ5kjUALo3Lpxg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.6.tgz",
+ "integrity": "sha512-H3mcG6ZDLTlYfaSNi0iOKkigqMFvkTKlGUYlD8GW7nNOYRrevuA46iTypPyv+06V3fEmvvazfntkBU34L0azAw==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.28.6",
+ "@babel/generator": "^7.28.6",
+ "@babel/helper-compilation-targets": "^7.28.6",
+ "@babel/helper-module-transforms": "^7.28.6",
+ "@babel/helpers": "^7.28.6",
+ "@babel/parser": "^7.28.6",
+ "@babel/template": "^7.28.6",
+ "@babel/traverse": "^7.28.6",
+ "@babel/types": "^7.28.6",
+ "@jridgewell/remapping": "^2.3.5",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.6.tgz",
+ "integrity": "sha512-lOoVRwADj8hjf7al89tvQ2a1lf53Z+7tiXMgpZJL3maQPDxh0DgLMN62B2MKUOFcoodBHLMbDM6WAbKgNy5Suw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.28.6",
+ "@babel/types": "^7.28.6",
+ "@jridgewell/gen-mapping": "^0.3.12",
+ "@jridgewell/trace-mapping": "^0.3.28",
+ "jsesc": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz",
+ "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/compat-data": "^7.28.6",
+ "@babel/helper-validator-option": "^7.27.1",
+ "browserslist": "^4.24.0",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-globals": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
+ "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz",
+ "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/traverse": "^7.28.6",
+ "@babel/types": "^7.28.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz",
+ "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.28.6",
+ "@babel/helper-validator-identifier": "^7.28.5",
+ "@babel/traverse": "^7.28.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz",
+ "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
+ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
+ "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz",
+ "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/template": "^7.28.6",
+ "@babel/types": "^7.28.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.6.tgz",
+ "integrity": "sha512-TeR9zWR18BvbfPmGbLampPMW+uW1NZnJlRuuHso8i87QZNq2JRF9i6RgxRqtEq+wQGsS19NNTWr2duhnE49mfQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.6"
+ },
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-jsx-self": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz",
+ "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-jsx-source": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz",
+ "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz",
+ "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.28.6",
+ "@babel/parser": "^7.28.6",
+ "@babel/types": "^7.28.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.6.tgz",
+ "integrity": "sha512-fgWX62k02qtjqdSNTAGxmKYY/7FSL9WAS1o2Hu5+I5m9T0yxZzr4cnrfXQ/MX0rIifthCSs6FKTlzYbJcPtMNg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.28.6",
+ "@babel/generator": "^7.28.6",
+ "@babel/helper-globals": "^7.28.0",
+ "@babel/parser": "^7.28.6",
+ "@babel/template": "^7.28.6",
+ "@babel/types": "^7.28.6",
+ "debug": "^4.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.28.6",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.6.tgz",
+ "integrity": "sha512-0ZrskXVEHSWIqZM/sQZ4EV3jZJXRkio/WCxaqKZP1g//CEWEPSfeZFcms4XeKBCHU0ZKnIkdJeU/kF+eRp5lBg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.28.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz",
+ "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz",
+ "integrity": "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz",
+ "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz",
+ "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz",
+ "integrity": "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz",
+ "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz",
+ "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz",
+ "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz",
+ "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz",
+ "integrity": "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz",
+ "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz",
+ "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz",
+ "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz",
+ "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz",
+ "integrity": "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz",
+ "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz",
+ "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz",
+ "integrity": "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz",
+ "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz",
+ "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz",
+ "integrity": "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openharmony-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz",
+ "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz",
+ "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz",
+ "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz",
+ "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz",
+ "integrity": "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@eslint-community/eslint-utils": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz",
+ "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "eslint-visitor-keys": "^3.4.3"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0"
+ }
+ },
+ "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": {
+ "version": "3.4.3",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz",
+ "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/@eslint-community/regexpp": {
+ "version": "4.12.2",
+ "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz",
+ "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^12.0.0 || ^14.0.0 || >=16.0.0"
+ }
+ },
+ "node_modules/@eslint/config-array": {
+ "version": "0.21.1",
+ "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz",
+ "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@eslint/object-schema": "^2.1.7",
+ "debug": "^4.3.1",
+ "minimatch": "^3.1.2"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@eslint/config-helpers": {
+ "version": "0.4.2",
+ "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz",
+ "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@eslint/core": "^0.17.0"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@eslint/core": {
+ "version": "0.17.0",
+ "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz",
+ "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@types/json-schema": "^7.0.15"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@eslint/eslintrc": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz",
+ "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ajv": "^6.12.4",
+ "debug": "^4.3.2",
+ "espree": "^10.0.1",
+ "globals": "^14.0.0",
+ "ignore": "^5.2.0",
+ "import-fresh": "^3.2.1",
+ "js-yaml": "^4.1.1",
+ "minimatch": "^3.1.2",
+ "strip-json-comments": "^3.1.1"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/@eslint/eslintrc/node_modules/globals": {
+ "version": "14.0.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz",
+ "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/@eslint/js": {
+ "version": "9.39.2",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz",
+ "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://eslint.org/donate"
+ }
+ },
+ "node_modules/@eslint/object-schema": {
+ "version": "2.1.7",
+ "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz",
+ "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@eslint/plugin-kit": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz",
+ "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@eslint/core": "^0.17.0",
+ "levn": "^0.4.1"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@humanfs/core": {
+ "version": "0.19.1",
+ "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz",
+ "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=18.18.0"
+ }
+ },
+ "node_modules/@humanfs/node": {
+ "version": "0.16.7",
+ "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz",
+ "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@humanfs/core": "^0.19.1",
+ "@humanwhocodes/retry": "^0.4.0"
+ },
+ "engines": {
+ "node": ">=18.18.0"
+ }
+ },
+ "node_modules/@humanwhocodes/module-importer": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz",
+ "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=12.22"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/nzakas"
+ }
+ },
+ "node_modules/@humanwhocodes/retry": {
+ "version": "0.4.3",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz",
+ "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=18.18"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/nzakas"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.13",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
+ "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.0",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/remapping": {
+ "version": "2.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
+ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.31",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@reduxjs/toolkit": {
+ "version": "2.11.2",
+ "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.11.2.tgz",
+ "integrity": "sha512-Kd6kAHTA6/nUpp8mySPqj3en3dm0tdMIgbttnQ1xFMVpufoj+ADi8pXLBsd4xzTRHQa7t/Jv8W5UnCuW4kuWMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@standard-schema/spec": "^1.0.0",
+ "@standard-schema/utils": "^0.3.0",
+ "immer": "^11.0.0",
+ "redux": "^5.0.1",
+ "redux-thunk": "^3.1.0",
+ "reselect": "^5.1.0"
+ },
+ "peerDependencies": {
+ "react": "^16.9.0 || ^17.0.0 || ^18 || ^19",
+ "react-redux": "^7.2.1 || ^8.1.3 || ^9.0.0"
+ },
+ "peerDependenciesMeta": {
+ "react": {
+ "optional": true
+ },
+ "react-redux": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@reduxjs/toolkit/node_modules/immer": {
+ "version": "11.1.3",
+ "resolved": "https://registry.npmjs.org/immer/-/immer-11.1.3.tgz",
+ "integrity": "sha512-6jQTc5z0KJFtr1UgFpIL3N9XSC3saRaI9PwWtzM2pSqkNGtiNkYY2OSwkOGDK2XcTRcLb1pi/aNkKZz0nxVH4Q==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/immer"
+ }
+ },
+ "node_modules/@rolldown/pluginutils": {
+ "version": "1.0.0-beta.53",
+ "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.53.tgz",
+ "integrity": "sha512-vENRlFU4YbrwVqNDZ7fLvy+JR1CRkyr01jhSiDpE1u6py3OMzQfztQU2jxykW3ALNxO4kSlqIDeYyD0Y9RcQeQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@rollup/rollup-android-arm-eabi": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.55.1.tgz",
+ "integrity": "sha512-9R0DM/ykwfGIlNu6+2U09ga0WXeZ9MRC2Ter8jnz8415VbuIykVuc6bhdrbORFZANDmTDvq26mJrEVTl8TdnDg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-android-arm64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.55.1.tgz",
+ "integrity": "sha512-eFZCb1YUqhTysgW3sj/55du5cG57S7UTNtdMjCW7LwVcj3dTTcowCsC8p7uBdzKsZYa8J7IDE8lhMI+HX1vQvg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-arm64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.55.1.tgz",
+ "integrity": "sha512-p3grE2PHcQm2e8PSGZdzIhCKbMCw/xi9XvMPErPhwO17vxtvCN5FEA2mSLgmKlCjHGMQTP6phuQTYWUnKewwGg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-x64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.55.1.tgz",
+ "integrity": "sha512-rDUjG25C9qoTm+e02Esi+aqTKSBYwVTaoS1wxcN47/Luqef57Vgp96xNANwt5npq9GDxsH7kXxNkJVEsWEOEaQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-arm64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.55.1.tgz",
+ "integrity": "sha512-+JiU7Jbp5cdxekIgdte0jfcu5oqw4GCKr6i3PJTlXTCU5H5Fvtkpbs4XJHRmWNXF+hKmn4v7ogI5OQPaupJgOg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-x64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.55.1.tgz",
+ "integrity": "sha512-V5xC1tOVWtLLmr3YUk2f6EJK4qksksOYiz/TCsFHu/R+woubcLWdC9nZQmwjOAbmExBIVKsm1/wKmEy4z4u4Bw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.55.1.tgz",
+ "integrity": "sha512-Rn3n+FUk2J5VWx+ywrG/HGPTD9jXNbicRtTM11e/uorplArnXZYsVifnPPqNNP5BsO3roI4n8332ukpY/zN7rQ==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-musleabihf": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.55.1.tgz",
+ "integrity": "sha512-grPNWydeKtc1aEdrJDWk4opD7nFtQbMmV7769hiAaYyUKCT1faPRm2av8CX1YJsZ4TLAZcg9gTR1KvEzoLjXkg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.55.1.tgz",
+ "integrity": "sha512-a59mwd1k6x8tXKcUxSyISiquLwB5pX+fJW9TkWU46lCqD/GRDe9uDN31jrMmVP3feI3mhAdvcCClhV8V5MhJFQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.55.1.tgz",
+ "integrity": "sha512-puS1MEgWX5GsHSoiAsF0TYrpomdvkaXm0CofIMG5uVkP6IBV+ZO9xhC5YEN49nsgYo1DuuMquF9+7EDBVYu4uA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-loong64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.55.1.tgz",
+ "integrity": "sha512-r3Wv40in+lTsULSb6nnoudVbARdOwb2u5fpeoOAZjFLznp6tDU8kd+GTHmJoqZ9lt6/Sys33KdIHUaQihFcu7g==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-loong64-musl": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.55.1.tgz",
+ "integrity": "sha512-MR8c0+UxAlB22Fq4R+aQSPBayvYa3+9DrwG/i1TKQXFYEaoW3B5b/rkSRIypcZDdWjWnpcvxbNaAJDcSbJU3Lw==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-ppc64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.55.1.tgz",
+ "integrity": "sha512-3KhoECe1BRlSYpMTeVrD4sh2Pw2xgt4jzNSZIIPLFEsnQn9gAnZagW9+VqDqAHgm1Xc77LzJOo2LdigS5qZ+gw==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-ppc64-musl": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.55.1.tgz",
+ "integrity": "sha512-ziR1OuZx0vdYZZ30vueNZTg73alF59DicYrPViG0NEgDVN8/Jl87zkAPu4u6VjZST2llgEUjaiNl9JM6HH1Vdw==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.55.1.tgz",
+ "integrity": "sha512-uW0Y12ih2XJRERZ4jAfKamTyIHVMPQnTZcQjme2HMVDAHY4amf5u414OqNYC+x+LzRdRcnIG1YodLrrtA8xsxw==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-musl": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.55.1.tgz",
+ "integrity": "sha512-u9yZ0jUkOED1BFrqu3BwMQoixvGHGZ+JhJNkNKY/hyoEgOwlqKb62qu+7UjbPSHYjiVy8kKJHvXKv5coH4wDeg==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.55.1.tgz",
+ "integrity": "sha512-/0PenBCmqM4ZUd0190j7J0UsQ/1nsi735iPRakO8iPciE7BQ495Y6msPzaOmvx0/pn+eJVVlZrNrSh4WSYLxNg==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.55.1.tgz",
+ "integrity": "sha512-a8G4wiQxQG2BAvo+gU6XrReRRqj+pLS2NGXKm8io19goR+K8lw269eTrPkSdDTALwMmJp4th2Uh0D8J9bEV1vg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-musl": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.55.1.tgz",
+ "integrity": "sha512-bD+zjpFrMpP/hqkfEcnjXWHMw5BIghGisOKPj+2NaNDuVT+8Ds4mPf3XcPHuat1tz89WRL+1wbcxKY3WSbiT7w==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-openbsd-x64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.55.1.tgz",
+ "integrity": "sha512-eLXw0dOiqE4QmvikfQ6yjgkg/xDM+MdU9YJuP4ySTibXU0oAvnEWXt7UDJmD4UkYialMfOGFPJnIHSe/kdzPxg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-openharmony-arm64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.55.1.tgz",
+ "integrity": "sha512-xzm44KgEP11te3S2HCSyYf5zIzWmx3n8HDCc7EE59+lTcswEWNpvMLfd9uJvVX8LCg9QWG67Xt75AuHn4vgsXw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.55.1.tgz",
+ "integrity": "sha512-yR6Bl3tMC/gBok5cz/Qi0xYnVbIxGx5Fcf/ca0eB6/6JwOY+SRUcJfI0OpeTpPls7f194as62thCt/2BjxYN8g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.55.1.tgz",
+ "integrity": "sha512-3fZBidchE0eY0oFZBnekYCfg+5wAB0mbpCBuofh5mZuzIU/4jIVkbESmd2dOsFNS78b53CYv3OAtwqkZZmU5nA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.55.1.tgz",
+ "integrity": "sha512-xGGY5pXj69IxKb4yv/POoocPy/qmEGhimy/FoTpTSVju3FYXUQQMFCaZZXJVidsmGxRioZAwpThl/4zX41gRKg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.55.1.tgz",
+ "integrity": "sha512-SPEpaL6DX4rmcXtnhdrQYgzQ5W2uW3SCJch88lB2zImhJRhIIK44fkUrgIV/Q8yUNfw5oyZ5vkeQsZLhCb06lw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@standard-schema/spec": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz",
+ "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==",
+ "license": "MIT"
+ },
+ "node_modules/@standard-schema/utils": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/@standard-schema/utils/-/utils-0.3.0.tgz",
+ "integrity": "sha512-e7Mew686owMaPJVNNLs55PUvgz371nKgwsc4vxE49zsODpJEnxgxRo2y/OKrqueavXgZNMDVj3DdHFlaSAeU8g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/babel__core": {
+ "version": "7.20.5",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
+ "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "node_modules/@types/babel__generator": {
+ "version": "7.27.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
+ "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__template": {
+ "version": "7.4.4",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
+ "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__traverse": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
+ "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.2"
+ }
+ },
+ "node_modules/@types/d3-array": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz",
+ "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-color": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz",
+ "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-ease": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz",
+ "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-interpolate": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz",
+ "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-color": "*"
+ }
+ },
+ "node_modules/@types/d3-path": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz",
+ "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-scale": {
+ "version": "4.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz",
+ "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-time": "*"
+ }
+ },
+ "node_modules/@types/d3-shape": {
+ "version": "3.1.8",
+ "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.8.tgz",
+ "integrity": "sha512-lae0iWfcDeR7qt7rA88BNiqdvPS5pFVPpo5OfjElwNaT2yyekbM0C9vK+yqBqEmHr6lDkRnYNoTBYlAgJa7a4w==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-path": "*"
+ }
+ },
+ "node_modules/@types/d3-time": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz",
+ "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-timer": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz",
+ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/json-schema": {
+ "version": "7.0.15",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz",
+ "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/react": {
+ "version": "19.2.8",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.8.tgz",
+ "integrity": "sha512-3MbSL37jEchWZz2p2mjntRZtPt837ij10ApxKfgmXCTuHWagYg7iA5bqPw6C8BMPfwidlvfPI/fxOc42HLhcyg==",
+ "devOptional": true,
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "csstype": "^3.2.2"
+ }
+ },
+ "node_modules/@types/react-dom": {
+ "version": "19.2.3",
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz",
+ "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==",
+ "dev": true,
+ "license": "MIT",
+ "peerDependencies": {
+ "@types/react": "^19.2.0"
+ }
+ },
+ "node_modules/@types/use-sync-external-store": {
+ "version": "0.0.6",
+ "resolved": "https://registry.npmjs.org/@types/use-sync-external-store/-/use-sync-external-store-0.0.6.tgz",
+ "integrity": "sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg==",
+ "license": "MIT"
+ },
+ "node_modules/@vitejs/plugin-react": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-5.1.2.tgz",
+ "integrity": "sha512-EcA07pHJouywpzsoTUqNh5NwGayl2PPVEJKUSinGGSxFGYn+shYbqMGBg6FXDqgXum9Ou/ecb+411ssw8HImJQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.28.5",
+ "@babel/plugin-transform-react-jsx-self": "^7.27.1",
+ "@babel/plugin-transform-react-jsx-source": "^7.27.1",
+ "@rolldown/pluginutils": "1.0.0-beta.53",
+ "@types/babel__core": "^7.20.5",
+ "react-refresh": "^0.18.0"
+ },
+ "engines": {
+ "node": "^20.19.0 || >=22.12.0"
+ },
+ "peerDependencies": {
+ "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0"
+ }
+ },
+ "node_modules/acorn": {
+ "version": "8.15.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
+ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true,
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/acorn-jsx": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
+ "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
+ "dev": true,
+ "license": "MIT",
+ "peerDependencies": {
+ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+ "dev": true,
+ "license": "Python-2.0"
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "license": "MIT"
+ },
+ "node_modules/axios": {
+ "version": "1.13.2",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz",
+ "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==",
+ "license": "MIT",
+ "dependencies": {
+ "follow-redirects": "^1.15.6",
+ "form-data": "^4.0.4",
+ "proxy-from-env": "^1.1.0"
+ }
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/base64-js": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
+ "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/baseline-browser-mapping": {
+ "version": "2.9.14",
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.14.tgz",
+ "integrity": "sha512-B0xUquLkiGLgHhpPBqvl7GWegWBUNuujQ6kXd/r1U38ElPT6Ok8KZ8e+FpUGEc2ZoRQUzq/aUnaKFc/svWUGSg==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "baseline-browser-mapping": "dist/cli.js"
+ }
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.28.1",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz",
+ "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "baseline-browser-mapping": "^2.9.0",
+ "caniuse-lite": "^1.0.30001759",
+ "electron-to-chromium": "^1.5.263",
+ "node-releases": "^2.0.27",
+ "update-browserslist-db": "^1.2.0"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/buffer": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz",
+ "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "base64-js": "^1.3.1",
+ "ieee754": "^1.2.1"
+ }
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001764",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001764.tgz",
+ "integrity": "sha512-9JGuzl2M+vPL+pz70gtMF9sHdMFbY9FJaQBi186cHKH3pSzDvzoUJUPV6fqiKIMyXbud9ZLg4F3Yza1vJ1+93g==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "CC-BY-4.0"
+ },
+ "node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/clsx": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz",
+ "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/cookie": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz",
+ "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/csstype": {
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
+ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
+ "license": "MIT",
+ "peer": true
+ },
+ "node_modules/d3-array": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz",
+ "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==",
+ "license": "ISC",
+ "dependencies": {
+ "internmap": "1 - 2"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-ease": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
+ "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-format": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz",
+ "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-path": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz",
+ "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-scale": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
+ "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-array": "2.10.0 - 3",
+ "d3-format": "1 - 3",
+ "d3-interpolate": "1.2.0 - 3",
+ "d3-time": "2.1.1 - 3",
+ "d3-time-format": "2 - 4"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-shape": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz",
+ "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-path": "^3.1.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-time": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz",
+ "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-array": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-time-format": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz",
+ "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-time": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-timer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
+ "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decimal.js-light": {
+ "version": "2.5.1",
+ "resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz",
+ "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==",
+ "license": "MIT"
+ },
+ "node_modules/deep-is": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
+ "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.267",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz",
+ "integrity": "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-toolkit": {
+ "version": "1.43.0",
+ "resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.43.0.tgz",
+ "integrity": "sha512-SKCT8AsWvYzBBuUqMk4NPwFlSdqLpJwmy6AP322ERn8W2YLIB6JBXnwMI2Qsh2gfphT3q7EKAxKb23cvFHFwKA==",
+ "license": "MIT",
+ "workspaces": [
+ "docs",
+ "benchmarks"
+ ]
+ },
+ "node_modules/esbuild": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz",
+ "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.27.2",
+ "@esbuild/android-arm": "0.27.2",
+ "@esbuild/android-arm64": "0.27.2",
+ "@esbuild/android-x64": "0.27.2",
+ "@esbuild/darwin-arm64": "0.27.2",
+ "@esbuild/darwin-x64": "0.27.2",
+ "@esbuild/freebsd-arm64": "0.27.2",
+ "@esbuild/freebsd-x64": "0.27.2",
+ "@esbuild/linux-arm": "0.27.2",
+ "@esbuild/linux-arm64": "0.27.2",
+ "@esbuild/linux-ia32": "0.27.2",
+ "@esbuild/linux-loong64": "0.27.2",
+ "@esbuild/linux-mips64el": "0.27.2",
+ "@esbuild/linux-ppc64": "0.27.2",
+ "@esbuild/linux-riscv64": "0.27.2",
+ "@esbuild/linux-s390x": "0.27.2",
+ "@esbuild/linux-x64": "0.27.2",
+ "@esbuild/netbsd-arm64": "0.27.2",
+ "@esbuild/netbsd-x64": "0.27.2",
+ "@esbuild/openbsd-arm64": "0.27.2",
+ "@esbuild/openbsd-x64": "0.27.2",
+ "@esbuild/openharmony-arm64": "0.27.2",
+ "@esbuild/sunos-x64": "0.27.2",
+ "@esbuild/win32-arm64": "0.27.2",
+ "@esbuild/win32-ia32": "0.27.2",
+ "@esbuild/win32-x64": "0.27.2"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/eslint": {
+ "version": "9.39.2",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz",
+ "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "@eslint-community/eslint-utils": "^4.8.0",
+ "@eslint-community/regexpp": "^4.12.1",
+ "@eslint/config-array": "^0.21.1",
+ "@eslint/config-helpers": "^0.4.2",
+ "@eslint/core": "^0.17.0",
+ "@eslint/eslintrc": "^3.3.1",
+ "@eslint/js": "9.39.2",
+ "@eslint/plugin-kit": "^0.4.1",
+ "@humanfs/node": "^0.16.6",
+ "@humanwhocodes/module-importer": "^1.0.1",
+ "@humanwhocodes/retry": "^0.4.2",
+ "@types/estree": "^1.0.6",
+ "ajv": "^6.12.4",
+ "chalk": "^4.0.0",
+ "cross-spawn": "^7.0.6",
+ "debug": "^4.3.2",
+ "escape-string-regexp": "^4.0.0",
+ "eslint-scope": "^8.4.0",
+ "eslint-visitor-keys": "^4.2.1",
+ "espree": "^10.4.0",
+ "esquery": "^1.5.0",
+ "esutils": "^2.0.2",
+ "fast-deep-equal": "^3.1.3",
+ "file-entry-cache": "^8.0.0",
+ "find-up": "^5.0.0",
+ "glob-parent": "^6.0.2",
+ "ignore": "^5.2.0",
+ "imurmurhash": "^0.1.4",
+ "is-glob": "^4.0.0",
+ "json-stable-stringify-without-jsonify": "^1.0.1",
+ "lodash.merge": "^4.6.2",
+ "minimatch": "^3.1.2",
+ "natural-compare": "^1.4.0",
+ "optionator": "^0.9.3"
+ },
+ "bin": {
+ "eslint": "bin/eslint.js"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://eslint.org/donate"
+ },
+ "peerDependencies": {
+ "jiti": "*"
+ },
+ "peerDependenciesMeta": {
+ "jiti": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/eslint-plugin-react-hooks": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-7.0.1.tgz",
+ "integrity": "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.24.4",
+ "@babel/parser": "^7.24.4",
+ "hermes-parser": "^0.25.1",
+ "zod": "^3.25.0 || ^4.0.0",
+ "zod-validation-error": "^3.5.0 || ^4.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0"
+ }
+ },
+ "node_modules/eslint-plugin-react-refresh": {
+ "version": "0.4.26",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.26.tgz",
+ "integrity": "sha512-1RETEylht2O6FM/MvgnyvT+8K21wLqDNg4qD51Zj3guhjt433XbnnkVttHMyaVyAFD03QSV4LPS5iE3VQmO7XQ==",
+ "dev": true,
+ "license": "MIT",
+ "peerDependencies": {
+ "eslint": ">=8.40"
+ }
+ },
+ "node_modules/eslint-scope": {
+ "version": "8.4.0",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz",
+ "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "dependencies": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^5.2.0"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/eslint-visitor-keys": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz",
+ "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/espree": {
+ "version": "10.4.0",
+ "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz",
+ "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "dependencies": {
+ "acorn": "^8.15.0",
+ "acorn-jsx": "^5.3.2",
+ "eslint-visitor-keys": "^4.2.1"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/esquery": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz",
+ "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "estraverse": "^5.1.0"
+ },
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/esrecurse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "dependencies": {
+ "estraverse": "^5.2.0"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/estraverse": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+ "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/eventemitter3": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz",
+ "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==",
+ "license": "MIT"
+ },
+ "node_modules/fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/fast-levenshtein": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
+ "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/fdir": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
+ "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12.0.0"
+ },
+ "peerDependencies": {
+ "picomatch": "^3 || ^4"
+ },
+ "peerDependenciesMeta": {
+ "picomatch": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/file-entry-cache": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz",
+ "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "flat-cache": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=16.0.0"
+ }
+ },
+ "node_modules/find-up": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
+ "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "locate-path": "^6.0.0",
+ "path-exists": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/flat-cache": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz",
+ "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "flatted": "^3.2.9",
+ "keyv": "^4.5.4"
+ },
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/flatted": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz",
+ "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/follow-redirects": {
+ "version": "1.15.11",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz",
+ "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==",
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://github.com/sponsors/RubenVerborgh"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=4.0"
+ },
+ "peerDependenciesMeta": {
+ "debug": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
+ "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/framer-motion": {
+ "version": "12.26.1",
+ "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-12.26.1.tgz",
+ "integrity": "sha512-Uzc8wGldU4FpmGotthjjcj0SZhigcODjqvKT7lzVZHsmYkzQMFfMIv0vHQoXCeoe/Ahxqp4by4A6QbzFA/lblw==",
+ "license": "MIT",
+ "dependencies": {
+ "motion-dom": "^12.24.11",
+ "motion-utils": "^12.24.10",
+ "tslib": "^2.4.0"
+ },
+ "peerDependencies": {
+ "@emotion/is-prop-valid": "*",
+ "react": "^18.0.0 || ^19.0.0",
+ "react-dom": "^18.0.0 || ^19.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@emotion/is-prop-valid": {
+ "optional": true
+ },
+ "react": {
+ "optional": true
+ },
+ "react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "license": "MIT",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "is-glob": "^4.0.3"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/globals": {
+ "version": "16.5.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-16.5.0.tgz",
+ "integrity": "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/goober": {
+ "version": "2.1.18",
+ "resolved": "https://registry.npmjs.org/goober/-/goober-2.1.18.tgz",
+ "integrity": "sha512-2vFqsaDVIT9Gz7N6kAL++pLpp41l3PfDuusHcjnGLfR6+huZkl6ziX+zgVC3ZxpqWhzH6pyDdGrCeDhMIvwaxw==",
+ "license": "MIT",
+ "peerDependencies": {
+ "csstype": "^3.0.10"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "license": "MIT",
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/hermes-estree": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/hermes-estree/-/hermes-estree-0.25.1.tgz",
+ "integrity": "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/hermes-parser": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/hermes-parser/-/hermes-parser-0.25.1.tgz",
+ "integrity": "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "hermes-estree": "0.25.1"
+ }
+ },
+ "node_modules/ieee754": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
+ "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/ignore": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz",
+ "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/immer": {
+ "version": "10.2.0",
+ "resolved": "https://registry.npmjs.org/immer/-/immer-10.2.0.tgz",
+ "integrity": "sha512-d/+XTN3zfODyjr89gM3mPq1WNX2B8pYsu7eORitdwyA2sBubnTl3laYlBk4sXY5FUa5qTZGBDPJICVbvqzjlbw==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/immer"
+ }
+ },
+ "node_modules/import-fresh": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz",
+ "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "parent-module": "^1.0.0",
+ "resolve-from": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.8.19"
+ }
+ },
+ "node_modules/internmap": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
+ "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "is-extglob": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/js-yaml": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
+ "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "argparse": "^2.0.1"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsesc": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
+ "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/json-buffer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz",
+ "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/json-stable-stringify-without-jsonify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
+ "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/keyv": {
+ "version": "4.5.4",
+ "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz",
+ "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "json-buffer": "3.0.1"
+ }
+ },
+ "node_modules/levn": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
+ "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "prelude-ls": "^1.2.1",
+ "type-check": "~0.4.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/locate-path": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
+ "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "p-locate": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/lodash.merge": {
+ "version": "4.6.2",
+ "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
+ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/lucide-react": {
+ "version": "0.562.0",
+ "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.562.0.tgz",
+ "integrity": "sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==",
+ "license": "ISC",
+ "peerDependencies": {
+ "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/motion-dom": {
+ "version": "12.24.11",
+ "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-12.24.11.tgz",
+ "integrity": "sha512-DlWOmsXMJrV8lzZyd+LKjG2CXULUs++bkq8GZ2Sr0R0RRhs30K2wtY+LKiTjhmJU3W61HK+rB0GLz6XmPvTA1A==",
+ "license": "MIT",
+ "dependencies": {
+ "motion-utils": "^12.24.10"
+ }
+ },
+ "node_modules/motion-utils": {
+ "version": "12.24.10",
+ "resolved": "https://registry.npmjs.org/motion-utils/-/motion-utils-12.24.10.tgz",
+ "integrity": "sha512-x5TFgkCIP4pPsRLpKoI86jv/q8t8FQOiM/0E8QKBzfMozWHfkKap2gA1hOki+B5g3IsBNpxbUnfOum1+dgvYww==",
+ "license": "MIT"
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.11",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/natural-compare": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+ "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.27",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
+ "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/optionator": {
+ "version": "0.9.4",
+ "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz",
+ "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "deep-is": "^0.1.3",
+ "fast-levenshtein": "^2.0.6",
+ "levn": "^0.4.1",
+ "prelude-ls": "^1.2.1",
+ "type-check": "^0.4.0",
+ "word-wrap": "^1.2.5"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/p-limit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "yocto-queue": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-locate": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
+ "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "p-limit": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/parent-module": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "callsites": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/picomatch": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/postcss": {
+ "version": "8.5.6",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
+ "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "nanoid": "^3.3.11",
+ "picocolors": "^1.1.1",
+ "source-map-js": "^1.2.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/prelude-ls": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
+ "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/proxy-from-env": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
+ "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
+ "license": "MIT"
+ },
+ "node_modules/punycode": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
+ "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/react": {
+ "version": "19.2.3",
+ "resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz",
+ "integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==",
+ "license": "MIT",
+ "peer": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-dom": {
+ "version": "19.2.3",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz",
+ "integrity": "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==",
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "scheduler": "^0.27.0"
+ },
+ "peerDependencies": {
+ "react": "^19.2.3"
+ }
+ },
+ "node_modules/react-hot-toast": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/react-hot-toast/-/react-hot-toast-2.6.0.tgz",
+ "integrity": "sha512-bH+2EBMZ4sdyou/DPrfgIouFpcRLCJ+HoCA32UoAYHn6T3Ur5yfcDCeSr5mwldl6pFOsiocmrXMuoCJ1vV8bWg==",
+ "license": "MIT",
+ "dependencies": {
+ "csstype": "^3.1.3",
+ "goober": "^2.1.16"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "react": ">=16",
+ "react-dom": ">=16"
+ }
+ },
+ "node_modules/react-is": {
+ "version": "19.2.3",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-19.2.3.tgz",
+ "integrity": "sha512-qJNJfu81ByyabuG7hPFEbXqNcWSU3+eVus+KJs+0ncpGfMyYdvSmxiJxbWR65lYi1I+/0HBcliO029gc4F+PnA==",
+ "license": "MIT",
+ "peer": true
+ },
+ "node_modules/react-redux": {
+ "version": "9.2.0",
+ "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-9.2.0.tgz",
+ "integrity": "sha512-ROY9fvHhwOD9ySfrF0wmvu//bKCQ6AeZZq1nJNtbDC+kk5DuSuNX/n6YWYF/SYy7bSba4D4FSz8DJeKY/S/r+g==",
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "@types/use-sync-external-store": "^0.0.6",
+ "use-sync-external-store": "^1.4.0"
+ },
+ "peerDependencies": {
+ "@types/react": "^18.2.25 || ^19",
+ "react": "^18.0 || ^19",
+ "redux": "^5.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "redux": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/react-refresh": {
+ "version": "0.18.0",
+ "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.18.0.tgz",
+ "integrity": "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-router": {
+ "version": "7.12.0",
+ "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.12.0.tgz",
+ "integrity": "sha512-kTPDYPFzDVGIIGNLS5VJykK0HfHLY5MF3b+xj0/tTyNYL1gF1qs7u67Z9jEhQk2sQ98SUaHxlG31g1JtF7IfVw==",
+ "license": "MIT",
+ "dependencies": {
+ "cookie": "^1.0.1",
+ "set-cookie-parser": "^2.6.0"
+ },
+ "engines": {
+ "node": ">=20.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=18",
+ "react-dom": ">=18"
+ },
+ "peerDependenciesMeta": {
+ "react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/react-router-dom": {
+ "version": "7.12.0",
+ "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.12.0.tgz",
+ "integrity": "sha512-pfO9fiBcpEfX4Tx+iTYKDtPbrSLLCbwJ5EqP+SPYQu1VYCXdy79GSj0wttR0U4cikVdlImZuEZ/9ZNCgoaxwBA==",
+ "license": "MIT",
+ "dependencies": {
+ "react-router": "7.12.0"
+ },
+ "engines": {
+ "node": ">=20.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=18",
+ "react-dom": ">=18"
+ }
+ },
+ "node_modules/recharts": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/recharts/-/recharts-3.6.0.tgz",
+ "integrity": "sha512-L5bjxvQRAe26RlToBAziKUB7whaGKEwD3znoM6fz3DrTowCIC/FnJYnuq1GEzB8Zv2kdTfaxQfi5GoH0tBinyg==",
+ "license": "MIT",
+ "workspaces": [
+ "www"
+ ],
+ "dependencies": {
+ "@reduxjs/toolkit": "1.x.x || 2.x.x",
+ "clsx": "^2.1.1",
+ "decimal.js-light": "^2.5.1",
+ "es-toolkit": "^1.39.3",
+ "eventemitter3": "^5.0.1",
+ "immer": "^10.1.1",
+ "react-redux": "8.x.x || 9.x.x",
+ "reselect": "5.1.1",
+ "tiny-invariant": "^1.3.3",
+ "use-sync-external-store": "^1.2.2",
+ "victory-vendor": "^37.0.2"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0",
+ "react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0",
+ "react-is": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/redux": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/redux/-/redux-5.0.1.tgz",
+ "integrity": "sha512-M9/ELqF6fy8FwmkpnF0S3YKOqMyoWJ4+CS5Efg2ct3oY9daQvd/Pc71FpGZsVsbl3Cpb+IIcjBDUnnyBdQbq4w==",
+ "license": "MIT",
+ "peer": true
+ },
+ "node_modules/redux-thunk": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/redux-thunk/-/redux-thunk-3.1.0.tgz",
+ "integrity": "sha512-NW2r5T6ksUKXCabzhL9z+h206HQw/NJkcLm1GPImRQ8IzfXwRGqjVhKJGauHirT0DAuyy6hjdnMZaRoAcy0Klw==",
+ "license": "MIT",
+ "peerDependencies": {
+ "redux": "^5.0.0"
+ }
+ },
+ "node_modules/reselect": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.1.tgz",
+ "integrity": "sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==",
+ "license": "MIT"
+ },
+ "node_modules/resolve-from": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/rollup": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.55.1.tgz",
+ "integrity": "sha512-wDv/Ht1BNHB4upNbK74s9usvl7hObDnvVzknxqY/E/O3X6rW1U1rV1aENEfJ54eFZDTNo7zv1f5N4edCluH7+A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "1.0.8"
+ },
+ "bin": {
+ "rollup": "dist/bin/rollup"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-android-arm-eabi": "4.55.1",
+ "@rollup/rollup-android-arm64": "4.55.1",
+ "@rollup/rollup-darwin-arm64": "4.55.1",
+ "@rollup/rollup-darwin-x64": "4.55.1",
+ "@rollup/rollup-freebsd-arm64": "4.55.1",
+ "@rollup/rollup-freebsd-x64": "4.55.1",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.55.1",
+ "@rollup/rollup-linux-arm-musleabihf": "4.55.1",
+ "@rollup/rollup-linux-arm64-gnu": "4.55.1",
+ "@rollup/rollup-linux-arm64-musl": "4.55.1",
+ "@rollup/rollup-linux-loong64-gnu": "4.55.1",
+ "@rollup/rollup-linux-loong64-musl": "4.55.1",
+ "@rollup/rollup-linux-ppc64-gnu": "4.55.1",
+ "@rollup/rollup-linux-ppc64-musl": "4.55.1",
+ "@rollup/rollup-linux-riscv64-gnu": "4.55.1",
+ "@rollup/rollup-linux-riscv64-musl": "4.55.1",
+ "@rollup/rollup-linux-s390x-gnu": "4.55.1",
+ "@rollup/rollup-linux-x64-gnu": "4.55.1",
+ "@rollup/rollup-linux-x64-musl": "4.55.1",
+ "@rollup/rollup-openbsd-x64": "4.55.1",
+ "@rollup/rollup-openharmony-arm64": "4.55.1",
+ "@rollup/rollup-win32-arm64-msvc": "4.55.1",
+ "@rollup/rollup-win32-ia32-msvc": "4.55.1",
+ "@rollup/rollup-win32-x64-gnu": "4.55.1",
+ "@rollup/rollup-win32-x64-msvc": "4.55.1",
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/scheduler": {
+ "version": "0.27.0",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz",
+ "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==",
+ "license": "MIT"
+ },
+ "node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/set-cookie-parser": {
+ "version": "2.7.2",
+ "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz",
+ "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==",
+ "license": "MIT"
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/strip-json-comments": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
+ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/tiny-invariant": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz",
+ "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==",
+ "license": "MIT"
+ },
+ "node_modules/tinyglobby": {
+ "version": "0.2.15",
+ "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz",
+ "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "fdir": "^6.5.0",
+ "picomatch": "^4.0.3"
+ },
+ "engines": {
+ "node": ">=12.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/SuperchupuDev"
+ }
+ },
+ "node_modules/tslib": {
+ "version": "2.8.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
+ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
+ "license": "0BSD"
+ },
+ "node_modules/type-check": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
+ "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "prelude-ls": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz",
+ "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "escalade": "^3.2.0",
+ "picocolors": "^1.1.1"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "dependencies": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "node_modules/use-sync-external-store": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz",
+ "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==",
+ "license": "MIT",
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/victory-vendor": {
+ "version": "37.3.6",
+ "resolved": "https://registry.npmjs.org/victory-vendor/-/victory-vendor-37.3.6.tgz",
+ "integrity": "sha512-SbPDPdDBYp+5MJHhBCAyI7wKM3d5ivekigc2Dk2s7pgbZ9wIgIBYGVw4zGHBml/qTFbexrofXW6Gu4noGxrOwQ==",
+ "license": "MIT AND ISC",
+ "dependencies": {
+ "@types/d3-array": "^3.0.3",
+ "@types/d3-ease": "^3.0.0",
+ "@types/d3-interpolate": "^3.0.1",
+ "@types/d3-scale": "^4.0.2",
+ "@types/d3-shape": "^3.1.0",
+ "@types/d3-time": "^3.0.0",
+ "@types/d3-timer": "^3.0.0",
+ "d3-array": "^3.1.6",
+ "d3-ease": "^3.0.1",
+ "d3-interpolate": "^3.0.1",
+ "d3-scale": "^4.0.2",
+ "d3-shape": "^3.1.0",
+ "d3-time": "^3.0.0",
+ "d3-timer": "^3.0.1"
+ }
+ },
+ "node_modules/vite": {
+ "version": "7.3.1",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz",
+ "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "esbuild": "^0.27.0",
+ "fdir": "^6.5.0",
+ "picomatch": "^4.0.3",
+ "postcss": "^8.5.6",
+ "rollup": "^4.43.0",
+ "tinyglobby": "^0.2.15"
+ },
+ "bin": {
+ "vite": "bin/vite.js"
+ },
+ "engines": {
+ "node": "^20.19.0 || >=22.12.0"
+ },
+ "funding": {
+ "url": "https://github.com/vitejs/vite?sponsor=1"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ },
+ "peerDependencies": {
+ "@types/node": "^20.19.0 || >=22.12.0",
+ "jiti": ">=1.21.0",
+ "less": "^4.0.0",
+ "lightningcss": "^1.21.0",
+ "sass": "^1.70.0",
+ "sass-embedded": "^1.70.0",
+ "stylus": ">=0.54.8",
+ "sugarss": "^5.0.0",
+ "terser": "^5.16.0",
+ "tsx": "^4.8.1",
+ "yaml": "^2.4.2"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "jiti": {
+ "optional": true
+ },
+ "less": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ },
+ "sass-embedded": {
+ "optional": true
+ },
+ "stylus": {
+ "optional": true
+ },
+ "sugarss": {
+ "optional": true
+ },
+ "terser": {
+ "optional": true
+ },
+ "tsx": {
+ "optional": true
+ },
+ "yaml": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/word-wrap": {
+ "version": "1.2.5",
+ "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz",
+ "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/yocto-queue": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/zod": {
+ "version": "4.3.5",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.5.tgz",
+ "integrity": "sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true,
+ "funding": {
+ "url": "https://github.com/sponsors/colinhacks"
+ }
+ },
+ "node_modules/zod-validation-error": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/zod-validation-error/-/zod-validation-error-4.0.2.tgz",
+ "integrity": "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "peerDependencies": {
+ "zod": "^3.25.0 || ^4.0.0"
+ }
+ },
+ "node_modules/zustand": {
+ "version": "5.0.10",
+ "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.10.tgz",
+ "integrity": "sha512-U1AiltS1O9hSy3rul+Ub82ut2fqIAefiSuwECWt6jlMVUGejvf+5omLcRBSzqbRagSM3hQZbtzdeRc6QVScXTg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12.20.0"
+ },
+ "peerDependencies": {
+ "@types/react": ">=18.0.0",
+ "immer": ">=9.0.6",
+ "react": ">=18.0.0",
+ "use-sync-external-store": ">=1.2.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "immer": {
+ "optional": true
+ },
+ "react": {
+ "optional": true
+ },
+ "use-sync-external-store": {
+ "optional": true
+ }
+ }
+ }
+ }
+}
diff --git a/frontend/package.json b/frontend/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..7d4e979aabac0e22c69e1de48bae83bb7f292a8e
--- /dev/null
+++ b/frontend/package.json
@@ -0,0 +1,35 @@
+{
+ "name": "frontend",
+ "private": true,
+ "version": "0.0.0",
+ "type": "module",
+ "scripts": {
+ "dev": "vite",
+ "build": "vite build",
+ "lint": "eslint .",
+ "preview": "vite preview"
+ },
+ "dependencies": {
+ "axios": "^1.13.2",
+ "framer-motion": "^12.26.1",
+ "lucide-react": "^0.562.0",
+ "react": "^19.2.0",
+ "react-dom": "^19.2.0",
+ "react-hot-toast": "^2.6.0",
+ "react-router-dom": "^7.12.0",
+ "recharts": "^3.6.0",
+ "zustand": "^5.0.10"
+ },
+ "devDependencies": {
+ "@eslint/js": "^9.39.1",
+ "@types/react": "^19.2.5",
+ "@types/react-dom": "^19.2.3",
+ "@vitejs/plugin-react": "^5.1.1",
+ "buffer": "^6.0.3",
+ "eslint": "^9.39.1",
+ "eslint-plugin-react-hooks": "^7.0.1",
+ "eslint-plugin-react-refresh": "^0.4.24",
+ "globals": "^16.5.0",
+ "vite": "^7.2.4"
+ }
+}
diff --git a/frontend/public/vite.svg b/frontend/public/vite.svg
new file mode 100644
index 0000000000000000000000000000000000000000..e7b8dfb1b2a60bd50538bec9f876511b9cac21e3
--- /dev/null
+++ b/frontend/public/vite.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/src/App.css b/frontend/src/App.css
new file mode 100644
index 0000000000000000000000000000000000000000..b9d355df2a5956b526c004531b7b0ffe412461e0
--- /dev/null
+++ b/frontend/src/App.css
@@ -0,0 +1,42 @@
+#root {
+ max-width: 1280px;
+ margin: 0 auto;
+ padding: 2rem;
+ text-align: center;
+}
+
+.logo {
+ height: 6em;
+ padding: 1.5em;
+ will-change: filter;
+ transition: filter 300ms;
+}
+.logo:hover {
+ filter: drop-shadow(0 0 2em #646cffaa);
+}
+.logo.react:hover {
+ filter: drop-shadow(0 0 2em #61dafbaa);
+}
+
+@keyframes logo-spin {
+ from {
+ transform: rotate(0deg);
+ }
+ to {
+ transform: rotate(360deg);
+ }
+}
+
+@media (prefers-reduced-motion: no-preference) {
+ a:nth-of-type(2) .logo {
+ animation: logo-spin infinite 20s linear;
+ }
+}
+
+.card {
+ padding: 2em;
+}
+
+.read-the-docs {
+ color: #888;
+}
diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..069a6ac39f430e161dec02099f848e6fd1f6f9e4
--- /dev/null
+++ b/frontend/src/App.jsx
@@ -0,0 +1,82 @@
+import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom';
+import { Toaster, toast } from 'react-hot-toast';
+import { useEffect } from 'react';
+import Layout from './components/Layout';
+import Dashboard from './pages/Dashboard';
+import Quantizer from './pages/Quantizer';
+import Analysis from './pages/Analysis';
+import ModelLoader from './pages/ModelLoader';
+import { useSystemStore } from './store';
+import './index.css';
+
+function App() {
+ const fetchSystemInfo = useSystemStore((state) => state.fetchSystemInfo);
+
+ useEffect(() => {
+ // Fetch system info on app load
+ fetchSystemInfo();
+
+ const handleOffline = () => toast.error("Internet connection lost");
+ const handleOnline = () => toast.success("Internet connection restored");
+
+ window.addEventListener('offline', handleOffline);
+ window.addEventListener('online', handleOnline);
+
+ return () => {
+ window.removeEventListener('offline', handleOffline);
+ window.removeEventListener('online', handleOnline);
+ };
+ }, [fetchSystemInfo]);
+
+ return (
+
+
+ }>
+ } />
+ } />
+ } />
+ } />
+ } />
+
+
+
+
+ );
+}
+
+export default App;
diff --git a/frontend/src/assets/react.svg b/frontend/src/assets/react.svg
new file mode 100644
index 0000000000000000000000000000000000000000..6c87de9bb3358469122cc991d5cf578927246184
--- /dev/null
+++ b/frontend/src/assets/react.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/frontend/src/components/Layout.jsx b/frontend/src/components/Layout.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..1cef06bb9b466995ed0291803d0faad9707d4f3a
--- /dev/null
+++ b/frontend/src/components/Layout.jsx
@@ -0,0 +1,297 @@
+import { Outlet, NavLink, useLocation } from 'react-router-dom';
+import { useEffect } from 'react';
+import {
+ LayoutDashboard,
+ Layers,
+ BarChart3,
+ Settings,
+ Cpu,
+ HardDrive,
+ Zap,
+ Github,
+ Menu,
+ X
+} from 'lucide-react';
+import { useSystemStore, useUIStore, useModelStore } from '../store';
+import { motion, AnimatePresence } from 'framer-motion';
+
+/**
+ * Main application layout with sidebar navigation
+ */
+export default function Layout() {
+ const { sidebarOpen, toggleSidebar } = useUIStore();
+ const systemInfo = useSystemStore((state) => state.systemInfo);
+ const checkLoadedModel = useModelStore((state) => state.checkLoadedModel);
+ const location = useLocation();
+
+ // Sync model state on mount
+ useEffect(() => {
+ checkLoadedModel();
+ }, []);
+
+ const navItems = [
+ { path: '/dashboard', label: 'Dashboard', icon: LayoutDashboard },
+ { path: '/quantize', label: 'Quantizer', icon: Layers },
+ { path: '/analysis', label: 'Analysis', icon: BarChart3 },
+ { path: '/models', label: 'Models', icon: HardDrive },
+ ];
+
+ return (
+
+ {/* Sidebar */}
+
+ {/* Logo */}
+
+
+
+
+
+
+ Quantizer
+ Neural Network
+
+
+
+
+
+
+
+ {/* Navigation */}
+
+ {navItems.map((item) => (
+ `nav-item ${isActive ? 'active' : ''}`}
+ >
+
+ {item.label}
+
+ ))}
+
+
+ {/* System Status */}
+
+
+
+
+ System Status
+
+ {systemInfo ? (
+
+
+ GPU
+
+ {systemInfo.cuda_available ? 'CUDA' : systemInfo.mps_available ? 'MPS' : 'CPU'}
+
+
+ {systemInfo.gpus?.length > 0 && (
+
+ {systemInfo.gpus[0].name}
+ {systemInfo.gpus[0].total_memory_gb}GB
+
+ )}
+
+ RAM
+
+ {systemInfo.ram_available_gb?.toFixed(1)}GB / {systemInfo.ram_total_gb?.toFixed(1)}GB
+
+
+
+ ) : (
+
+ )}
+
+
+
+
+ GitHub
+
+
+
+
+ {/* Mobile menu button */}
+
+
+
+
+ {/* Main Content */}
+
+
+
+
+
+
+
+
+
+
+ );
+}
diff --git a/frontend/src/index.css b/frontend/src/index.css
new file mode 100644
index 0000000000000000000000000000000000000000..4a53e159d5552d2d9007c48f6cfea19bc08a9c6f
--- /dev/null
+++ b/frontend/src/index.css
@@ -0,0 +1,751 @@
+/*
+ * Neural Network Quantizer - Design System
+ * Premium glassmorphism dark theme with smooth animations
+ */
+
+/* ============================================
+ CSS Variables - Design Tokens
+ ============================================ */
+:root {
+ /* Colors - Dark Theme */
+ --color-bg-primary: #0a0a0f;
+ --color-bg-secondary: #12121a;
+ --color-bg-tertiary: #1a1a25;
+ --color-bg-elevated: #22222f;
+
+ /* Glass effect backgrounds */
+ --glass-bg: rgba(255, 255, 255, 0.03);
+ --glass-bg-hover: rgba(255, 255, 255, 0.06);
+ --glass-border: rgba(255, 255, 255, 0.08);
+ --glass-border-hover: rgba(255, 255, 255, 0.15);
+
+ /* Accent colors */
+ --color-accent-primary: #6366f1;
+ --color-accent-secondary: #8b5cf6;
+ --color-accent-tertiary: #a855f7;
+ --color-accent-glow: rgba(99, 102, 241, 0.3);
+
+ /* Status colors */
+ --color-success: #10b981;
+ --color-success-bg: rgba(16, 185, 129, 0.1);
+ --color-warning: #f59e0b;
+ --color-warning-bg: rgba(245, 158, 11, 0.1);
+ --color-error: #ef4444;
+ --color-error-bg: rgba(239, 68, 68, 0.1);
+ --color-info: #06b6d4;
+ --color-info-bg: rgba(6, 182, 212, 0.1);
+
+ /* Text colors */
+ --text-primary: #f8fafc;
+ --text-secondary: #94a3b8;
+ --text-tertiary: #64748b;
+ --text-muted: #475569;
+
+ /* Gradients */
+ --gradient-primary: linear-gradient(135deg, var(--color-accent-primary) 0%, var(--color-accent-secondary) 100%);
+ --gradient-secondary: linear-gradient(135deg, var(--color-accent-secondary) 0%, var(--color-accent-tertiary) 100%);
+ --gradient-glow: radial-gradient(ellipse at center, var(--color-accent-glow) 0%, transparent 70%);
+ --gradient-mesh: radial-gradient(at 40% 20%, hsla(228,100%,74%,0.15) 0px, transparent 50%),
+ radial-gradient(at 80% 0%, hsla(189,100%,56%,0.1) 0px, transparent 50%),
+ radial-gradient(at 0% 50%, hsla(355,100%,93%,0.05) 0px, transparent 50%),
+ radial-gradient(at 80% 50%, hsla(340,100%,76%,0.1) 0px, transparent 50%);
+
+ /* Spacing */
+ --space-xs: 0.25rem;
+ --space-sm: 0.5rem;
+ --space-md: 1rem;
+ --space-lg: 1.5rem;
+ --space-xl: 2rem;
+ --space-2xl: 3rem;
+ --space-3xl: 4rem;
+
+ /* Border radius */
+ --radius-sm: 0.375rem;
+ --radius-md: 0.5rem;
+ --radius-lg: 0.75rem;
+ --radius-xl: 1rem;
+ --radius-2xl: 1.5rem;
+ --radius-full: 9999px;
+
+ /* Shadows */
+ --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.3);
+ --shadow-md: 0 4px 6px -1px rgba(0, 0, 0, 0.3), 0 2px 4px -1px rgba(0, 0, 0, 0.2);
+ --shadow-lg: 0 10px 15px -3px rgba(0, 0, 0, 0.4), 0 4px 6px -2px rgba(0, 0, 0, 0.3);
+ --shadow-xl: 0 20px 25px -5px rgba(0, 0, 0, 0.5), 0 10px 10px -5px rgba(0, 0, 0, 0.3);
+ --shadow-glow: 0 0 40px var(--color-accent-glow);
+
+ /* Typography */
+ --font-sans: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
+ --font-mono: 'JetBrains Mono', 'Fira Code', Consolas, monospace;
+
+ --text-xs: 0.75rem;
+ --text-sm: 0.875rem;
+ --text-base: 1rem;
+ --text-lg: 1.125rem;
+ --text-xl: 1.25rem;
+ --text-2xl: 1.5rem;
+ --text-3xl: 1.875rem;
+ --text-4xl: 2.25rem;
+
+ /* Transitions */
+ --transition-fast: 150ms cubic-bezier(0.4, 0, 0.2, 1);
+ --transition-base: 200ms cubic-bezier(0.4, 0, 0.2, 1);
+ --transition-slow: 300ms cubic-bezier(0.4, 0, 0.2, 1);
+ --transition-spring: 500ms cubic-bezier(0.34, 1.56, 0.64, 1);
+
+ /* Layout */
+ --sidebar-width: 280px;
+ --header-height: 64px;
+ --max-content-width: 1400px;
+}
+
+/* ============================================
+ Base Styles
+ ============================================ */
+*, *::before, *::after {
+ box-sizing: border-box;
+ margin: 0;
+ padding: 0;
+}
+
+html {
+ font-size: 16px;
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+}
+
+body {
+ font-family: var(--font-sans);
+ background: var(--color-bg-primary);
+ color: var(--text-primary);
+ line-height: 1.6;
+ min-height: 100vh;
+ overflow-x: hidden;
+}
+
+/* Background mesh gradient */
+body::before {
+ content: '';
+ position: fixed;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: var(--gradient-mesh);
+ pointer-events: none;
+ z-index: -1;
+}
+
+#root {
+ min-height: 100vh;
+ display: flex;
+ flex-direction: column;
+}
+
+/* ============================================
+ Typography
+ ============================================ */
+h1, h2, h3, h4, h5, h6 {
+ font-weight: 600;
+ line-height: 1.3;
+ color: var(--text-primary);
+}
+
+h1 { font-size: var(--text-4xl); }
+h2 { font-size: var(--text-3xl); }
+h3 { font-size: var(--text-2xl); }
+h4 { font-size: var(--text-xl); }
+h5 { font-size: var(--text-lg); }
+h6 { font-size: var(--text-base); }
+
+p {
+ color: var(--text-secondary);
+ margin-bottom: var(--space-md);
+}
+
+a {
+ color: var(--color-accent-primary);
+ text-decoration: none;
+ transition: color var(--transition-fast);
+}
+
+a:hover {
+ color: var(--color-accent-secondary);
+}
+
+code {
+ font-family: var(--font-mono);
+ background: var(--glass-bg);
+ padding: 0.2em 0.4em;
+ border-radius: var(--radius-sm);
+ font-size: 0.9em;
+}
+
+/* ============================================
+ Glass Card Component
+ ============================================ */
+.glass-card {
+ background: var(--glass-bg);
+ border: 1px solid var(--glass-border);
+ border-radius: var(--radius-xl);
+ padding: var(--space-lg);
+ backdrop-filter: blur(20px);
+ -webkit-backdrop-filter: blur(20px);
+ transition: all var(--transition-base);
+}
+
+.glass-card:hover {
+ background: var(--glass-bg-hover);
+ border-color: var(--glass-border-hover);
+ transform: translateY(-2px);
+ box-shadow: var(--shadow-lg);
+}
+
+.glass-card.no-hover:hover {
+ transform: none;
+ box-shadow: none;
+}
+
+/* ============================================
+ Button Styles
+ ============================================ */
+.btn {
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+ gap: var(--space-sm);
+ padding: var(--space-sm) var(--space-lg);
+ border: none;
+ border-radius: var(--radius-lg);
+ font-family: var(--font-sans);
+ font-size: var(--text-sm);
+ font-weight: 500;
+ cursor: pointer;
+ transition: all var(--transition-base);
+ white-space: nowrap;
+}
+
+.btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.btn-primary {
+ background: var(--gradient-primary);
+ color: white;
+ box-shadow: var(--shadow-md), 0 0 20px var(--color-accent-glow);
+}
+
+.btn-primary:hover:not(:disabled) {
+ transform: translateY(-2px);
+ box-shadow: var(--shadow-lg), 0 0 30px var(--color-accent-glow);
+}
+
+.btn-secondary {
+ background: var(--glass-bg);
+ color: var(--text-primary);
+ border: 1px solid var(--glass-border);
+ backdrop-filter: blur(10px);
+}
+
+.btn-secondary:hover:not(:disabled) {
+ background: var(--glass-bg-hover);
+ border-color: var(--glass-border-hover);
+}
+
+.btn-ghost {
+ background: transparent;
+ color: var(--text-secondary);
+}
+
+.btn-ghost:hover:not(:disabled) {
+ background: var(--glass-bg);
+ color: var(--text-primary);
+}
+
+.btn-success {
+ background: var(--color-success);
+ color: white;
+}
+
+.btn-danger {
+ background: var(--color-error);
+ color: white;
+}
+
+.btn-lg {
+ padding: var(--space-md) var(--space-xl);
+ font-size: var(--text-base);
+}
+
+.btn-sm {
+ padding: var(--space-xs) var(--space-md);
+ font-size: var(--text-xs);
+}
+
+.btn-icon {
+ padding: var(--space-sm);
+ aspect-ratio: 1;
+}
+
+/* ============================================
+ Input Styles
+ ============================================ */
+.input-group {
+ display: flex;
+ flex-direction: column;
+ gap: var(--space-xs);
+}
+
+.input-label {
+ font-size: var(--text-sm);
+ font-weight: 500;
+ color: var(--text-secondary);
+}
+
+.input {
+ width: 100%;
+ padding: var(--space-sm) var(--space-md);
+ background: var(--glass-bg);
+ border: 1px solid var(--glass-border);
+ border-radius: var(--radius-md);
+ color: var(--text-primary);
+ font-family: var(--font-sans);
+ font-size: var(--text-sm);
+ transition: all var(--transition-fast);
+}
+
+.input:focus {
+ outline: none;
+ border-color: var(--color-accent-primary);
+ box-shadow: 0 0 0 3px var(--color-accent-glow);
+}
+
+.input::placeholder {
+ color: var(--text-muted);
+}
+
+.input-error {
+ border-color: var(--color-error);
+}
+
+/* Select dropdown */
+.select {
+ appearance: none;
+ background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='%2394a3b8' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='6 9 12 15 18 9'%3E%3C/polyline%3E%3C/svg%3E");
+ background-repeat: no-repeat;
+ background-position: right var(--space-sm) center;
+ background-size: 16px;
+ padding-right: var(--space-xl);
+}
+
+/* Slider */
+.slider {
+ width: 100%;
+ height: 6px;
+ background: var(--glass-bg);
+ border-radius: var(--radius-full);
+ appearance: none;
+ cursor: pointer;
+}
+
+.slider::-webkit-slider-thumb {
+ appearance: none;
+ width: 18px;
+ height: 18px;
+ background: var(--gradient-primary);
+ border-radius: 50%;
+ cursor: pointer;
+ box-shadow: var(--shadow-md);
+ transition: transform var(--transition-fast);
+}
+
+.slider::-webkit-slider-thumb:hover {
+ transform: scale(1.2);
+}
+
+/* ============================================
+ Status Badges
+ ============================================ */
+.badge {
+ display: inline-flex;
+ align-items: center;
+ gap: var(--space-xs);
+ padding: var(--space-xs) var(--space-sm);
+ border-radius: var(--radius-full);
+ font-size: var(--text-xs);
+ font-weight: 500;
+}
+
+.badge-success {
+ background: var(--color-success-bg);
+ color: var(--color-success);
+}
+
+.badge-warning {
+ background: var(--color-warning-bg);
+ color: var(--color-warning);
+}
+
+.badge-error {
+ background: var(--color-error-bg);
+ color: var(--color-error);
+}
+
+.badge-info {
+ background: var(--color-info-bg);
+ color: var(--color-info);
+}
+
+/* ============================================
+ Layout Components
+ ============================================ */
+.app-layout {
+ display: flex;
+ min-height: 100vh;
+}
+
+.sidebar {
+ width: var(--sidebar-width);
+ background: var(--color-bg-secondary);
+ border-right: 1px solid var(--glass-border);
+ padding: var(--space-lg);
+ display: flex;
+ flex-direction: column;
+ position: fixed;
+ top: 0;
+ left: 0;
+ bottom: 0;
+ z-index: 100;
+}
+
+.main-content {
+ flex: 1;
+ margin-left: var(--sidebar-width);
+ padding: var(--space-xl);
+ max-width: calc(100vw - var(--sidebar-width));
+}
+
+.page-header {
+ margin-bottom: var(--space-xl);
+}
+
+.page-title {
+ font-size: var(--text-3xl);
+ font-weight: 700;
+ margin-bottom: var(--space-xs);
+}
+
+.page-subtitle {
+ color: var(--text-secondary);
+ font-size: var(--text-base);
+}
+
+/* Grid layout */
+.grid {
+ display: grid;
+ gap: var(--space-lg);
+}
+
+.grid-2 { grid-template-columns: repeat(2, 1fr); }
+.grid-3 { grid-template-columns: repeat(3, 1fr); }
+.grid-4 { grid-template-columns: repeat(4, 1fr); }
+
+@media (max-width: 1024px) {
+ .grid-3, .grid-4 { grid-template-columns: repeat(2, 1fr); }
+}
+
+@media (max-width: 768px) {
+ .grid-2, .grid-3, .grid-4 { grid-template-columns: 1fr; }
+
+ .sidebar {
+ transform: translateX(-100%);
+ transition: transform var(--transition-base);
+ }
+
+ .sidebar.open {
+ transform: translateX(0);
+ }
+
+ .main-content {
+ margin-left: 0;
+ max-width: 100vw;
+ }
+}
+
+/* ============================================
+ Stats Card
+ ============================================ */
+.stat-card {
+ display: flex;
+ flex-direction: column;
+ gap: var(--space-sm);
+}
+
+.stat-value {
+ font-size: var(--text-3xl);
+ font-weight: 700;
+ background: var(--gradient-primary);
+ -webkit-background-clip: text;
+ -webkit-text-fill-color: transparent;
+ background-clip: text;
+}
+
+.stat-label {
+ font-size: var(--text-sm);
+ color: var(--text-secondary);
+}
+
+.stat-change {
+ font-size: var(--text-xs);
+ display: flex;
+ align-items: center;
+ gap: var(--space-xs);
+}
+
+.stat-change.positive { color: var(--color-success); }
+.stat-change.negative { color: var(--color-error); }
+
+/* ============================================
+ Progress Bar
+ ============================================ */
+.progress-bar {
+ width: 100%;
+ height: 8px;
+ background: var(--glass-bg);
+ border-radius: var(--radius-full);
+ overflow: hidden;
+}
+
+.progress-fill {
+ height: 100%;
+ background: var(--gradient-primary);
+ border-radius: var(--radius-full);
+ transition: width var(--transition-slow);
+}
+
+/* ============================================
+ Tabs
+ ============================================ */
+.tabs {
+ display: flex;
+ gap: var(--space-xs);
+ padding: var(--space-xs);
+ background: var(--glass-bg);
+ border-radius: var(--radius-lg);
+ margin-bottom: var(--space-lg);
+}
+
+.tab {
+ flex: 1;
+ padding: var(--space-sm) var(--space-md);
+ background: transparent;
+ border: none;
+ border-radius: var(--radius-md);
+ color: var(--text-secondary);
+ font-size: var(--text-sm);
+ font-weight: 500;
+ cursor: pointer;
+ transition: all var(--transition-fast);
+}
+
+.tab:hover {
+ color: var(--text-primary);
+ background: var(--glass-bg-hover);
+}
+
+.tab.active {
+ background: var(--gradient-primary);
+ color: white;
+}
+
+/* ============================================
+ Chart Container
+ ============================================ */
+.chart-container {
+ background: var(--glass-bg);
+ border: 1px solid var(--glass-border);
+ border-radius: var(--radius-xl);
+ padding: var(--space-md);
+ min-height: 300px;
+}
+
+.chart-title {
+ font-size: var(--text-sm);
+ font-weight: 600;
+ color: var(--text-primary);
+ margin-bottom: var(--space-md);
+}
+
+/* ============================================
+ Loading States
+ ============================================ */
+.skeleton {
+ background: linear-gradient(
+ 90deg,
+ var(--glass-bg) 25%,
+ var(--glass-bg-hover) 50%,
+ var(--glass-bg) 75%
+ );
+ background-size: 200% 100%;
+ animation: shimmer 1.5s infinite;
+ border-radius: var(--radius-md);
+}
+
+@keyframes shimmer {
+ 0% { background-position: 200% 0; }
+ 100% { background-position: -200% 0; }
+}
+
+.spinner {
+ width: 24px;
+ height: 24px;
+ border: 2px solid var(--glass-border);
+ border-top-color: var(--color-accent-primary);
+ border-radius: 50%;
+ animation: spin 0.8s linear infinite;
+}
+
+@keyframes spin {
+ to { transform: rotate(360deg); }
+}
+
+/* ============================================
+ Tooltips
+ ============================================ */
+.tooltip {
+ position: relative;
+}
+
+.tooltip::after {
+ content: attr(data-tooltip);
+ position: absolute;
+ bottom: 100%;
+ left: 50%;
+ transform: translateX(-50%);
+ padding: var(--space-xs) var(--space-sm);
+ background: var(--color-bg-elevated);
+ border: 1px solid var(--glass-border);
+ border-radius: var(--radius-md);
+ font-size: var(--text-xs);
+ white-space: nowrap;
+ opacity: 0;
+ visibility: hidden;
+ transition: all var(--transition-fast);
+}
+
+.tooltip:hover::after {
+ opacity: 1;
+ visibility: visible;
+}
+
+/* ============================================
+ Animations
+ ============================================ */
+@keyframes fadeIn {
+ from { opacity: 0; }
+ to { opacity: 1; }
+}
+
+@keyframes slideUp {
+ from {
+ opacity: 0;
+ transform: translateY(20px);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0);
+ }
+}
+
+@keyframes scaleIn {
+ from {
+ opacity: 0;
+ transform: scale(0.95);
+ }
+ to {
+ opacity: 1;
+ transform: scale(1);
+ }
+}
+
+.animate-fade-in { animation: fadeIn var(--transition-slow) ease-out; }
+.animate-slide-up { animation: slideUp var(--transition-slow) ease-out; }
+.animate-scale-in { animation: scaleIn var(--transition-spring) ease-out; }
+
+/* Staggered animations */
+.stagger > * {
+ animation: slideUp var(--transition-slow) ease-out forwards;
+ opacity: 0;
+}
+
+.stagger > *:nth-child(1) { animation-delay: 0ms; }
+.stagger > *:nth-child(2) { animation-delay: 50ms; }
+.stagger > *:nth-child(3) { animation-delay: 100ms; }
+.stagger > *:nth-child(4) { animation-delay: 150ms; }
+.stagger > *:nth-child(5) { animation-delay: 200ms; }
+.stagger > *:nth-child(6) { animation-delay: 250ms; }
+
+/* ============================================
+ Scrollbar Styles
+ ============================================ */
+::-webkit-scrollbar {
+ width: 8px;
+ height: 8px;
+}
+
+::-webkit-scrollbar-track {
+ background: var(--color-bg-secondary);
+}
+
+::-webkit-scrollbar-thumb {
+ background: var(--glass-border);
+ border-radius: var(--radius-full);
+}
+
+::-webkit-scrollbar-thumb:hover {
+ background: var(--glass-border-hover);
+}
+
+/* ============================================
+ Utility Classes
+ ============================================ */
+.text-center { text-align: center; }
+.text-right { text-align: right; }
+.text-sm { font-size: var(--text-sm); }
+.text-xs { font-size: var(--text-xs); }
+.text-muted { color: var(--text-secondary); }
+.text-accent { color: var(--color-accent-primary); }
+
+.flex { display: flex; }
+.flex-col { flex-direction: column; }
+.items-center { align-items: center; }
+.justify-between { justify-content: space-between; }
+.justify-center { justify-content: center; }
+.gap-sm { gap: var(--space-sm); }
+.gap-md { gap: var(--space-md); }
+.gap-lg { gap: var(--space-lg); }
+
+.mt-sm { margin-top: var(--space-sm); }
+.mt-md { margin-top: var(--space-md); }
+.mt-lg { margin-top: var(--space-lg); }
+.mb-sm { margin-bottom: var(--space-sm); }
+.mb-md { margin-bottom: var(--space-md); }
+.mb-lg { margin-bottom: var(--space-lg); }
+
+.w-full { width: 100%; }
+.h-full { height: 100%; }
+
+.overflow-hidden { overflow: hidden; }
+.overflow-auto { overflow: auto; }
+
+.relative { position: relative; }
+.absolute { position: absolute; }
+
+.rounded { border-radius: var(--radius-md); }
+.rounded-lg { border-radius: var(--radius-lg); }
+.rounded-xl { border-radius: var(--radius-xl); }
+
+.shadow { box-shadow: var(--shadow-md); }
+.shadow-lg { box-shadow: var(--shadow-lg); }
+.shadow-glow { box-shadow: var(--shadow-glow); }
diff --git a/frontend/src/main.jsx b/frontend/src/main.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..b9a1a6deac8775b5598874b2bc3c7971d82cf211
--- /dev/null
+++ b/frontend/src/main.jsx
@@ -0,0 +1,10 @@
+import { StrictMode } from 'react'
+import { createRoot } from 'react-dom/client'
+import './index.css'
+import App from './App.jsx'
+
+createRoot(document.getElementById('root')).render(
+
+
+ ,
+)
diff --git a/frontend/src/pages/Analysis.jsx b/frontend/src/pages/Analysis.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..03839713e245e87961dacf2f09b86321305b6d1e
--- /dev/null
+++ b/frontend/src/pages/Analysis.jsx
@@ -0,0 +1,483 @@
+import { useState, useEffect } from 'react';
+import {
+ BarChart3,
+ Layers,
+ TrendingUp,
+ RefreshCw,
+ AlertTriangle
+} from 'lucide-react';
+import { useQuantizationStore, useModelStore } from '../store';
+import { motion } from 'framer-motion';
+import {
+ BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip,
+ ResponsiveContainer, Cell, Legend
+} from 'recharts';
+
+/**
+ * Analysis page - compare quantization methods and analyze weights
+ */
+export default function Analysis() {
+ const { compareMethod } = useQuantizationStore();
+ const { modelInfo, layers, fetchLayers } = useModelStore();
+
+ const [comparison, setComparison] = useState(null);
+ const [isLoading, setIsLoading] = useState(false);
+ const [selectedMethods, setSelectedMethods] = useState(['int8', 'int4', 'nf4']);
+ const [source, setSource] = useState('random'); // 'random' | 'layer'
+ const [selectedLayer, setSelectedLayer] = useState('');
+
+ // Switch to layer mode if model is loaded
+ // (and fetch the layer list once, if it has not been loaded yet)
+ useEffect(() => {
+ if (modelInfo) {
+ setSource('layer');
+ if (layers.length === 0) fetchLayers();
+ }
+ }, [modelInfo]);
+
+ const runComparison = async () => {
+ setIsLoading(true);
+ const layerToCompare = source === 'layer' ? selectedLayer : null;
+ const result = await compareMethod(selectedMethods, layerToCompare);
+ setComparison(result);
+ setIsLoading(false);
+ };
+
+ const toggleMethod = (method) => {
+ setSelectedMethods((prev) =>
+ prev.includes(method)
+ ? prev.filter(m => m !== method)
+ : [...prev, method]
+ );
+ };
+
+ // Prepare chart data
+ const getComparisonData = () => {
+ if (!comparison?.comparison) return [];
+ return comparison.comparison
+ .filter(c => !c.error)
+ .map(c => ({
+ method: c.method.toUpperCase(),
+ meanError: c.mean_error,
+ maxError: c.max_error,
+ memorySavings: c.memory_savings_percent
+ }));
+ };
+
+ const COLORS = ['#6366f1', '#8b5cf6', '#a855f7'];
+
+ return (
+
+ {/* Header */}
+
+
Analysis
+
+ Compare quantization methods and analyze weight distributions
+
+ {modelInfo && (
+
+ Active Model:
+ {modelInfo.name}
+
+ )}
+
+
+ {/* Method Comparison */}
+
+
+
+
+ Method Comparison
+ {comparison && (
+
+ Source: {comparison.source.startsWith('layer:') ? comparison.source.replace('layer:', '') : 'Random Weights'}
+
+ )}
+
+
+ {isLoading ? (
+ <>
+
+ Comparing...
+ >
+ ) : (
+ <>
+
+ Run Comparison
+ >
+ )}
+
+
+
+ {/* Data Source Selection */}
+
+
Select data source:
+
+
+
+ {modelInfo && (
+ setSource('layer')}
+ >
+ Loaded Model Layer
+
+ )}
+ setSource('random')}
+ >
+ Random Weights
+
+
+
+
+ {source === 'layer' && (
+
+ setSelectedLayer(e.target.value)}
+ >
+ Select a layer...
+ {layers.map((layer) => (
+
+ {layer}
+
+ ))}
+
+
+ )}
+
+
+ {/* Method Selection */}
+
+
Select methods to compare:
+
+ {['int8', 'int4', 'nf4'].map((method) => (
+
toggleMethod(method)}
+ >
+
+ {selectedMethods.includes(method) && '✓'}
+
+
+ {method.toUpperCase()}
+
+ {method === 'int8' && '8-bit integer quantization'}
+ {method === 'int4' && '4-bit integer with grouping'}
+ {method === 'nf4' && 'Normal Float 4-bit (QLoRA)'}
+
+
+
+ ))}
+
+
+
+ {/* Comparison Results */}
+ {comparison && (
+
+
+ {/* Error Chart */}
+
+
Quantization Error by Method
+
+
+
+
+
+
+
+ {getComparisonData().map((entry, index) => (
+ |
+ ))}
+
+
+
+
+
+ {/* Memory Savings Chart */}
+
+
Memory Savings by Method
+
+
+
+
+
+ [`${value.toFixed(1)}%`, 'Savings']}
+ />
+
+ {getComparisonData().map((entry, index) => (
+ |
+ ))}
+
+
+
+
+
+
+ {/* Results Table */}
+
+
+
+
+ Method
+ Bits
+ Max Error
+ Mean Error
+ Memory Savings
+
+
+
+ {comparison.comparison?.filter(c => !c.error).map((result) => (
+
+ {result.method.toUpperCase()}
+ {result.bits}
+ {result.max_error?.toFixed(6)}
+ {result.mean_error?.toFixed(6)}
+
+
+ {result.memory_savings_percent?.toFixed(1)}%
+
+
+
+ ))}
+
+
+
+
+ )}
+
+
+ {/* Model Analysis (if model loaded) */}
+ {modelInfo && (
+
+
+
+ Model Analysis
+
+
+
+
+ Model {modelInfo.name} is loaded with{' '}
+ {modelInfo.num_quantizable_layers} quantizable layers.
+
+
+ Use the Models page to analyze individual layer weights and detect outliers.
+
+
+
+ )}
+
+ {/* Info Section */}
+
+
+
+
+
Understanding Quantization Trade-offs
+
+ Lower bit precision (4-bit) provides better memory savings but introduces more error.
+ 8-bit quantization offers a good balance between compression and accuracy for most models.
+ NF4 uses a codebook optimized for normally distributed weights, ideal for LLMs.
+
+
+
+
+
+
+
+ );
+}
diff --git a/frontend/src/pages/Dashboard.jsx b/frontend/src/pages/Dashboard.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..b4db53686813bb741386032b9419fabbd5635793
--- /dev/null
+++ b/frontend/src/pages/Dashboard.jsx
@@ -0,0 +1,412 @@
+import { useEffect, useState } from 'react';
+import { Link } from 'react-router-dom';
+import {
+ Zap,
+ Cpu,
+ HardDrive,
+ TrendingUp,
+ ArrowRight,
+ Layers,
+ Activity,
+ MemoryStick
+} from 'lucide-react';
+import { useSystemStore, useQuantizationStore, useModelStore } from '../store';
+import { motion } from 'framer-motion';
+
+/**
+ * Dashboard page - overview of system and recent activity
+ */
+export default function Dashboard() {
+ const systemInfo = useSystemStore((state) => state.systemInfo);
+ const fetchSystemInfo = useSystemStore((state) => state.fetchSystemInfo);
+ const quantizationHistory = useQuantizationStore((state) => state.history);
+ const modelInfo = useModelStore((state) => state.modelInfo);
+
+ useEffect(() => {
+ if (!systemInfo) {
+ fetchSystemInfo();
+ }
+ }, [systemInfo, fetchSystemInfo]);
+
+ const stats = [
+ {
+ label: 'GPU Status',
+ value: systemInfo?.cuda_available ? 'CUDA Ready' : systemInfo?.mps_available ? 'MPS Ready' : 'CPU Only',
+ icon: Cpu,
+ color: systemInfo?.cuda_available ? 'success' : 'warning',
+ detail: systemInfo?.gpus?.[0]?.name || 'No GPU detected'
+ },
+ {
+ label: 'Available RAM',
+ value: `${systemInfo?.ram_available_gb?.toFixed(1) || '?'}GB`,
+ icon: MemoryStick,
+ color: 'info',
+ detail: `of ${systemInfo?.ram_total_gb?.toFixed(1) || '?'}GB total`
+ },
+ {
+ label: 'Max Model Size',
+ value: systemInfo?.max_model_size || 'Unknown',
+ icon: Layers,
+ color: 'accent',
+ detail: 'Recommended limit'
+ },
+ {
+ label: 'Quantizations',
+ value: quantizationHistory.length,
+ icon: Activity,
+ color: 'success',
+ detail: 'This session'
+ }
+ ];
+
+ const quickActions = [
+ {
+ title: 'Quick Quantize',
+ description: 'Test quantization on random weights',
+ path: '/quantize',
+ icon: Zap,
+ gradient: 'var(--gradient-primary)'
+ },
+ {
+ title: 'Load Model',
+ description: 'Load a HuggingFace model',
+ path: '/models',
+ icon: HardDrive,
+ gradient: 'var(--gradient-secondary)'
+ },
+ {
+ title: 'Analyze Weights',
+ description: 'Deep dive into weight distributions',
+ path: '/analysis',
+ icon: TrendingUp,
+ gradient: 'linear-gradient(135deg, #10b981 0%, #06b6d4 100%)'
+ }
+ ];
+
+ return (
+
+ {/* Header */}
+
+
Dashboard
+
+ Neural Network Weight Quantization Tool
+
+
+
+ {/* Stats Grid */}
+
+ {stats.map((stat, index) => (
+
+
+
+
+
+
{stat.value}
+
{stat.label}
+
{stat.detail}
+
+
+ ))}
+
+
+ {/* Quick Actions */}
+
+ Quick Actions
+
+ {quickActions.map((action, index) => (
+
+
+
+
+
{action.title}
+
{action.description}
+
+
+
+
+ ))}
+
+
+
+ {/* Current Model */}
+ {modelInfo && (
+
+ Loaded Model
+
+
+
+
+
{modelInfo.name}
+
{modelInfo.architecture}
+
+
+
+
+ {modelInfo.num_params_billions?.toFixed(2)}B
+ Parameters
+
+
+ {modelInfo.num_quantizable_layers}
+ Quantizable Layers
+
+
+ {modelInfo.memory_footprint_gb}GB
+ Memory
+
+
+
+
+ )}
+
+ {/* Getting Started */}
+ {!modelInfo && quantizationHistory.length === 0 && (
+
+
+
+
+
Get Started
+
+ Welcome to the Neural Network Quantizer! You can either test quantization
+ on random weights or load a real HuggingFace model for production use.
+
+
+
+
+ Try Quantization
+
+
+
+ Load Model
+
+
+
+
+
+ )}
+
+ {/* System Warnings */}
+ {systemInfo?.warnings?.length > 0 && (
+
+ System Warnings
+
+ {systemInfo.warnings.map((warning, index) => (
+
+ Warning
+ {warning}
+
+ ))}
+
+
+ )}
+
+
+
+ );
+}
diff --git a/frontend/src/pages/ModelLoader.jsx b/frontend/src/pages/ModelLoader.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e6d2c45b45d995408ac36f252c95484ac2a0a9bf
--- /dev/null
+++ b/frontend/src/pages/ModelLoader.jsx
@@ -0,0 +1,775 @@
+import { useState, useEffect, useRef } from 'react';
+import {
+ Upload,
+ Cpu,
+ HardDrive,
+ Database,
+ CheckCircle,
+ AlertCircle,
+ Loader2,
+ Package,
+ Trash2,
+ Sparkles,
+ Clock,
+ Download
+} from 'lucide-react';
+import { useSystemStore } from '../store';
+import { motion, AnimatePresence } from 'framer-motion';
+
+/**
+ * ModelLoader page - load HuggingFace models with progress tracking
+ */
+export default function ModelLoader() {
+ const systemInfo = useSystemStore((state) => state.systemInfo);
+
+ const [modelName, setModelName] = useState('');
+ const [exampleModels, setExampleModels] = useState(null);
+ const [loadResult, setLoadResult] = useState(null);
+ const [isLoading, setIsLoading] = useState(false);
+ const [progress, setProgress] = useState(null);
+ const [cachedModels, setCachedModels] = useState([]);
+ const [modelInfo, setModelInfo] = useState(null);
+
+ const progressPollRef = useRef(null);
+
+ // Fetch example models and cache info on mount
+ useEffect(() => {
+ // Optimistic load from cache
+ const cachedExamples = localStorage.getItem('example_models');
+ if (cachedExamples) {
+ try {
+ setExampleModels(JSON.parse(cachedExamples));
+ } catch (e) { }
+ }
+
+ fetch('/api/models/examples')
+ .then(res => res.json())
+ .then(data => {
+ setExampleModels(data);
+ localStorage.setItem('example_models', JSON.stringify(data));
+ })
+ .catch(() => { });
+
+ fetchCacheInfo();
+ fetchModelInfo();
+ }, []);
+
+ const fetchCacheInfo = async () => {
+ try {
+ const res = await fetch('/api/models/cache');
+ const data = await res.json();
+ setCachedModels(data.models || []);
+ } catch (e) { }
+ };
+
+ const fetchModelInfo = async () => {
+ try {
+ const res = await fetch('/api/models/info');
+ const data = await res.json();
+ if (data.loaded) {
+ setModelInfo(data);
+ }
+ } catch (e) { }
+ };
+
+ const pollProgress = (name) => {
+ if (progressPollRef.current) {
+ clearInterval(progressPollRef.current);
+ }
+
+ progressPollRef.current = setInterval(async () => {
+ try {
+ const res = await fetch(`/api/models/progress/${encodeURIComponent(name)}`);
+ const data = await res.json();
+ if (data.downloading) {
+ setProgress(data);
+ }
+ } catch (e) { }
+ }, 500);
+ };
+
+ const stopPolling = () => {
+ if (progressPollRef.current) {
+ clearInterval(progressPollRef.current);
+ progressPollRef.current = null;
+ }
+ };
+
+ const handleLoadModel = async () => {
+ if (!modelName.trim() || isLoading) return;
+
+ setIsLoading(true);
+ setLoadResult(null);
+ setProgress({ status: 'starting', percent: 0, message: 'Starting download...' });
+
+ // Start polling for progress
+ pollProgress(modelName.trim());
+
+ try {
+ const response = await fetch('/api/models/load', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ model_name: modelName.trim(),
+ dtype: 'auto',
+ device: 'auto',
+ trust_remote_code: true
+ })
+ });
+
+ const data = await response.json();
+ setLoadResult(data);
+
+ if (data.success) {
+ setModelInfo(data.model_info);
+ setProgress({ status: 'complete', percent: 100, message: 'Model loaded!' });
+ fetchCacheInfo();
+ } else {
+ setProgress(null);
+ }
+ } catch (err) {
+ setLoadResult({ success: false, error: err.message });
+ setProgress(null);
+ } finally {
+ setIsLoading(false);
+ stopPolling();
+ }
+ };
+
+ const handleQuickLoad = (modelId) => {
+ setModelName(modelId);
+ };
+
+ const handleUnload = async () => {
+ try {
+ await fetch('/api/models/unload', { method: 'POST' });
+ setModelInfo(null);
+ setLoadResult(null);
+ setProgress(null);
+ } catch (e) { }
+ };
+
+ const handleDeleteFromCache = async (name) => {
+ try {
+ await fetch(`/api/models/cache/${encodeURIComponent(name)}`, { method: 'DELETE' });
+ fetchCacheInfo();
+ } catch (e) { }
+ };
+
+ const handleCleanup = async () => {
+ try {
+ const res = await fetch('/api/models/cache/cleanup', { method: 'POST' });
+ const data = await res.json();
+ fetchCacheInfo();
+ alert(`Cleaned up ${data.deleted_count} models`);
+ } catch (e) { }
+ };
+
+ return (
+
+ {/* Header */}
+
+
Load HuggingFace Model
+
+ Download and analyze models directly from HuggingFace Hub
+
+
+
+ {/* Main Content */}
+
+ {/* Load Model Card */}
+
+
+
+
+
Model ID
+
setModelName(e.target.value)}
+ onKeyDown={(e) => e.key === 'Enter' && handleLoadModel()}
+ disabled={isLoading}
+ />
+
+ Enter the HuggingFace model identifier (organization/model-name)
+
+
+
+
+ {isLoading ? (
+ <>
+
+ Loading...
+ >
+ ) : (
+ <>
+
+ Download & Load Model
+ >
+ )}
+
+
+ {/* Progress Bar */}
+
+ {progress && (
+
+
+ {progress.message || progress.status}
+ {progress.percent || 0}%
+
+
+
+
+ {progress.speed_mbps && (
+
+ {progress.speed_mbps} MB/s
+ {progress.eta_seconds && ETA: {progress.eta_seconds}s }
+
+ )}
+
+ )}
+
+
+ {/* Result Message */}
+
+ {loadResult && !isLoading && (
+
+ {loadResult.success ? (
+ <>
+
+
+
Model loaded successfully!
+
{loadResult.model_info?.architecture} - {loadResult.model_info?.num_params_millions}M params
+
+ >
+ ) : (
+ <>
+
+
+
Failed to load model
+
{loadResult.error}
+ {loadResult.suggestion &&
{loadResult.suggestion}
}
+
+ >
+ )}
+
+ )}
+
+
+
+ {/* Currently Loaded Model */}
+ {modelInfo && (
+
+
+
+
Loaded Model
+
+
+ Unload
+
+
+
+
+
+ Name
+ {modelInfo.name}
+
+
+ Parameters
+ {modelInfo.num_params_millions}M
+
+
+ Memory
+ {modelInfo.memory_mb?.toFixed(1)} MB
+
+
+ Device
+ {modelInfo.device}
+
+
+ Quantizable Layers
+ {modelInfo.num_quantizable_layers}
+
+
+
+ )}
+
+ {/* Quick Start */}
+
+
+
+
Quick Start
+
+
+ Click to select a model:
+
+ {exampleModels ? (
+ <>
+ {exampleModels.sample_models?.length > 0 && (
+
+
⭐ Sample Models (Pre-cached)
+
+ {exampleModels.sample_models.map((model) => (
+ handleQuickLoad(model.id)}
+ >
+ {model.id}
+ Instant load
+
+ ))}
+
+
+ )}
+
+
+
Small Models
+
+ {exampleModels.small_models?.map((model) => (
+ handleQuickLoad(model.id)}
+ >
+ {model.id}
+ {model.size}
+
+ ))}
+
+
+ >
+ ) : (
+
+
+ Loading examples...
+
+ )}
+
+
+ {/* System Status */}
+
+
+
+
System
+
+
+ {systemInfo ? (
+
+
+ Device
+
+ {systemInfo.cuda_available ? '🟢 CUDA GPU' :
+ systemInfo.mps_available ? '🟢 Apple MPS' : '🟡 CPU'}
+
+
+
+ {systemInfo.gpus?.length > 0 && (
+
+ GPU
+ {systemInfo.gpus[0].name}
+
+ )}
+
+
+ RAM
+ {systemInfo.ram_available_gb?.toFixed(1)} GB
+
+
+ ) : (
+ Loading...
+ )}
+
+
+ {/* Cached Models */}
+
+
+
+
Model Cache
+
+
+ Cleanup
+
+
+
+
+ Models auto-delete after 4 hours (except samples)
+
+
+ {cachedModels.length > 0 ? (
+
+ {cachedModels.map((model) => (
+
+
+
+ {model.is_sample && '⭐ '}
+ {model.name}
+
+ {model.size_mb} MB
+
+ {!model.is_sample && (
+
handleDeleteFromCache(model.name)}
+ >
+
+
+ )}
+
+ ))}
+
+ ) : (
+ No models cached
+ )}
+
+
+
+
+
+ );
+}
diff --git a/frontend/src/pages/Quantizer.jsx b/frontend/src/pages/Quantizer.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..96468801cac508f8779487a3b1a2437b0ef966f9
--- /dev/null
+++ b/frontend/src/pages/Quantizer.jsx
@@ -0,0 +1,879 @@
+import { useState, useEffect } from 'react';
+import {
+ Layers,
+ Play,
+ Settings2,
+ Zap,
+ RefreshCw,
+ Download
+} from 'lucide-react';
+import { useQuantizationStore, useModelStore } from '../store';
+import { motion, AnimatePresence } from 'framer-motion';
+import {
+ BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip,
+ ResponsiveContainer, Cell, AreaChart, Area
+} from 'recharts';
+
+/**
+ * Quantizer page - main quantization interface
+ */
+export default function Quantizer() {
+ const { result, isQuantizing, quantizeWeights, quantizeLayer, quantizeModel, clearResult } = useQuantizationStore();
+ const { modelInfo, layers, fetchLayers } = useModelStore();
+
+ // Configuration state
+ const [config, setConfig] = useState({
+ inFeatures: 64,
+ outFeatures: 128,
+ bits: 8,
+ method: 'int8',
+ mode: 'symmetric',
+ groupSize: null,
+ pattern: 'random',
+ dtype: 'float32'
+ });
+
+ const [activeTab, setActiveTab] = useState('heatmaps');
+ const [source, setSource] = useState('custom'); // 'custom' | 'layer'
+ const [target, setTarget] = useState('single'); // 'single' | 'full'
+ const [selectedLayer, setSelectedLayer] = useState('');
+
+ // Switch to layer mode if model is loaded
+ useEffect(() => {
+ if (modelInfo) {
+ setSource('layer');
+ if (layers.length === 0) fetchLayers();
+ }
+ }, [modelInfo]);
+
+ const handleQuantize = async () => {
+ if (source === 'layer') {
+ if (target === 'full') {
+ await quantizeModel(config);
+ } else if (selectedLayer) {
+ await quantizeLayer(selectedLayer, config);
+ }
+ } else {
+ await quantizeWeights(config);
+ }
+ };
+
+ const updateConfig = (key, value) => {
+ setConfig((prev) => ({ ...prev, [key]: value }));
+
+ // Auto-update method based on bits
+ if (key === 'bits') {
+ if (value === 4) {
+ setConfig((prev) => ({ ...prev, bits: value, method: 'int4', groupSize: 128 }));
+ } else {
+ setConfig((prev) => ({ ...prev, bits: value, method: 'int8', groupSize: null }));
+ }
+ }
+ };
+
+ // Convert histogram data for Recharts
+ const getHistogramData = (viz) => {
+ if (!viz?.data) return [];
+ const { x, y } = viz.data;
+ if (!x || !y) return [];
+ return x.map((val, i) => ({
+ value: typeof val === 'number' ? val.toFixed(3) : val,
+ count: y[i]
+ }));
+ };
+
+ // Generate heatmap as a simple statistical summary
+ const getHeatmapStats = (viz) => {
+ if (!viz?.data?.z) return null;
+ const z = viz.data.z;
+ const flat = z.flat();
+ return {
+ min: Math.min(...flat).toFixed(4),
+ max: Math.max(...flat).toFixed(4),
+ mean: (flat.reduce((a, b) => a + b, 0) / flat.length).toFixed(4),
+ rows: z.length,
+ cols: z[0]?.length || 0
+ };
+ };
+
+ return (
+
+ {/* Header */}
+
+
Weight Quantizer
+
+ Quantize neural network weights to lower precision formats
+
+ {modelInfo && (
+
+ Active Model:
+ {modelInfo.name}
+
+ )}
+
+
+
+ {/* Configuration Panel */}
+
+
+
+
Configuration
+
+
+ {/* Source Selection */}
+ {modelInfo && (
+
+
Data Source
+
+ setSource('layer')}
+ >
+ Loaded Model
+
+ setSource('custom')}
+ >
+ Custom Weights
+
+
+
+ {source === 'layer' && (
+ <>
+
Scope
+
+ setTarget('single')}
+ >
+ Single Layer
+
+ setTarget('full')}
+ >
+ All Layers
+
+
+ >
+ )}
+
+ )}
+
+ {/* Weight Selection */}
+ {(source === 'custom' || target === 'single') && (
+
+
+ {source === 'layer' ? 'Select Layer' : 'Weight Dimensions'}
+
+
+ {source === 'layer' ? (
+
+ Layer
+ setSelectedLayer(e.target.value)}
+ >
+ Select a layer...
+ {layers.map((layer) => (
+
+ {layer}
+
+ ))}
+
+
+ ) : (
+ <>
+
+ Output Features
+ updateConfig('outFeatures', parseInt(e.target.value))}
+ />
+ {config.outFeatures}
+
+
+
+ Input Features
+ updateConfig('inFeatures', parseInt(e.target.value))}
+ />
+ {config.inFeatures}
+
+ >
+ )}
+
+ )}
+
+ {/* Quantization Settings */}
+
+
Quantization
+
+
+
Precision (bits)
+
+ updateConfig('bits', 8)}
+ >
+ 8-bit
+
+ updateConfig('bits', 4)}
+ >
+ 4-bit
+
+
+
+
+
+ Method
+ updateConfig('method', e.target.value)}
+ >
+ INT8 (Per-Channel)
+ INT4 (Grouped)
+ NF4 (QLoRA Style)
+
+
+
+
+ Mode
+ updateConfig('mode', e.target.value)}
+ >
+ Symmetric
+ Asymmetric
+
+
+
+ {config.bits === 4 && (
+
+ Group Size
+ updateConfig('groupSize', parseInt(e.target.value))}
+ >
+ 32
+ 64
+ 128
+ 256
+
+
+ )}
+
+
+ {/* Weight Pattern (Custom Only) */}
+ {source === 'custom' && (
+
+
Weight Pattern
+
+
+ {['random', 'gradient', 'ones', 'alternating', 'eye'].map((pattern) => (
+ updateConfig('pattern', pattern)}
+ >
+ {pattern}
+
+ ))}
+
+
+ )}
+
+ {/* Actions */}
+
+
+ {isQuantizing ? (
+ <>
+
+ Processing...
+ >
+ ) : (
+ <>
+
+ {target === 'full' ? 'Quantize Entire Model' : 'Quantize'}
+ >
+ )}
+
+
+ {result && (
+
+ Clear Results
+
+ )}
+
+
+
+ {/* Results Panel */}
+
+
+ {result ? (
+ result.summary ? (
+
+ ) : (
+
+ {/* Stats */}
+
+
+
+
{result.stats.memory_savings_percent.toFixed(1)}%
+
Memory Saved
+
+
+
{result.stats.max_error.toFixed(6)}
+
Max Error
+
+
+
{result.stats.mean_error.toFixed(6)}
+
Mean Error
+
+
+
{config.bits}-bit
+
Precision
+
+
+
+
+ {/* Visualization Tabs */}
+
+ setActiveTab('heatmaps')}
+ >
+ Statistics
+
+ setActiveTab('distributions')}
+ >
+ Distributions
+
+ setActiveTab('error')}
+ >
+ Details
+
+
+
+ {/* Charts */}
+
+ {activeTab === 'heatmaps' && (
+
+ {['original_heatmap', 'quantized_heatmap', 'dequantized_heatmap', 'error_heatmap'].map((key) => {
+ const viz = result.visualizations[key];
+ const stats = getHeatmapStats(viz);
+ const title = viz?.layout?.title || key.replace('_', ' ');
+ return (
+
+
{title}
+ {stats && (
+
+
+ Shape:
+ {stats.rows} × {stats.cols}
+
+
+ Min:
+ {stats.min}
+
+
+ Max:
+ {stats.max}
+
+
+ Mean:
+ {stats.mean}
+
+
+ )}
+
+ );
+ })}
+
+ )}
+
+ {activeTab === 'distributions' && (
+
+
+
Original Weight Distribution
+
+
+
+
+
+
+
+
+
+
+
+
+
Quantized Weight Distribution
+
+
+
+
+
+
+
+
+
+
+
+
+
Quantization Scales Distribution
+
+
+
+
+
+
+
+
+
+
+
+ )}
+
+ {activeTab === 'error' && (
+
+
Quantization Details
+
+ {result.layer_name && (
+
+ Analyzed Layer:
+ {result.layer_name}
+
+ )}
+
+ Original Shape:
+ {JSON.stringify(result.stats.original_shape)}
+
+
+ Quantized Shape:
+ {JSON.stringify(result.stats.quantized_shape)}
+
+
+ Scales Shape:
+ {JSON.stringify(result.stats.scales_shape)}
+
+
+ Original Dtype:
+ {result.stats.original_dtype}
+
+
+ Quantized Dtype:
+ {result.stats.quantized_dtype}
+
+
+ Max Error:
+ {result.stats.max_error.toExponential(4)}
+
+
+ Mean Error:
+ {result.stats.mean_error.toExponential(4)}
+
+
+ Memory Savings:
+ {result.stats.memory_savings_percent.toFixed(2)}%
+
+
+
+ )}
+
+
+ )
+ ) : (
+
+
+ No Results Yet
+ Configure your quantization settings and click "Quantize" to see the results.
+
+ )}
+
+
+
+
+
+
+ );
+}
+
+function FullModelResults({ result }) {
+ if (!result || !result.summary) return null;
+
+ return (
+
+ Model Quantization Summary
+
+
+
+
+ {result.summary.total_memory_saved_mb.toFixed(1)} MB
+
+
Total Saved
+
+
+
+ {result.summary.average_error.toExponential(2)}
+
+
Avg Error
+
+
+
+ {result.summary.layers_quantized}/{result.summary.total_layers}
+
+
Layers
+
+
+
+ Layer Details
+
+
+
+
+ Layer
+ Shape
+ Error
+ Saved
+
+
+
+ {result.layers.map((layer, i) => (
+
+
+ {layer.layer_name.split('.').slice(-2).join('.')}
+
+
+ {JSON.stringify(layer.shape)}
+
+
+ {layer.error?.toExponential(2) || 'N/A'}
+
+
+ {layer.memory_savings_percent?.toFixed(1)}%
+
+
+ ))}
+
+
+
+
+ );
+}
diff --git a/frontend/src/store/index.js b/frontend/src/store/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..1219f99748e1d7ced9aad0a21db249830c73486e
--- /dev/null
+++ b/frontend/src/store/index.js
@@ -0,0 +1,331 @@
+import { create } from 'zustand';
+import { toast } from 'react-hot-toast';
+
+/**
+ * Main application store using Zustand
+ * Manages global state: system info, model, quantization results
+ */
+
+// API base URL - use relative path for Vite proxy in development
+const API_BASE = import.meta.env.VITE_API_URL || '/api';
+
// System store - hardware info and capabilities reported by the backend.
export const useSystemStore = create((set, get) => ({
  systemInfo: null,  // last /system/info payload (possibly a cached copy)
  isLoading: false,
  error: null,

  // Fetch hardware info. A cached copy from localStorage is shown
  // immediately (if present) while the network request is in flight;
  // the fresh response then overwrites it and refreshes the cache.
  fetchSystemInfo: async () => {
    set({ isLoading: true, error: null });

    // Optimistic load from cache
    const stored = localStorage.getItem('system_info');
    if (stored) {
      try {
        set({ systemInfo: JSON.parse(stored) });
      } catch (e) { }
    }

    try {
      const response = await fetch(`${API_BASE}/system/info`);
      if (!response.ok) throw new Error('Failed to fetch system info');
      const info = await response.json();
      set({ systemInfo: info, isLoading: false });
      localStorage.setItem('system_info', JSON.stringify(info));
      return info;
    } catch (error) {
      set({ error: error.message, isLoading: false });
      return null;
    }
  },

  // Ask the backend whether a model of the given size/dtype can be loaded
  // on this machine. Resolves with the backend's JSON verdict, or with
  // { can_load: false, error } on network failure.
  checkModelRequirements: async (paramsB, dtype = 'fp16') => {
    const url = `${API_BASE}/system/check-model?model_params_billions=${paramsB}&dtype=${dtype}`;
    try {
      const response = await fetch(url, { method: 'POST' });
      return await response.json();
    } catch (error) {
      return { can_load: false, error: error.message };
    }
  }
}));
+
// Model store - loaded model info and the list of quantizable layers.
export const useModelStore = create((set, get) => ({
  modelInfo: null,     // info for the currently loaded model, or null
  layers: [],          // quantizable layer names for the dropdown
  isLoading: false,
  loadingProgress: 0,  // 0 or 100 only; no streaming progress here
  error: null,

  // POST /models/load and update store state with the result.
  // Resolves with the backend response; on network failure resolves
  // with { success: false, error } instead of throwing.
  loadModel: async (modelName, options = {}) => {
    set({ isLoading: true, loadingProgress: 0, error: null });

    const payload = {
      model_name: modelName,
      model_type: options.modelType || 'generic',
      dtype: options.dtype || 'auto',
      device: options.device || 'auto',
      low_memory: options.lowMemory || false,
      trust_remote_code: true
    };

    try {
      const response = await fetch(`${API_BASE}/models/load`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload)
      });
      const data = await response.json();

      if (!data.success) {
        const errMsg = data.error || 'Failed to load model';
        set({ error: errMsg, isLoading: false });
        toast.error(errMsg);
        return data;
      }

      set({
        modelInfo: data.model_info || { name: data.name || data.model_id },
        isLoading: false,
        loadingProgress: 100
      });
      toast.success(`Model loaded: ${data.model_info?.name || modelName}`);
      return data;
    } catch (error) {
      set({ error: error.message, isLoading: false });
      toast.error(`Connection failed: ${error.message}`);
      return { success: false, error: error.message };
    }
  },

  // Unfinished stub kept for interface compatibility.
  // NOTE(review): this sets isLoading without ever clearing it — confirm
  // intent before wiring it up.
  fetchModelInfo: async (modelName) => {
    set({ isLoading: true, error: null });
    // Start streaming load... handled by component usually or separate action
  },

  // Sync store state with whatever model the backend already has loaded
  // (e.g. after a page refresh). Clears state if nothing is loaded.
  checkLoadedModel: async () => {
    try {
      const response = await fetch(`${API_BASE}/models/info`);
      const data = await response.json();
      if (!(data && data.name)) {
        set({ modelInfo: null, layers: [] });
        return;
      }
      const { name, num_params, memory_mb, device, dtype } = data;
      set({
        modelInfo: { name, num_params, memory_mb, device, dtype },
        error: null
      });
      // Also fetch layers
      get().fetchLayers();
    } catch (error) {
      console.error('Failed to check loaded model:', error);
    }
  },

  // Populate the layer dropdown. The backend may answer with either
  // a flat list of names or a list of layer objects.
  fetchLayers: async () => {
    try {
      const response = await fetch(`${API_BASE}/models/layers`);
      const data = await response.json();
      if (data.quantizable_layers) {
        // Use quantizable_layers (strings) for the dropdown
        set({ layers: data.quantizable_layers });
      } else if (data.layers) {
        // Fallback if structure is different
        set({ layers: data.layers.map((entry) => entry.name) });
      }
    } catch (error) {
      console.error('Failed to fetch layers:', error);
    }
  },

  // Unload the model on the backend and reset local state.
  unloadModel: async () => {
    try {
      await fetch(`${API_BASE}/models/unload`, { method: 'POST' });
      set({ modelInfo: null, layers: [], error: null });
      toast.success('Model unloaded');
    } catch (error) {
      console.error('Failed to unload model:', error);
    }
  },

  clearError: () => set({ error: null })
}));
+
// Quantization store - quantization operations and results.
// Every action resolves with the backend's JSON payload (or with
// { success: false, error } on network failure) so components can
// inspect the outcome directly in addition to reading store state.
export const useQuantizationStore = create((set, get) => ({
  result: null,        // last quantization response (custom / layer / full model)
  isQuantizing: false, // true while a request is in flight
  progress: 0,         // 0 or 100 only; no streaming progress yet
  error: null,
  history: [],         // successful custom-weight runs for this session

  // Quantize synthetic weights generated on the backend from the UI config.
  // Successful runs are appended to `history` (used by the dashboard).
  quantizeWeights: async (config) => {
    set({ isQuantizing: true, progress: 0, error: null });

    try {
      const response = await fetch(`${API_BASE}/quantize/weights`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          in_features: config.inFeatures || 64,
          out_features: config.outFeatures || 128,
          bits: config.bits || 8,
          method: config.method || 'int8',
          mode: config.mode || 'symmetric',
          group_size: config.groupSize || null,
          weight_pattern: config.pattern || 'random',
          dtype: config.dtype || 'float32'
        })
      });

      const data = await response.json();

      if (data.success) {
        set({
          result: data,
          isQuantizing: false,
          progress: 100,
          history: [...get().history, {
            timestamp: new Date().toISOString(),
            config,
            stats: data.stats
          }]
        });
        toast.success('Custom weights quantized');
        return data;
      } else {
        set({ error: data.error || 'Quantization failed', isQuantizing: false });
        toast.error(data.error || 'Quantization failed');
        return data;
      }
    } catch (error) {
      set({ error: error.message, isQuantizing: false });
      toast.error(error.message);
      return { success: false, error: error.message };
    }
  },

  // Quantize a single named layer of the loaded model.
  quantizeLayer: async (layerName, config) => {
    set({ isQuantizing: true, progress: 0, error: null });

    try {
      const response = await fetch(`${API_BASE}/quantize/layer`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          layer_name: layerName,
          bits: config.bits || 8,
          method: config.method || 'int8',
          mode: config.mode || 'symmetric',
          group_size: config.groupSize || null
        })
      });

      const data = await response.json();

      if (data.success) {
        set({
          result: data,
          error: null,
          isQuantizing: false,
          progress: 100
        });
        toast.success(`Layer ${layerName} quantized`);
      } else {
        const errMsg = data.error || 'Quantization failed';
        set({
          result: null,
          error: errMsg,
          isQuantizing: false
        });
        toast.error(errMsg);
      }
      return data;
    } catch (error) {
      set({ error: error.message, isQuantizing: false });
      toast.error(error.message);
      return { success: false, error: error.message };
    }
  },

  // Quantize every quantizable layer of the loaded model in one request.
  quantizeModel: async (config) => {
    set({ isQuantizing: true, progress: 0, error: null });

    try {
      const response = await fetch(`${API_BASE}/quantize/model`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          bits: config.bits || 8,
          method: config.method || 'int8',
          mode: config.mode || 'symmetric',
          group_size: config.groupSize || null
        })
      });

      // BUG FIX: the response body was never parsed here, so `data` below
      // was an unresolved reference and every full-model run threw a
      // ReferenceError straight into the catch block.
      const data = await response.json();

      if (data.success) {
        set({
          result: data,
          error: null,
          isQuantizing: false,
          progress: 100
        });
        toast.success(`Full Model quantized! Saved ${data.summary?.total_memory_saved_mb?.toFixed(2)} MB`);
      } else {
        const errMsg = data.error || 'Optimization interrupted';
        set({
          result: null,
          error: errMsg,
          isQuantizing: false
        });
        toast.error(errMsg);
      }
      return data;
    } catch (error) {
      set({ error: error.message, isQuantizing: false });
      toast.error(error.message);
      return { success: false, error: error.message };
    }
  },

  // Compare several quantization methods, optionally on a specific layer.
  // Resolves with the backend comparison payload or { error } on failure;
  // does not touch store state.
  compareMethod: async (methods = ['int8', 'int4', 'nf4'], layerName = null) => {
    try {
      const body = { methods };
      if (layerName) {
        body.layer_name = layerName;
      }

      const response = await fetch(`${API_BASE}/analysis/compare`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(body)
      });
      return await response.json();
    } catch (error) {
      toast.error(error.message);
      return { error: error.message };
    }
  },

  clearResult: () => set({ result: null, error: null }),
  clearHistory: () => set({ history: [] })
}));
+
// UI store - layout preferences: sidebar visibility, active tab, theme.
export const useUIStore = create((set) => ({
  sidebarOpen: true,
  activeTab: 'quantize',
  theme: 'dark',

  toggleSidebar: () =>
    set(({ sidebarOpen }) => ({ sidebarOpen: !sidebarOpen })),
  setActiveTab: (tab) => set({ activeTab: tab }),
  setTheme: (theme) => set({ theme })
}));
diff --git a/frontend/vite.config.js b/frontend/vite.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..084e8feb7f7db138651485c3414bbcaf0d1e9abf
--- /dev/null
+++ b/frontend/vite.config.js
@@ -0,0 +1,29 @@
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'

// https://vite.dev/config/
export default defineConfig({
  plugins: [react()],
  server: {
    port: 5173,
    // Forward /api requests to the backend during development so the
    // frontend can use relative URLs both in dev and in production
    // (where the backend serves the built bundle itself).
    proxy: {
      '/api': {
        target: 'http://localhost:8000',
        changeOrigin: true,
      }
    }
  },
  build: {
    outDir: 'dist',
    sourcemap: false,
    rollupOptions: {
      output: {
        // Split rarely-changing third-party libraries into their own
        // chunks so browsers can cache them independently of app code.
        manualChunks: {
          'vendor': ['react', 'react-dom', 'react-router-dom'],
          'motion': ['framer-motion'],
          'charts': ['recharts']
        }
      }
    }
  }
})