import { useState, useEffect } from 'react';
import { Layers, Play, Settings2, Zap, RefreshCw, Download } from 'lucide-react';
import { useQuantizationStore, useModelStore } from '../store';
import { motion, AnimatePresence } from 'framer-motion';
import { BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, Cell, AreaChart, Area } from 'recharts';

/**
 * Quantizer page - main quantization interface.
 *
 * Lets the user quantize either synthetic weights ("custom" source) or a
 * loaded model's layers ("layer" source), targeting a single layer or the
 * full model, and renders the resulting stats/visualizations.
 */
export default function Quantizer() {
  // Store slices: quantization actions/result and loaded-model metadata.
  const { result, isQuantizing, quantizeWeights, quantizeLayer, quantizeModel, clearResult } = useQuantizationStore();
  const { modelInfo, layers, fetchLayers } = useModelStore();

  // Configuration state — mirrors the backend quantization request payload.
  // bits drives method/groupSize via updateConfig (4 -> int4 + groupSize 128,
  // otherwise int8 with no grouping).
  const [config, setConfig] = useState({
    inFeatures: 64,
    outFeatures: 128,
    bits: 8,
    method: 'int8',
    mode: 'symmetric',
    groupSize: null,
    pattern: 'random',
    dtype: 'float32'
  });
  const [activeTab, setActiveTab] = useState('heatmaps');
  const [source, setSource] = useState('custom'); // 'custom' | 'layer'
  const [target, setTarget] = useState('single'); // 'single' | 'full'
  const [selectedLayer, setSelectedLayer] = useState('');

  // Switch to layer mode if model is loaded.
  // NOTE(review): dependency array omits `layers` and `fetchLayers`; this is
  // presumably intentional (run only when modelInfo changes) but an exhaustive-deps
  // lint would flag it — confirm the stale-closure risk on `layers` is acceptable.
  useEffect(() => {
    if (modelInfo) {
      setSource('layer');
      if (layers.length === 0) fetchLayers();
    }
  }, [modelInfo]);

  // Dispatch the appropriate quantization action for the current
  // source/target selection. No-op when source is 'layer', target is
  // 'single', and no layer has been selected yet.
  const handleQuantize = async () => {
    if (source === 'layer') {
      if (target === 'full') {
        await quantizeModel(config);
      } else if (selectedLayer) {
        await quantizeLayer(selectedLayer, config);
      }
    } else {
      await quantizeWeights(config);
    }
  };

  // Update one config field; changing `bits` also re-derives method/groupSize.
  // NOTE(review): when key === 'bits' the first setConfig call is redundant —
  // the branch below immediately queues a second updater that overwrites it.
  // Harmless under React's functional-updater batching, but this could be
  // collapsed into a single setConfig call.
  const updateConfig = (key, value) => {
    setConfig((prev) => ({ ...prev, [key]: value }));
    // Auto-update method based on bits
    if (key === 'bits') {
      if (value === 4) {
        setConfig((prev) => ({ ...prev, bits: value, method: 'int4', groupSize: 128 }));
      } else {
        setConfig((prev) => ({ ...prev, bits: value, method: 'int8', groupSize: null }));
      }
    }
  };

  // Convert histogram data for Recharts.
  // Expects viz.data to carry parallel arrays x (bin values) and y (counts);
  // returns [] when either is absent. Numeric bin values are formatted to
  // 3 decimal places (as strings) for axis labels.
  const getHistogramData = (viz) => {
    if (!viz?.data) return [];
    const { x, y } = viz.data;
    if (!x || !y) return [];
    return x.map((val, i) => ({ value: typeof val === 'number' ? val.toFixed(3) : val, count: y[i] }));
  };

  // Generate heatmap as a simple statistical summary.
  // viz.data.z is assumed to be a 2D numeric array — TODO confirm with the
  // producing API. Returns min/max/mean (4-decimal strings) plus dimensions,
  // or null when no heatmap data is present.
  // NOTE(review): Math.min(...flat)/Math.max(...flat) spread the whole matrix
  // as arguments and can throw RangeError (call-stack limit) for very large
  // heatmaps; a reduce-based min/max would be safer.
  const getHeatmapStats = (viz) => {
    if (!viz?.data?.z) return null;
    const z = viz.data.z;
    const flat = z.flat();
    return { min: Math.min(...flat).toFixed(4), max: Math.max(...flat).toFixed(4), mean: (flat.reduce((a, b) => a + b, 0) / flat.length).toFixed(4), rows: z.length, cols: z[0]?.length || 0 };
  };

  // NOTE(review): the JSX below appears to have had its element tags stripped
  // by whatever tool extracted this file — only text nodes and {expression}
  // fragments remain, and the component's closing tokens are not visible here.
  // The original markup must be recovered from version control; the fragments
  // are preserved verbatim below.
  return (
Quantize neural network weights to lower precision formats
{modelInfo && ({result.layer_name}
{JSON.stringify(result.stats.original_shape)}
{JSON.stringify(result.stats.quantized_shape)}
{JSON.stringify(result.stats.scales_shape)}
{result.stats.original_dtype}
{result.stats.quantized_dtype}
{result.stats.max_error.toExponential(4)}
{result.stats.mean_error.toExponential(4)}
{result.stats.memory_savings_percent.toFixed(2)}%
Configure your quantization settings and click "Quantize" to see the results.
| Layer | Shape | Error | Saved |
|---|---|---|---|
| {layer.layer_name.split('.').slice(-2).join('.')} | {JSON.stringify(layer.shape)} | {layer.error?.toExponential(2) || 'N/A'} | {layer.memory_savings_percent?.toFixed(1)}% |