# app.py — uploaded by raayraay (commit 3c7d63c, verified)
"""
DeepSuite: The Quantum Auditor
A tool to determine if your data requires quantum computing or if classical methods suffice.
Implements the "Tang Test" (Dequantization Audit) from the DeepSuite Research Proposal.
Authors: Eric Raymond & Myalou
"""
import gradio as gr
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
# --- CUSTOM THEME (Quantum/Scientific Dark Mode) ---
# Dark, high-contrast Gradio theme built once at import time and passed to
# gr.Blocks below: cyan/violet accents on a deep navy gradient background.
quantum_theme = gr.themes.Base(
    primary_hue="cyan",
    secondary_hue="violet",
    neutral_hue="slate",
).set(
    body_background_fill="linear-gradient(135deg, #0f0f23 0%, #1a1a3e 100%)",
    block_background_fill="#1e1e3f",
    block_label_text_color="#00d4ff",
    button_primary_background_fill="linear-gradient(90deg, #00d4ff, #7c3aed)",
    button_primary_text_color="white",
    input_background_fill="#2a2a5f",
    input_border_color="#3a3a7f",
)
# --- SAMPLE DATA GENERATORS ---
def generate_mnist_sample():
    """Build a synthetic matrix with MNIST-like spectral structure.

    Real MNIST is famously low-rank: although each image has 784 pixels,
    the digits concentrate on a roughly 50-dimensional manifold.  We mimic
    that by pushing a 50-dim latent sample through a spatially smoothed
    basis, then standardising and adding a touch of pixel noise.

    Returns:
        tuple[numpy.ndarray, str]: a (200, 784) data matrix and a caption.
    """
    n_images, n_pixels = 200, 784  # 28x28 images, flattened
    manifold_dim = 50              # effective dimensionality of digit data

    latent = np.random.randn(n_images, manifold_dim)
    basis = np.random.randn(manifold_dim, n_pixels)

    # Correlate neighbouring pixels (sequential first-order smoothing) so
    # the basis resembles image data rather than white noise.
    for col in range(n_pixels - 1):
        basis[:, col + 1] = 0.7 * basis[:, col] + 0.3 * basis[:, col + 1]

    sample = latent @ basis
    sample = (sample - sample.mean()) / (sample.std() + 1e-8)
    sample += 0.01 * np.random.randn(n_images, n_pixels)
    return sample, "MNIST-like sample: 200 images Γ— 784 pixels (28Γ—28 flattened)"
def generate_protein_sample():
    """Build synthetic protein-embedding data of moderate rank.

    Evolutionary constraints keep protein embeddings at a moderate
    effective rank (~80 here); localized conserved "motif" windows add
    structure on top of the low-rank factorization.

    Returns:
        tuple[numpy.ndarray, str]: a (150, 256) data matrix and a caption.
    """
    n_seqs, emb_dim = 150, 256
    eff_rank = 80  # approximate degrees of freedom from residue correlations

    left = np.random.randn(n_seqs, eff_rank)
    right = np.random.randn(eff_rank, emb_dim)

    # Inject one conserved 20-wide "motif" window at a random offset per sequence.
    motifs = np.zeros((n_seqs, emb_dim))
    for row in range(n_seqs):
        offset = np.random.randint(0, emb_dim - 20)
        motifs[row, offset:offset + 20] = np.random.randn(20) * 2

    embeddings = left @ right + motifs + 0.1 * np.random.randn(n_seqs, emb_dim)
    return embeddings, "Protein embeddings: 150 sequences Γ— 256 dimensions"
def generate_quantum_chemistry_sample():
    """Build data that mimics genuinely high-rank electron correlations.

    Unlike image or sequence data, quantum-chemical correlation data
    resists low-rank approximation: nonlinear mixing through two
    incommensurate transforms plus pairwise couplings keeps the singular
    spectrum flat.

    Returns:
        tuple[numpy.ndarray, str]: a (100, 100) data matrix and a caption.
    """
    n_states = n_orbitals = 100

    states = np.random.randn(n_states, n_orbitals)
    mixer = np.random.randn(n_orbitals, n_orbitals) * 0.3

    # Nonlinear "entanglement": mix through the matrix and its transpose.
    states = np.tanh(states @ mixer) + np.sin(states @ mixer.T) * 0.5

    # Two-body couplings between adjacent orbital pairs.
    for even in range(0, n_orbitals - 1, 2):
        states[:, even] += 0.3 * states[:, even + 1] * np.sign(states[:, even])

    return states, "Quantum chemistry: 100 molecular states Γ— 100 orbitals"
# --- CORE ANALYSIS FUNCTIONS ---
def effective_rank(matrix):
    """Return ``(effective_rank, singular_values)`` via spectral entropy.

    The effective rank is exp(H), where H is the Shannon entropy of the
    normalized singular-value distribution.  One dominant singular value
    gives a result near 1; a flat spectrum gives a result near rank(matrix).

    Returns ``(0, empty array)`` if the SVD fails and ``(0, s)`` when the
    spectrum sums to zero.
    """
    try:
        spectrum = linalg.svdvals(matrix)
    except Exception:
        return 0, np.array([])

    total = np.sum(spectrum)
    if total == 0:
        return 0, spectrum

    probs = spectrum / total
    probs = probs[probs > 1e-10]  # drop (near-)zeros before taking logs
    spectral_entropy = -np.sum(probs * np.log(probs))
    return np.exp(spectral_entropy), spectrum
def condition_number_analysis(matrix):
    """Return the spectral condition number (largest / smallest singular value).

    A large condition number means classical linear algebra on this matrix
    is numerically unstable.  Returns ``np.inf`` when the smallest singular
    value is at or below 1e-10 (effectively singular) or the SVD fails.
    """
    try:
        spectrum = linalg.svdvals(matrix)
    except Exception:
        return np.inf
    # Guard clauses: empty or (near-)singular spectra are reported as inf.
    if len(spectrum) == 0 or spectrum[-1] <= 1e-10:
        return np.inf
    return spectrum[0] / spectrum[-1]
def coherence_estimate(matrix):
    """Estimate row coherence: max row norm squared over mean row norm squared.

    Low coherence means amplitude mass is spread evenly across rows, which
    makes quantum state preparation cheap.  Returns 0 for an all-zero matrix.
    """
    row_norms = np.linalg.norm(matrix, axis=1)
    peak = np.max(row_norms)
    if peak == 0:
        return 0
    # Epsilon keeps the division well-defined for tiny norms.
    return peak ** 2 / (np.mean(row_norms ** 2) + 1e-10)
def sparsity_measure(matrix):
    """Return the fraction of near-zero entries in ``matrix``.

    An entry counts as "near zero" when its magnitude is below 1% of the
    matrix's standard deviation.

    Bug fix: for a constant matrix (std == 0) the relative threshold
    degenerates to 0 and the strict ``<`` comparison counted nothing, so
    an all-zero matrix reported sparsity 0.0.  We now fall back to
    counting exact zeros in that case, so a zero matrix correctly reports
    1.0 (fully sparse) while a constant non-zero matrix still reports 0.0.
    """
    threshold = np.std(matrix) * 0.01
    if threshold == 0:
        # Zero-variance input: compare against exact zero instead.
        return np.count_nonzero(matrix == 0) / matrix.size
    sparse_count = np.sum(np.abs(matrix) < threshold)
    return sparse_count / matrix.size
# --- MAIN AUDIT FUNCTION ---
def full_tang_audit(data_source, uploaded_file, data_type, num_samples, feature_dim, threshold_mult):
    """
    Complete quantum advantage audit with multiple metrics.
    Returns status, description, recommendation, and visualizations.

    Args:
        data_source: radio selection naming a preset dataset, "Upload CSV",
            or "Synthetic Data" (the fallback branch).
        uploaded_file: path of the uploaded CSV (Gradio 4.x passes a string
            filepath) or None when nothing was uploaded.
        data_type: synthetic-data flavor; only consulted when data_source
            is not one of the presets or an upload.
        num_samples: requested sample count N for synthetic data.
        feature_dim: requested feature count D for synthetic data.
        threshold_mult: scales the sqrt(N) effective-rank decision threshold.

    Returns:
        Tuple of (status_text, data_description, report_markdown,
        spectral_decay_figure, score_gauge_figure).  On a CSV loading
        error both figures are None.
    """
    N = int(num_samples)
    D = int(feature_dim)
    # --- DATA LOADING ---
    if data_source == "Upload CSV" and uploaded_file is not None:
        try:
            # Gradio 4.x passes filepath directly as string
            data = np.genfromtxt(uploaded_file, delimiter=',', skip_header=1)
            if data.ndim == 1:
                data = data.reshape(-1, 1)
            # Handle NaN values
            if np.any(np.isnan(data)):
                data = np.nan_to_num(data, nan=0.0)
            # N and D now reflect the uploaded file, not the slider values.
            N, D = data.shape
            data_desc = f"Uploaded data: {N} samples Γ— {D} features"
        except Exception as e:
            return f"❌ Error loading file: {e}", "", "", None, None
    elif data_source == "πŸ“Š MNIST Sample (The Reveal)":
        data, data_desc = generate_mnist_sample()
        N, D = data.shape
    elif data_source == "🧬 Protein Embeddings":
        data, data_desc = generate_protein_sample()
        N, D = data.shape
    elif data_source == "βš›οΈ Quantum Chemistry":
        data, data_desc = generate_quantum_chemistry_sample()
        N, D = data.shape
    else:
        # Synthetic data generation
        if data_type == "Low-Rank (Classical-Friendly)":
            true_rank = max(2, int(np.log2(D)) * 2)
            A = np.random.randn(N, true_rank)
            B = np.random.randn(true_rank, D)
            data = A @ B + 0.01 * np.random.randn(N, D)
            data_desc = f"Synthetic low-rank (true rank β‰ˆ {true_rank})"
        elif data_type == "High-Rank (Quantum-Favorable)":
            data = np.random.randn(N, D)
            mixing = np.random.randn(D, D) * 0.2
            data = np.sin(data @ mixing) + np.cos(data @ mixing.T) * 0.5
            data_desc = "Synthetic high-rank with nonlinear correlations"
        elif data_type == "Sparse (Edge Case)":
            data = np.zeros((N, D))
            for i in range(N):
                idx = np.random.choice(D, size=int(D * 0.1), replace=False)
                data[i, idx] = np.random.randn(len(idx))
            data_desc = "Synthetic sparse data (10% density)"
        else:  # Random Quantum State
            data = np.random.randn(N, D)
            # Orthogonalize to simulate quantum states
            if N <= D:
                data, _ = np.linalg.qr(data.T)
                data = data.T[:N, :]
            data_desc = "Simulated orthogonal quantum state amplitudes"
    # --- COMPUTE GRAM MATRIX (Kernel) ---
    # K = X @ X^T represents the inner product structure
    gram = data @ data.T
    # --- MULTI-METRIC ANALYSIS ---
    r_eff, singular_values = effective_rank(gram)
    cond_num = condition_number_analysis(gram)
    coherence = coherence_estimate(data)
    sparsity = sparsity_measure(data)
    # --- DECISION LOGIC ---
    threshold = threshold_mult * np.sqrt(N)
    # Composite score (weighted metrics)
    quantum_score = 0
    # Effective rank is the primary indicator (40 points)
    if r_eff > threshold:
        quantum_score += 40
    elif r_eff > threshold * 0.7:
        quantum_score += 20
    # Condition number affects classical simulation stability (25 points)
    # NOTE(review): the final branch (cond_num < 1e6) is only reached when
    # cond_num <= 1e4, where it is always true - effectively an "else".
    if cond_num > 1e8:
        quantum_score += 25
    elif cond_num > 1e4:
        quantum_score += 15
    elif cond_num < 1e6:
        quantum_score += 5
    # Low coherence is good for quantum (20 points)
    if coherence < 5:
        quantum_score += 20
    elif coherence < 10:
        quantum_score += 10
    # Non-sparse data harder to approximate (15 points)
    if sparsity < 0.1:
        quantum_score += 15
    elif sparsity < 0.3:
        quantum_score += 8
    # --- GENERATE VERDICT ---
    # The three verdict tiers differ only in the markdown template and the
    # accent color reused by the spectral plot below.
    if quantum_score >= 70:
        status = "🟒 QUANTUM ADVANTAGE: HIGH CONFIDENCE"
        status_color = "#00ff88"
        verdict_emoji = "βœ…"
        recommendation = f"""### Audit Results
| Metric | Value | Status |
|--------|-------|--------|
| **Effective Rank** | {r_eff:.1f} | βœ… Above threshold ({threshold:.1f}) |
| **Condition Number** | {cond_num:.2e} | {'βœ…' if cond_num > 1e4 else 'βž–'} |
| **Coherence (ΞΌ)** | {coherence:.2f} | {'βœ…' if coherence < 10 else '⚠️'} |
| **Sparsity** | {sparsity*100:.1f}% | {'βœ…' if sparsity < 0.3 else 'βž–'} |
---
### {verdict_emoji} VERDICT: Quantum Advantage Likely
Your data exhibits **high-dimensional structure** that resists low-rank classical approximation.
The Tang dequantization attack would require **exponential classical resources**.
**Recommendation:** Deploy QKSAM (Quantum Kernel Self-Attention) or equivalent quantum circuit.
"""
    elif quantum_score >= 40:
        status = "🟑 MARGINAL: REQUIRES INVESTIGATION"
        status_color = "#ffcc00"
        verdict_emoji = "⚠️"
        recommendation = f"""### Audit Results
| Metric | Value | Status |
|--------|-------|--------|
| **Effective Rank** | {r_eff:.1f} | {'βœ…' if r_eff > threshold else '⚠️'} Threshold: {threshold:.1f} |
| **Condition Number** | {cond_num:.2e} | {'⚠️' if cond_num < 1e6 else 'βœ…'} |
| **Coherence (ΞΌ)** | {coherence:.2f} | {'βœ…' if coherence < 10 else '⚠️'} |
| **Sparsity** | {sparsity*100:.1f}% | βž– |
---
### {verdict_emoji} VERDICT: Inconclusive
Your data shows **mixed signals**. Quantum advantage is possible but not guaranteed.
**Recommendations:**
1. Increase sample size to improve rank estimation
2. Apply preprocessing to reduce coherence
3. Consider a **hybrid classical-quantum** approach
4. Run domain-specific benchmarks before committing QPU resources
"""
    else:
        status = "πŸ”΄ DEQUANTIZABLE: USE CLASSICAL METHODS"
        status_color = "#ff4444"
        verdict_emoji = "β›”"
        recommendation = f"""### Audit Results
| Metric | Value | Status |
|--------|-------|--------|
| **Effective Rank** | {r_eff:.1f} | ❌ Below threshold ({threshold:.1f}) |
| **Condition Number** | {cond_num:.2e} | {'βœ… Stable' if cond_num < 1e6 else 'βž–'} |
| **Coherence (ΞΌ)** | {coherence:.2f} | βž– |
| **Sparsity** | {sparsity*100:.1f}% | βž– |
---
### {verdict_emoji} VERDICT: No Quantum Advantage
Your data has **low effective rank**, meaning it lies on a low-dimensional manifold.
Classical methods can efficiently approximate this kernel via the **Tang dequantization** attack.
**Recommendation:** Use `sklearn.kernel_approximation.Nystroem` or randomized SVD.
```python
from sklearn.kernel_approximation import Nystroem
approx = Nystroem(n_components={max(10, int(r_eff))}, random_state=42)
X_transformed = approx.fit_transform(X)
```
**Do NOT waste QPU hours on this problem.**
"""
    # --- VISUALIZATION 1: Spectral Decay ---
    # Log-scale plot of the leading Gram-matrix singular values, with the
    # effective rank and the decision threshold marked as vertical lines.
    fig1, ax1 = plt.subplots(figsize=(8, 5), facecolor='#1e1e3f')
    ax1.set_facecolor('#1e1e3f')
    n_plot = min(60, len(singular_values))
    x_vals = np.arange(n_plot)
    # Plot singular values
    ax1.semilogy(x_vals, singular_values[:n_plot], 'o-',
                 color='#00d4ff', linewidth=2, markersize=5, label='Singular Values')
    # Fill area under curve
    ax1.fill_between(x_vals, singular_values[:n_plot], alpha=0.3, color='#7c3aed')
    # Effective rank line (clamped so it stays inside the plotted window)
    ax1.axvline(x=min(r_eff, n_plot - 1), color=status_color, linestyle='--',
                linewidth=2.5, label=f'Effective Rank = {r_eff:.1f}')
    # Threshold line
    ax1.axvline(x=min(threshold, n_plot - 1), color='#888888', linestyle=':',
                linewidth=2, label=f'Threshold = {threshold:.1f}')
    # Styling
    ax1.set_xlabel('Singular Value Index', color='white', fontsize=12)
    ax1.set_ylabel('Magnitude (log scale)', color='white', fontsize=12)
    ax1.set_title('πŸ“‰ Spectral Decay Analysis', color='white', fontsize=14, fontweight='bold')
    ax1.tick_params(colors='white', labelsize=10)
    ax1.legend(facecolor='#2a2a5f', edgecolor='#00d4ff', labelcolor='white', fontsize=10)
    ax1.grid(True, alpha=0.2, color='white')
    ax1.set_xlim(-1, n_plot)
    plt.tight_layout()
    # --- VISUALIZATION 2: Score Gauge ---
    fig2, ax2 = plt.subplots(figsize=(4, 4), facecolor='#1e1e3f', subplot_kw={'aspect': 'equal'})
    ax2.set_facecolor('#1e1e3f')
    # Create donut chart as gauge (filled fraction = quantum_score / 100)
    score_color = '#00ff88' if quantum_score >= 70 else '#ffcc00' if quantum_score >= 40 else '#ff4444'
    colors_gauge = [score_color, '#333355']
    wedges, _ = ax2.pie(
        [quantum_score, 100 - quantum_score],
        colors=colors_gauge,
        startangle=90,
        wedgeprops=dict(width=0.35, edgecolor='#1e1e3f')
    )
    # Center text
    ax2.text(0, 0.05, f'{quantum_score}', ha='center', va='center',
             fontsize=36, color='white', fontweight='bold')
    ax2.text(0, -0.25, 'Q-Score', ha='center', va='center',
             fontsize=12, color='#888888')
    ax2.set_title('Quantum Advantage Score', color='white', fontsize=12, fontweight='bold', pad=10)
    plt.tight_layout()
    return status, data_desc, recommendation, fig1, fig2
# --- RAG CONSULTANT (Architecture Validator) ---
def rag_consultant(ansatz_type, problem_domain):
    """
    Provides expert guidance on quantum circuit architecture selection.
    Deterministic and hallucination-free by design.

    Args:
        ansatz_type: one of the keys of the ``reports`` table below;
            unknown names fall back to the HEA entry.
        problem_domain: one of the keys of ``domain_notes``; unknown
            domains receive a generic note.

    Returns:
        A markdown report string combining the ansatz profile with a
        domain-specific note.
    """
    # One-line guidance per problem domain, appended to every report.
    domain_notes = {
        "NLP / Sequences": "Sequential data with local correlations favors 1D tensor network structures.",
        "Computer Vision": "2D spatial structure benefits from hierarchical, multi-scale ansΓ€tze.",
        "Drug Discovery / Molecular": "Graph-structured data requires permutation-equivariant circuits.",
        "Finance / Time Series": "Long-range temporal dependencies need high bond dimension or attention.",
        "Quantum Chemistry": "Fermionic systems require particle-number conserving ansΓ€tze (e.g., UCCSD).",
    }
    # Static knowledge base: one profile per supported ansatz.
    reports = {
        "Matrix Product State (MPS)": {
            "icon": "πŸ”—",
            "structure": "1D chain of tensors with bond dimension Ο‡",
            "complexity": "Classical: O(Nχ³), Quantum: O(N log Ο‡)",
            "entanglement": "Area-law (limited to 1D nearest-neighbor)",
            "best_for": ["Sequential data (text, audio)", "Time series", "1D physical systems"],
            "warnings": [
                "Bond dimension Ο‡ > 100 causes classical simulation slowdown",
                "Cannot efficiently capture 2D or long-range correlations"
            ],
            "verdict": "βœ… RECOMMENDED",
            "verdict_detail": "Excellent for sequential data. Start with Ο‡=32, scale up as needed."
        },
        "MERA (Multi-scale Entanglement Renormalization)": {
            "icon": "🌲",
            "structure": "Hierarchical tree with disentangler layers",
            "complexity": "Depth: O(log N), Parameters: O(N)",
            "entanglement": "Logarithmic scaling (captures critical phenomena)",
            "best_for": ["Images and 2D data", "Scale-invariant systems", "Renormalization group flows"],
            "warnings": [
                "Requires high qubit coherence times (deep circuits)",
                "Optimization landscape is complex",
                "Not native to most NISQ hardware topologies"
            ],
            "verdict": "⚠️ CONDITIONAL",
            "verdict_detail": "Powerful but challenging on NISQ. Use for >100 qubit fault-tolerant systems."
        },
        "Hardware Efficient Ansatz (HEA)": {
            "icon": "πŸ”§",
            "structure": "Alternating rotation and entangling layers (native gates)",
            "complexity": "Depth: O(L), Parameters: O(L Γ— N)",
            "entanglement": "Unstructured (no geometric prior)",
            "best_for": ["Hardware benchmarking", "Small proof-of-concept demos"],
            "warnings": [
                "🚨 BARREN PLATEAU RISK: Gradients vanish exponentially with depth",
                "No inductive bias β†’ poor generalization",
                "Expressibility β‰  Trainability"
            ],
            "verdict": "❌ NOT RECOMMENDED",
            "verdict_detail": "Avoid for production ML. Use only for hardware characterization."
        },
        "Equivariant Quantum Neural Network (EQNN)": {
            "icon": "πŸ”„",
            "structure": "Symmetry-preserving parameterized circuits",
            "complexity": "Reduced parameters due to weight sharing",
            "entanglement": "Structured by symmetry group",
            "best_for": ["Molecular simulations (SO(3) symmetry)", "Physics-informed ML", "Graphs (permutation symmetry)"],
            "warnings": [
                "Requires domain expertise to identify correct symmetry",
                "Implementation complexity higher than HEA"
            ],
            "verdict": "βœ… RECOMMENDED",
            "verdict_detail": "Best practice for scientific ML. Mitigates barren plateaus via symmetry."
        }
    }
    # Fall back to HEA (the most generic profile) for unknown ansatz names.
    r = reports.get(ansatz_type, reports["Hardware Efficient Ansatz (HEA)"])
    domain_note = domain_notes.get(problem_domain, "Consider the natural structure of your data.")
    # Render the list fields as markdown bullet lists.
    warnings_formatted = "\n".join(f"- {w}" for w in r["warnings"])
    best_for_formatted = "\n".join(f"- {b}" for b in r["best_for"])
    return f"""## {r['icon']} {ansatz_type}
### Architecture Overview
| Property | Value |
|----------|-------|
| **Structure** | {r['structure']} |
| **Complexity** | {r['complexity']} |
| **Entanglement** | {r['entanglement']} |
### Best Applications
{best_for_formatted}
### ⚠️ Warnings & Risks
{warnings_formatted}
### 🎯 Domain Analysis: {problem_domain}
{domain_note}
---
## {r['verdict']}: {r['verdict_detail']}
"""
# --- THEORY / EDUCATIONAL CONTENT ---
# Markdown shown verbatim in the "How It Works" tab; edits here change the
# UI text only, not any computation.
theory_content = """
## πŸ“š The Tang Test: Theory & Background
### Why This Matters
In 2018, **Ewin Tang** (then an 18-year-old undergraduate) shocked the quantum computing community by showing that many celebrated "quantum speedups" could be matched by classical algorithms, if the data has low rank.
This tool implements a practical version of Tang's theoretical framework to help you determine **before** you spend QPU hours whether your problem actually needs a quantum computer.
---
### The Core Insight
Quantum machine learning algorithms often promise speedups for kernel methods. But these speedups assume the kernel matrix has **high effective rank**. If your data lies on a low-dimensional manifold (as most real-world data does), classical randomized methods like **NystrΓΆm approximation** can achieve similar results.
---
### What We Measure
| Metric | What It Captures | Quantum-Favorable |
|--------|------------------|-------------------|
| **Effective Rank** | Dimensionality of data manifold | High (> √N) |
| **Condition Number** | Numerical stability for inversion | High (> 10⁢) |
| **Coherence (ΞΌ)** | Cost of quantum state preparation | Low (< 10) |
| **Sparsity** | Fraction of near-zero elements | Low (< 30%) |
---
### The Mathematics
**Effective Rank** is computed via spectral entropy:
```
R_eff = exp(H) where H = -Ξ£α΅’ pα΅’ log(pα΅’)
pα΅’ = Οƒα΅’ / Ξ£β±Ό Οƒβ±Ό (normalized singular values)
```
This measures how "spread out" the singular value spectrum is. A matrix with one dominant singular value has R_eff β‰ˆ 1. A matrix with uniform singular values has R_eff β‰ˆ rank.
**The Decision Rule:**
- If R_eff < √N β†’ Classical NystrΓΆm approximation is efficient
- If R_eff > √N β†’ Quantum methods may offer genuine advantage
---
### The Controversial Truth About MNIST
When you run MNIST through this tool, you'll see it's **dequantizable**. Despite having 784 dimensions (28Γ—28 pixels), handwritten digits lie on a ~50-dimensional manifold. This is why:
1. Autoencoders can compress MNIST to 32 dimensions with minimal loss
2. PCA with 50 components captures >95% of variance
3. Quantum kernel methods offer **no advantage** over classical kernels
This doesn't mean quantum ML is useless, it means we need to find the *right* problems.
---
### When Quantum Actually Helps
Genuine quantum advantage is expected for:
- **Quantum chemistry**: Electron correlations are genuinely high-rank
- **Cryptographic data**: Pseudorandom data has high effective rank by design
- **Certain optimization landscapes**: Where quantum tunneling helps
---
### References
1. Tang, E. (2019). *A quantum-inspired classical algorithm for recommendation systems*. STOC 2019. [arXiv:1807.04271](https://arxiv.org/abs/1807.04271)
2. Aaronson, S. (2015). *Read the fine print*. [Blog post](https://scottaaronson.blog/?p=2555)
3. Huang, H.Y., et al. (2021). *Power of data in quantum machine learning*. Nature Communications. [arXiv:2011.01938](https://arxiv.org/abs/2011.01938)
4. McClean, J.R., et al. (2018). *Barren plateaus in quantum neural network training landscapes*. Nature Communications.
---
### About DeepSuite
This tool is part of the **DeepSuite** research project, which aims to bridge the "deployment gap" between quantum algorithms and practical implementations.
The full DeepSuite pipeline includes:
1. **GENERATE**: Automated quantum circuit synthesis
2. **VERIFY**: This auditor (Tang Test + Architecture Validation)
3. **DEPLOY**: Hardware-aware compilation and execution
"""
# --- MAIN GRADIO APPLICATION ---
# UI layout and event wiring.  Components created inside the Blocks context
# are bound to Python variables and wired to the callbacks defined above.
with gr.Blocks(
    theme=quantum_theme,
    title="DeepSuite: Quantum Auditor",
    css="""
.main-header { text-align: center; margin-bottom: 1rem; }
.metric-pass { color: #00ff88 !important; }
.metric-fail { color: #ff4444 !important; }
.metric-warn { color: #ffcc00 !important; }
footer { visibility: hidden; }
"""
) as demo:
    # --- HEADER ---
    gr.Markdown("""
<div class="main-header">

# βš›οΈ DeepSuite: The Quantum Auditor
### Stop guessing. Start verifying.
Determine if your data requires quantum computing, or if classical methods suffice.
</div>
""")
    # --- MAIN TABS ---
    with gr.Tabs():
        # ==================== TAB 1: TANG TEST ====================
        with gr.Tab("πŸ”¬ Tang Test (Dequantization Audit)"):
            gr.Markdown("""
Upload your data or select a preset to run the **Tang Test**, a mathematical audit
that determines if quantum methods offer genuine advantage over classical approximations.
""")
            with gr.Row():
                # --- LEFT COLUMN: INPUTS ---
                with gr.Column(scale=1):
                    # Choice strings here must match the comparisons inside
                    # full_tang_audit and update_ui_visibility exactly.
                    data_source = gr.Radio(
                        choices=[
                            "Synthetic Data",
                            "Upload CSV",
                            "πŸ“Š MNIST Sample (The Reveal)",
                            "🧬 Protein Embeddings",
                            "βš›οΈ Quantum Chemistry"
                        ],
                        label="πŸ“ Data Source",
                        value="Synthetic Data",
                        info="Try 'MNIST Sample' to see why image data doesn't need quantum!"
                    )
                    # Hidden until "Upload CSV" is selected (see change handler).
                    uploaded_file = gr.File(
                        label="Upload CSV (rows = samples, cols = features)",
                        file_types=[".csv"],
                        visible=False
                    )
                    # Visible only for "Synthetic Data" (see change handler).
                    data_type = gr.Dropdown(
                        choices=[
                            "Low-Rank (Classical-Friendly)",
                            "High-Rank (Quantum-Favorable)",
                            "Sparse (Edge Case)",
                            "Random Quantum State"
                        ],
                        label="Synthetic Data Type",
                        value="Low-Rank (Classical-Friendly)",
                        visible=True
                    )
                    with gr.Row():
                        num_samples = gr.Slider(
                            minimum=50, maximum=500, value=200, step=50,
                            label="Samples (N)",
                            info="Number of data points"
                        )
                        feature_dim = gr.Slider(
                            minimum=20, maximum=200, value=100, step=20,
                            label="Features (D)",
                            info="Dimensionality"
                        )
                    threshold_mult = gr.Slider(
                        minimum=0.5, maximum=2.0, value=1.0, step=0.1,
                        label="🎚️ Threshold Sensitivity",
                        info="1.0 = standard (√N). Lower = stricter, Higher = lenient"
                    )
                    audit_btn = gr.Button(
                        "πŸ” Run Full Audit",
                        variant="primary",
                        size="lg"
                    )
                # --- RIGHT COLUMN: RESULTS ---
                with gr.Column(scale=1):
                    status_output = gr.Textbox(
                        label="⚑ Audit Status",
                        lines=1,
                        interactive=False
                    )
                    data_desc_output = gr.Textbox(
                        label="πŸ“‹ Data Description",
                        lines=1,
                        interactive=False
                    )
                    recommendation_output = gr.Markdown(
                        label="πŸ“Š Detailed Report"
                    )
            # --- VISUALIZATIONS ---
            with gr.Row():
                plot_spectral = gr.Plot(label="Spectral Decay Analysis")
                plot_gauge = gr.Plot(label="Quantum Advantage Score")
            # --- DYNAMIC UI UPDATES ---
            def update_ui_visibility(source):
                # Show the file picker only for CSV upload, and the
                # synthetic-data dropdown only for synthetic mode.
                show_upload = source == "Upload CSV"
                show_synthetic = source == "Synthetic Data"
                return (
                    gr.update(visible=show_upload),
                    gr.update(visible=show_synthetic)
                )
            data_source.change(
                fn=update_ui_visibility,
                inputs=data_source,
                outputs=[uploaded_file, data_type]
            )
        # ==================== TAB 2: ARCHITECTURE VALIDATOR ====================
        with gr.Tab("πŸ—οΈ Circuit Architecture Validator"):
            gr.Markdown("""
Select a quantum circuit ansatz and problem domain to receive expert guidance
on architecture selection. This simulates the RAG-based validation step in DeepSuite.
""")
            with gr.Row():
                with gr.Column(scale=1):
                    ansatz_select = gr.Dropdown(
                        choices=[
                            "Matrix Product State (MPS)",
                            "MERA (Multi-scale Entanglement Renormalization)",
                            "Hardware Efficient Ansatz (HEA)",
                            "Equivariant Quantum Neural Network (EQNN)"
                        ],
                        label="πŸ”§ Proposed Quantum Ansatz",
                        value="Hardware Efficient Ansatz (HEA)"
                    )
                    domain_select = gr.Dropdown(
                        choices=[
                            "NLP / Sequences",
                            "Computer Vision",
                            "Drug Discovery / Molecular",
                            "Finance / Time Series",
                            "Quantum Chemistry"
                        ],
                        label="🎯 Problem Domain",
                        value="NLP / Sequences"
                    )
                with gr.Column(scale=2):
                    rag_output = gr.Markdown(
                        value="*Select an ansatz and domain to see the analysis...*"
                    )
            # Auto-update on selection change (either dropdown re-renders the report)
            gr.on(
                triggers=[ansatz_select.change, domain_select.change],
                fn=rag_consultant,
                inputs=[ansatz_select, domain_select],
                outputs=rag_output
            )
        # ==================== TAB 3: THEORY ====================
        with gr.Tab("πŸ“š How It Works"):
            gr.Markdown(theory_content)
    # --- WIRE UP MAIN AUDIT ---
    # Button click runs the full audit; output order must match the
    # 5-tuple returned by full_tang_audit.
    audit_btn.click(
        fn=full_tang_audit,
        inputs=[
            data_source,
            uploaded_file,
            data_type,
            num_samples,
            feature_dim,
            threshold_mult
        ],
        outputs=[
            status_output,
            data_desc_output,
            recommendation_output,
            plot_spectral,
            plot_gauge
        ]
    )
    # --- FOOTER ---
    gr.Markdown("""
---
<center>

**DeepSuite Quantum Auditor** | Built by: Eric Raymond & Myalou | Purdue AI/Robotics Engineering
*"Don't guess. Verify."*
</center>
""")
# --- LAUNCH ---
if __name__ == "__main__":
    # Serve the UI; blocking call when run as a script (not on import).
    demo.launch()