# NOTE: repository-upload metadata (commented out so the module parses):
#   original filename: Type / __init__ (1) (4).py
#   uploaded by Ananthusajeev190 — "Upload 200 files" — commit 713f732 (verified)
"""
quotom_ai.py
Single-file demo: quantum (single-qubit) simulator + neural network that learns
to predict short-time evolution of the qubit state under a tunable Hamiltonian.
Requirements:
pip install numpy scipy torch
Author: ChatGPT (Quotom mechanics AI example)
"""
import numpy as np
from scipy.linalg import expm, eig
import torch
import torch.nn as nn
import torch.optim as optim
from typing import Tuple
# ---------------------------
# Quantum simulation utilities
# ---------------------------

# The three Pauli operators (2x2, complex) — the basis used throughout this
# file to assemble single-qubit Hamiltonians H = ax*X + ay*Y + az*Z — plus
# the 2x2 identity.
sigma_x = np.array([[0, 1],
                    [1, 0]], dtype=complex)
sigma_y = np.array([[0, -1j],
                    [1j, 0]], dtype=complex)
sigma_z = np.array([[1, 0],
                    [0, -1]], dtype=complex)
I2 = np.eye(2, dtype=complex)
def random_bloch_state(rng=None) -> np.ndarray:
    """Return a normalized complex 2-vector |psi> for a uniformly random pure qubit state.

    Fix/generalization: previously this always drew from the *global*
    ``np.random`` state, so seeded callers could not reproduce their states.
    An optional ``rng`` (``np.random.Generator``) is now accepted; omitting it
    keeps the original global-RNG behavior, so existing callers are unaffected.

    Args:
        rng: optional ``np.random.Generator`` for reproducible sampling.

    Returns:
        Complex ndarray of shape (2,), unit norm; the first amplitude is real.
    """
    if rng is None:
        u, v = np.random.rand(), np.random.rand()
    else:
        u, v = rng.random(), rng.random()
    # cos(theta) uniform on [-1, 1] gives the uniform (Haar) measure on the sphere
    theta = np.arccos(1 - 2 * u)  # 0..pi
    phi = 2 * np.pi * v           # 0..2pi
    a = np.cos(theta / 2)
    b = np.sin(theta / 2) * np.exp(1j * phi)
    state = np.array([a, b], dtype=complex)
    # normalization guard against floating-point drift (should already be ~1)
    return state / np.linalg.norm(state)
def hamiltonian_from_params(ax: float, ay: float, az: float) -> np.ndarray:
    """Assemble the single-qubit Hamiltonian H = ax*X + ay*Y + az*Z as a 2x2 complex array."""
    coefficients = (ax, ay, az)
    paulis = (sigma_x, sigma_y, sigma_z)
    return sum(c * p for c, p in zip(coefficients, paulis))
def time_evolution_unitary(H: np.ndarray, dt: float) -> np.ndarray:
    """Return the propagator U = exp(-i * H * dt).

    Uses scipy.linalg.expm; for the 2x2 matrices in this file that is cheap
    and accurate to floating-point precision.
    """
    generator = (-1j * dt) * H
    return expm(generator)
def evolve_state(state: np.ndarray, H: np.ndarray, dt: float) -> np.ndarray:
    """Advance a state one step: |psi(t+dt)> = exp(-i H dt) |psi(t)>."""
    return time_evolution_unitary(H, dt) @ state
# ---------------------------
# Dataset generation
# ---------------------------
def generate_dataset(n_samples: int,
                     dt: float = 0.05,
                     param_scale: float = 2.0,
                     seed: int = 0) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate dataset of (input -> target) pairs where:
        input:  [Re0, Im0, Re1, Im1, ax, ay, az]  (state + Hamiltonian params)
        target: [Re0, Im0, Re1, Im1]              (state after one dt step)
    psi vectors have 2 complex components -> represented as 4 reals.

    Fix: the initial states previously came from ``random_bloch_state``'s
    *global* NumPy RNG, so ``seed`` did not make the dataset reproducible.
    All randomness now flows through the seeded Generator, so equal seeds
    yield byte-identical datasets.

    Returns:
        (X, Y) as float32 arrays of shape (n_samples, 7) and (n_samples, 4).
    """
    rng = np.random.default_rng(seed)
    X = np.zeros((n_samples, 4 + 3), dtype=float)  # 4 state reals + 3 params
    Y = np.zeros((n_samples, 4), dtype=float)      # evolved state's 4 reals
    for i in range(n_samples):
        # Haar-uniform pure state drawn from the seeded generator:
        # cos(theta) uniform on [-1, 1] gives the uniform measure on the sphere.
        theta = np.arccos(1 - 2 * rng.random())
        phi = 2 * np.pi * rng.random()
        psi0 = np.array([np.cos(theta / 2),
                         np.sin(theta / 2) * np.exp(1j * phi)], dtype=complex)
        psi0 /= np.linalg.norm(psi0)
        # Hamiltonian coefficients sampled from a scaled normal distribution.
        ax, ay, az = param_scale * rng.standard_normal(3)
        H = hamiltonian_from_params(ax, ay, az)
        psi1 = evolve_state(psi0, H, dt)
        # layout: [Re0, Im0, Re1, Im1] per state
        X[i, 0] = psi0[0].real
        X[i, 1] = psi0[0].imag
        X[i, 2] = psi0[1].real
        X[i, 3] = psi0[1].imag
        X[i, 4] = ax
        X[i, 5] = ay
        X[i, 6] = az
        Y[i, 0] = psi1[0].real
        Y[i, 1] = psi1[0].imag
        Y[i, 2] = psi1[1].real
        Y[i, 3] = psi1[1].imag
    return X.astype(np.float32), Y.astype(np.float32)
# ---------------------------
# PyTorch model
# ---------------------------
class QuotomNet(nn.Module):
    """Small MLP that predicts one-step qubit evolution.

    Maps a 7-float input (4 state reals + 3 Hamiltonian params) to the 4
    reals of the evolved state. Two hidden ReLU layers, linear output.
    """

    def __init__(self, input_dim=7, hidden=128, out_dim=4):
        super().__init__()
        # Layers are listed explicitly and then packed into a Sequential so
        # the parameter names/order match a plain three-Linear stack.
        stack = [
            nn.Linear(input_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, out_dim),
        ]
        self.net = nn.Sequential(*stack)

    def forward(self, x):
        """Map a (batch, input_dim) tensor to (batch, out_dim) predictions."""
        return self.net(x)
# ---------------------------
# Training / utility
# ---------------------------
def train_model(model, X_train, Y_train, X_val=None, Y_val=None,
                epochs=60, batch_size=256, lr=1e-3, device='cpu'):
    """Fit `model` to (X_train, Y_train) with Adam + MSE.

    Args:
        model: torch.nn.Module mapping float32 features to float32 targets.
        X_train, Y_train: numpy float32 arrays, shapes (N, in) / (N, out).
        X_val, Y_val: optional validation arrays; when given, validation MSE
            is appended to the periodic progress line.
        epochs, batch_size, lr, device: usual training knobs.

    Returns:
        The same `model` instance, trained in place.
    """
    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.MSELoss()
    train_set = torch.utils.data.TensorDataset(
        torch.from_numpy(X_train), torch.from_numpy(Y_train)
    )
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True
    )
    n_examples = len(train_set)
    for epoch in range(1, epochs + 1):
        model.train()
        running = 0.0
        for features, targets in train_loader:
            features = features.to(device)
            targets = targets.to(device)
            outputs = model(features)
            batch_loss = criterion(outputs, targets)
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
            # weight by batch size so the epoch average is exact even when
            # the final batch is smaller
            running += batch_loss.item() * features.size(0)
        epoch_loss = running / n_examples
        # report on the first epoch and every 10th thereafter
        if epoch == 1 or epoch % 10 == 0:
            msg = f"Epoch {epoch:3d}/{epochs} train loss {epoch_loss:.6e}"
            if X_val is not None:
                val_loss = evaluate_model(model, X_val, Y_val, device=device)
                msg += f", val loss {val_loss:.6e}"
            print(msg)
    return model
def evaluate_model(model, X, Y, device='cpu') -> float:
    """Return the MSE of `model` on numpy arrays (X, Y), without grad tracking."""
    model.eval()
    criterion = nn.MSELoss()
    with torch.no_grad():
        inputs = torch.from_numpy(X).to(device)
        targets = torch.from_numpy(Y).to(device)
        mse = criterion(model(inputs), targets).item()
    return mse
def complex_state_from_vector(vec: np.ndarray) -> np.ndarray:
    """Rebuild the complex 2-vector from its real layout [Re0, Im0, Re1, Im1]."""
    first = vec[0] + 1j * vec[1]
    second = vec[2] + 1j * vec[3]
    return np.array([first, second], dtype=complex)
# ---------------------------
# Quick demo run
# ---------------------------
def demo():
    """End-to-end demo: generate data, train QuotomNet, report fidelities.

    Pipeline: build seeded train/val datasets, standardize the three
    Hamiltonian-parameter columns using train-set statistics, train the
    network, then compare predicted vs. exact evolved states using the
    pure-state fidelity |<psi_true|psi_pred>|^2. Prints progress and results;
    returns nothing.
    """
    # hyperparams
    n_train = 8000
    n_val = 1000
    dt = 0.05
    seed = 42
    print("Generating dataset...")
    X_train, Y_train = generate_dataset(n_train, dt=dt, seed=seed)
    # val set uses seed+1 so it is disjoint from the training draw
    X_val, Y_val = generate_dataset(n_val, dt=dt, seed=seed + 1)
    # scale Hamiltonian params for model stability (simple standardization)
    # We'll compute mean/std of the param columns and apply same transform to both sets.
    param_mean = X_train[:, 4:7].mean(axis=0, keepdims=True)
    # epsilon avoids division by zero for a degenerate (constant) column
    param_std = X_train[:, 4:7].std(axis=0, keepdims=True) + 1e-9
    X_train[:, 4:7] = (X_train[:, 4:7] - param_mean) / param_std
    X_val[:, 4:7] = (X_val[:, 4:7] - param_mean) / param_std
    # Build and train model
    model = QuotomNet(input_dim=7, hidden=128, out_dim=4)
    print("Training model...")
    model = train_model(model, X_train, Y_train, X_val=X_val, Y_val=Y_val,
                        epochs=60, batch_size=256, lr=1e-3)
    # Evaluate and show qualitative example
    val_loss = evaluate_model(model, X_val, Y_val)
    print(f"Final validation MSE: {val_loss:.6e}")
    # pick a few validation examples and compare predicted vs true complex states:
    # NOTE(review): uses the unseeded global RNG, so the chosen samples differ
    # between runs even though the dataset itself is seeded.
    i_samples = np.random.choice(len(X_val), size=6, replace=False)
    model.eval()
    with torch.no_grad():
        X_sel = torch.from_numpy(X_val[i_samples]).float()
        preds = model(X_sel).numpy()
    print("\nExample predictions (showing fidelity between predicted and true states):")
    for idx, i in enumerate(i_samples):
        pred_vec = preds[idx]
        true_vec = Y_val[i]
        psi_pred = complex_state_from_vector(pred_vec)
        psi_true = complex_state_from_vector(true_vec)
        # normalize predictions (model might not output normalized complex vectors)
        psi_pred = psi_pred / np.linalg.norm(psi_pred)
        psi_true = psi_true / np.linalg.norm(psi_true)
        # state fidelity for pure states = |<psi_true|psi_pred>|^2
        fidelity = np.abs(np.vdot(psi_true, psi_pred)) ** 2
        print(f" sample {i}: fidelity = {fidelity:.6f}")
    # small targeted test: compare model vs exact evolution for one random sample
    print("\nTargeted check vs exact quantum evolution:")
    psi0 = random_bloch_state()
    ax, ay, az = (1.1, -0.7, 0.3)  # chosen params
    H = hamiltonian_from_params(ax, ay, az)
    psi1_true = evolve_state(psi0, H, dt)
    # build feature vector (remember to standardize params using param_mean/std used earlier)
    feat = np.zeros((1, 7), dtype=np.float32)
    feat[0, 0] = psi0[0].real
    feat[0, 1] = psi0[0].imag
    feat[0, 2] = psi0[1].real
    feat[0, 3] = psi0[1].imag
    feat[0, 4:7] = (np.array([ax, ay, az]) - param_mean.ravel()) / param_std.ravel()
    model.eval()
    with torch.no_grad():
        pred = model(torch.from_numpy(feat)).numpy().ravel()
    psi_pred = complex_state_from_vector(pred)
    psi_pred = psi_pred / np.linalg.norm(psi_pred)
    psi_true = psi1_true / np.linalg.norm(psi1_true)
    fidelity = np.abs(np.vdot(psi_true, psi_pred)) ** 2
    print(f"Fidelity between predicted and exact evolved state: {fidelity:.6f}")
# Entry point: run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    demo()