Spaces:
Sleeping
Sleeping
File size: 4,313 Bytes
7b58366 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 |
import time
import os
import numpy as np
# OpenMP: number of parallel threads.
os.environ["OMP_NUM_THREADS"] = "1"
# PyTorch
import torch
import torch.nn as nn
# Pennylane
import pennylane as qml
import torchvision
torch.manual_seed(42)
def build_hybrid_model(pennylane_dev, device, n_qubits=4, q_depth=6, q_delta=0.01):
    """
    Construct the hybrid classical/quantum classifier.

    A pretrained ResNet18 is loaded and frozen, and its final fully
    connected layer is replaced by a DressedQuantumNet head.

    Args:
        pennylane_dev (qml.device): The Pennylane backend.
        device (torch.device): Target device for the model.
        n_qubits (int): Number of qubits in the quantum circuit.
        q_depth (int): Depth of the quantum circuit (number of variational layers).
        q_delta (float): Initial spread of random quantum weights.

    Returns:
        torchvision.models.resnet: The hybrid model, moved to ``device``.
    """
    backbone = torchvision.models.resnet18(pretrained=True)
    # Freeze every pretrained weight; only the quantum head will train.
    for p in backbone.parameters():
        p.requires_grad = False
    # `fc` is ResNet18's final classification layer — swap it for the
    # dressed quantum network.
    backbone.fc = DressedQuantumNet(
        n_qubits=n_qubits,
        q_depth=q_depth,
        q_delta=q_delta,
        pennylane_dev=pennylane_dev,
        device=device,
    )
    # Move the whole model to CUDA or CPU per the "device" object.
    return backbone.to(device)
def H_layer(nqubits):
    """Apply a Hadamard gate to each of the first ``nqubits`` wires."""
    for wire in range(nqubits):
        qml.Hadamard(wires=wire)
def RY_layer(w):
    """Apply one RY rotation per wire, taking the angle for wire i from w[i]."""
    for wire, angle in enumerate(w):
        qml.RY(angle, wires=wire)
def entangling_layer(nqubits):
    """Apply a brick-wall entangling layer of CNOTs.

    Two shifted sub-layers are applied:
        CNOT on pairs (0,1), (2,3), ...   # even-indexed control wires
        CNOT on pairs (1,2), (3,4), ...   # odd-indexed control wires
    """
    even_controls = range(0, nqubits - 1, 2)
    odd_controls = range(1, nqubits - 1, 2)
    for ctrl in even_controls:
        qml.CNOT(wires=[ctrl, ctrl + 1])
    for ctrl in odd_controls:
        qml.CNOT(wires=[ctrl, ctrl + 1])
def quantum_net(q_input_features, q_weights_flat, n_qubits, q_depth):
    """
    The variational quantum circuit: Hadamard preparation, feature
    embedding, then ``q_depth`` (entangle, rotate) layers, measured
    in the Z basis on every wire.
    """
    # One row of rotation angles per variational layer.
    q_weights = q_weights_flat.reshape(q_depth, n_qubits)

    # Prepare the state |+>^n, unbiased w.r.t. |0> and |1>.
    H_layer(n_qubits)

    # Encode the classical features as RY rotation angles.
    RY_layer(q_input_features)

    # Trainable variational layers: entangle, then rotate.
    for layer in range(q_depth):
        entangling_layer(n_qubits)
        RY_layer(q_weights[layer])

    # Expectation values <Z> on each wire.
    return tuple(qml.expval(qml.PauliZ(wire)) for wire in range(n_qubits))
class DressedQuantumNet(nn.Module):
    """
    Torch module implementing the *dressed* quantum net:
    a classical pre-processing layer, a variational quantum circuit,
    and a classical post-processing layer.
    """

    def __init__(self, n_qubits, q_depth, q_delta, pennylane_dev, device,
                 n_features=512, n_classes=4):
        """
        Definition of the *dressed* layout.

        Args:
            n_qubits (int): Number of qubits in the quantum circuit.
            q_depth (int): Depth of the quantum circuit (number of
                variational layers).
            q_delta (float): Initial spread of random quantum weights.
            pennylane_dev (qml.device): The Pennylane backend.
            device (torch.device): Device for intermediate tensors.
            n_features (int): Size of the incoming classical feature
                vector (512 for a ResNet18 backbone).
            n_classes (int): Number of output classes.
        """
        super().__init__()
        self.n_qubits = n_qubits
        self.q_depth = q_depth
        self.q_delta = q_delta
        self.pennylane_dev = pennylane_dev
        self.device = device
        self.pre_net = nn.Linear(n_features, n_qubits)
        self.q_params = nn.Parameter(q_delta * torch.randn(q_depth * n_qubits))
        self.post_net = nn.Linear(n_qubits, n_classes)
        # Build the QNode once here instead of rebuilding it on every
        # forward pass (pure per-call overhead in the original).
        self.qnode = qml.QNode(
            func=quantum_net,
            device=pennylane_dev,
            interface="torch",
        )

    def forward(self, input_features):
        """
        Defining how tensors are supposed to move through the *dressed*
        quantum net.

        Args:
            input_features (torch.Tensor): Batch of feature vectors,
                shape (batch, n_features).

        Returns:
            torch.Tensor: Class scores, shape (batch, n_classes).
        """
        # Reduce the feature dimension to n_qubits and squash the values
        # into rotation angles in (-pi/2, pi/2).
        pre_out = self.pre_net(input_features)
        q_in = torch.tanh(pre_out) * np.pi / 2.0

        # Apply the quantum circuit to each element of the batch,
        # collecting rows first and concatenating once — avoids the
        # O(batch^2) cost of torch.cat inside the loop.
        q_out_rows = [
            torch.hstack(
                self.qnode(elem, self.q_params, self.n_qubits, self.q_depth)
            ).float().unsqueeze(0)
            for elem in q_in
        ]
        if q_out_rows:
            q_out = torch.cat(q_out_rows).to(self.device)
        else:
            # Empty batch: keep the (0, n_qubits) shape of the original.
            q_out = torch.empty(0, self.n_qubits, device=self.device)

        # Final classical layer maps qubit expectation values to the
        # n_classes-dimensional prediction.
        return self.post_net(q_out)
|