repo
stringclasses 885
values | file
stringclasses 741
values | content
stringlengths 4
215k
|
|---|---|---|
https://github.com/BOBO1997/osp_solutions
|
BOBO1997
|
import numpy as np
import matplotlib.pyplot as plt
import itertools
from pprint import pprint
import pickle
import time
import datetime
# Import qubit states Zero (|0>) and One (|1>), and Pauli operators (X, Y, Z)
from qiskit.opflow import Zero, One, I, X, Y, Z
from qiskit import QuantumCircuit, QuantumRegister, IBMQ, execute, transpile, Aer
from qiskit.tools.monitor import job_monitor
from qiskit.circuit import Parameter
from qiskit.transpiler.passes import RemoveBarriers
# Import QREM package
from qiskit.ignis.mitigation.measurement import complete_meas_cal, CompleteMeasFitter
from qiskit.ignis.mitigation import expectation_value
# Import mitiq for zne
import mitiq
# Import state tomography modules
from qiskit.ignis.verification.tomography import state_tomography_circuits
from qiskit.quantum_info import state_fidelity
import sys
import importlib
sys.path.append("../utils/")
import circuit_utils, zne_utils, tomography_utils, sgs_algorithm
importlib.reload(circuit_utils)
importlib.reload(zne_utils)
importlib.reload(tomography_utils)
importlib.reload(sgs_algorithm)
from circuit_utils import *
from zne_utils import *
from tomography_utils import *
from sgs_algorithm import *
# --- Build the Trotterized time-evolution circuit for 3 qubits ---
num_qubits = 3
# The final time of the state evolution
target_time = np.pi
# Symbolic time-step parameter; bound to target_time / num_steps below.
dt = Parameter('t')
# One Trotter step as a gate (trotter_gate comes from circuit_utils).
trot_gate = trotter_gate(dt)
# Physical qubits the 3 logical qubits are mapped onto at transpile time.
initial_layout = [5,3,1]
# Number of trotter steps
num_steps = 100
print("trotter step: ", num_steps)
# Noise-scale factors used later for zero-noise extrapolation (ZNE).
scale_factors = [1.0, 2.0, 3.0]
# Initialize quantum circuit for 3 qubits
qr = QuantumRegister(num_qubits, name="q")
qc = QuantumCircuit(qr)
# Prepare initial state (we are only evolving 3 of the 7 qubits on jakarta (q_5, q_3, q_1), state |110>)
make_initial_state(qc, "110") # DO NOT MODIFY (|q_5,q_3,q_1> = |110>)
subspace_encoder_init110(qc, targets=[0, 1, 2]) # encode
trotterize(qc, trot_gate, num_steps, targets=[1, 2]) # Simulate time evolution under H_heis3 Hamiltonian
subspace_decoder_init110(qc, targets=[0, 1, 2]) # decode
# Bind the symbolic step so each Trotter step evolves target_time / num_steps in time.
qc = qc.bind_parameters({dt: target_time / num_steps})
print("created qc")
# Generate state tomography circuits to evaluate fidelity of simulation
st_qcs = state_tomography_circuits(qc, [0, 1, 2][::-1]) #! state tomography requires === BIG ENDIAN ===
print("created st_qcs (length:", len(st_qcs), ")")
# Remove barriers so the transpiler may optimize across them.
st_qcs = [RemoveBarriers()(qc) for qc in st_qcs]
print("removed barriers from st_qcs")
# Optimize at level 3 twice; a second pass can further simplify the circuit
# already expressed in the sx/cx/rz basis.
t3_st_qcs = transpile(st_qcs, optimization_level=3, basis_gates=["sx", "cx", "rz"])
t3_st_qcs = transpile(t3_st_qcs, optimization_level=3, basis_gates=["sx", "cx", "rz"])
print("created t3_st_qcs (length:", len(t3_st_qcs), ")")
# Wrap circuits for zero-noise extrapolation; pt=True also applies Pauli twirling.
zne_qcs = zne_wrapper(t3_st_qcs, scale_factors = scale_factors, pt = True) # Pauli Twirling
print("created zne_qcs (length:", len(zne_qcs), ")")
# optimization_level must be 0 (presumably so the inserted noise-scaling gates
# are not simplified away — confirm against zne_utils).
# feed initial_layout here to see the picture of the circuits before casting the job
t3_zne_qcs = transpile(zne_qcs, optimization_level=0, basis_gates=["sx", "cx", "rz"], initial_layout=initial_layout)
print("created t3_zne_qcs (length:", len(t3_zne_qcs), ")")
t3_zne_qcs[-3].draw("mpl")
# Use the noise-model-equipped mock Jakarta backend for local testing.
from qiskit.test.mock import FakeJakarta
backend = FakeJakarta()
# Alternative backends (noiseless simulator / real device), kept for reference:
# backend = Aer.get_backend("qasm_simulator")
# IBMQ.load_account()
# provider = IBMQ.get_provider(hub='ibm-q-community', group='ibmquantumawards', project='open-science-22')
# print("provider:", provider)
# backend = provider.get_backend("ibmq_jakarta")
print(str(backend))
shots = 1 << 13  # 8192 shots per circuit
reps = 8  # number of repetitions of the full job (used in the loop below)
jobs = []
for _ in range(reps):
    #! CHECK: run t3_zne_qcs, with optimization_level = 0 and straightforward initial_layout
    job = execute(t3_zne_qcs, backend, shots=shots, optimization_level=0)
    print('Job ID', job.job_id())
    jobs.append(job)
# --- QREM: readout-error-mitigation calibration circuits ---
qr = QuantumRegister(num_qubits, name="calq")
meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal')
# The calibration must run on the same physical qubits as the main circuits,
# so the same initial_layout is fed to the calibration job.
cal_job = execute(meas_calibs, backend=backend, shots=shots, optimization_level=3, initial_layout = initial_layout)
print('Job ID', cal_job.job_id())
meas_calibs[0].draw("mpl")
# Timestamped filename for (optionally) persisting job ids.
dt_now = datetime.datetime.now()
print(dt_now)
filename = "job_ids_" + str(backend) + "_100step_" + dt_now.strftime('%Y%m%d_%H%M%S') + "_.pkl"
print(filename)
# Optional persistence of jobs / job ids / backend properties (disabled):
# with open("jobs_" + str(backend) + "_100step_" + dt_now.strftime('%Y%m%d_%H%M%S') + "_.pkl", "wb") as f:
#     pickle.dump({"jobs": jobs, "cal_job": cal_job}, f)
# with open(filename, "wb") as f:
#     pickle.dump({"job_ids": [job.job_id() for job in jobs], "cal_job_id": cal_job.job_id()}, f)
# with open("properties_" + str(backend) + "_" + dt_now.strftime('%Y%m%d_%H%M%S') + "_.pkl", "wb") as f:
#     pickle.dump(backend.properties(), f)
# With a local mock backend the jobs are already in memory; on real hardware
# these would be retrieved by job id instead.
retrieved_jobs = jobs
retrieved_cal_job = cal_job
cal_results = retrieved_cal_job.result()
# Build the measurement-error-mitigation filter from the calibration results.
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
target_state = (One^One^Zero).to_matrix() # DO NOT CHANGE!!!
fids = []
for job in retrieved_jobs:
    # Pipeline per job: readout mitigation -> ZNE -> valid density matrix.
    mit_results = meas_fitter.filter.apply(job.result())
    zne_expvals = zne_decoder(num_qubits, mit_results, scale_factors = scale_factors)
    rho = expvals_to_valid_rho(num_qubits, zne_expvals)
    fid = state_fidelity(rho, target_state)
    fids.append(fid)
print('state tomography fidelity = {:.4f} \u00B1 {:.4f}'.format(np.mean(fids), np.std(fids)))
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit
# Prepare a 2-qubit Bell state and measure both qubits into the two clbits.
qc = QuantumCircuit(2, 2)
qc.h(0)        # put qubit 0 into superposition
qc.cx(0, 1)    # entangle qubit 1 with qubit 0
for qubit in (0, 1):
    qc.measure(qubit, qubit)
qc.draw('mpl')
|
https://github.com/kvillegas33/Heisenberg_model-Qiskit
|
kvillegas33
|
# Here we are importing all packages from qiskit
from qiskit import *
from qiskit.circuit import Parameter
from math import pi
# define the number of qubits
num_q = 3
# Shared rotation-angle parameter (the label string is mojibake of the Greek
# letter theta, preserved from the original source).
theta = Parameter('ΞΈ')
# Here create the quantum circuit
qc = QuantumCircuit(num_q)
# Apply a parameterized Rz rotation to every qubit.
for i in range(num_q):
    qc.rz(theta,i)
# Here we draw the quantum circuit
qc.draw(output='mpl')
# define the number of qubits
num_q = 3
# Create a fresh circuit (NOTE(review): the original comment said "one qubit"
# but num_q is 3).
qc = QuantumCircuit(num_q)
# One layer of Rz, Ry, Rx rotations on every qubit, all sharing angle theta.
for i in range(num_q):
    qc.rz(theta,i)
    qc.ry(theta,i)
    qc.rx(theta,i)
# NOTE(review): indentation was lost in this extract; the barrier is restored
# after the loop (one barrier for the whole layer) — confirm against the notebook.
qc.barrier()
# Here we draw the quantum circuit
qc.draw(output='mpl')
# define the number of qubits
num_q = 5
# Here create the quantum circuit
qc = QuantumCircuit(num_q)
# Interaction-angle parameter (label is mojibake of the Greek letter phi).
phi = Parameter('Ο')
# Single-qubit rotation layer (uses theta defined earlier in the file).
for i in range(num_q):
    qc.rz(theta,i)
    qc.ry(theta,i)
    qc.rx(theta,i)
# Nearest-neighbour CNOT-Rz(phi)-CNOT blocks: a two-qubit ZZ-type rotation.
for i in range(num_q-1):
    qc.cx(i,i+1)
    qc.rz(phi,i+1)
    qc.cx(i,i+1)
# NOTE(review): barrier restored after the loop — confirm against the notebook.
qc.barrier()
# Here we draw the quantum circuit
qc.draw(output='mpl')
# define the number of qubits
num_q = 5
# Here create the quantum circuit
qc = QuantumCircuit(num_q)
# Single-qubit rotation layer on every qubit.
for i in range(num_q):
    qc.rz(theta,i)
    qc.ry(theta,i)
    qc.rx(theta,i)
# Even-indexed neighbour pairs (0-1, 2-3, ...).
# NOTE(review): the extra condition i <= num_q-2 is always true for
# i in range(num_q-1), so it is redundant.
for i in range(num_q-1):
    if ((i%2)==0 and (i <= (num_q)-2)):
        qc.cx(i,i+1)
        qc.rz(phi,i+1)
        qc.cx(i,i+1)
# Odd-indexed neighbour pairs (1-2, 3-4, ...).
for i in range(num_q-1):
    if ((i%2)==1 and (i <= (num_q)-2)):
        qc.cx(i,i+1)
        qc.rz(phi,i+1)
        qc.cx(i,i+1)
qc.barrier()
# Here we draw the quantum circuit
qc.draw(output='mpl')
# define the number of qubits
num_q = 5
# Here create the quantum circuit
qc = QuantumCircuit(num_q)
# Single-qubit rotation layer.
for i in range(num_q):
    qc.rz(theta,i)
    qc.ry(theta,i)
    qc.rx(theta,i)
qc.barrier()
# Even pairs: three CX-R(phi)-CX blocks (Rz, Ry, Rx on the target qubit).
# NOTE(review): conjugating Rz by CX yields a ZZ rotation, but the Ry/Rx
# variants here are NOT the standard YY/XX interactions (those need extra
# basis-change gates) — confirm this is the intended ansatz.
for i in range(num_q-1):
    if ((i%2)==0 and (i <= (num_q)-2)):
        qc.cx(i,i+1)
        qc.rz(phi,i+1)
        qc.cx(i,i+1)
        qc.cx(i,i+1)
        qc.ry(phi,i+1)
        qc.cx(i,i+1)
        qc.cx(i,i+1)
        qc.rx(phi,i+1)
        qc.cx(i,i+1)
qc.barrier()
# Odd pairs: same three blocks.
for i in range(num_q-1):
    if ((i%2)==1 and (i <= (num_q)-2)):
        qc.cx(i,i+1)
        qc.rz(phi,i+1)
        qc.cx(i,i+1)
        qc.cx(i,i+1)
        qc.ry(phi,i+1)
        qc.cx(i,i+1)
        qc.cx(i,i+1)
        qc.rx(phi,i+1)
        qc.cx(i,i+1)
qc.barrier()
# Here we draw the quantum circuit
qc.draw(output='mpl')
# define the number of qubits
num_q = 5
# Here create the quantum circuit
qc = QuantumCircuit(num_q)
# Single-qubit rotation layer.
for i in range(num_q):
    qc.rz(theta,i)
    qc.ry(theta,i)
    qc.rx(theta,i)
qc.barrier()
# Even pairs: compact two-CX two-qubit block with pre/post single-qubit
# rotations at angles pi/2 +/- phi.
for i in range(num_q-1):
    if ((i%2)==0 and (i <= (num_q)-2)):
        qc.rz(-pi/2,i+1)
        qc.cx(i+1,i)
        qc.rz(pi/2-phi,i)
        qc.ry(phi-pi/2,i+1)
        qc.cx(i,i+1)
        qc.ry(pi/2-phi,i+1)
        qc.cx(i+1,i)
        qc.rz(pi/2,i)
qc.barrier()
# Odd pairs: same decomposition.
for i in range(num_q-1):
    if ((i%2)==1 and (i <= (num_q)-2)):
        qc.rz(-pi/2,i+1)
        qc.cx(i+1,i)
        qc.rz(pi/2-phi,i)
        qc.ry(phi-pi/2,i+1)
        qc.cx(i,i+1)
        qc.ry(pi/2-phi,i+1)
        qc.cx(i+1,i)
        qc.rz(pi/2,i)
qc.barrier()
# Here we draw the quantum circuit
qc.draw(output='mpl')
|
https://github.com/yh08037/quantum-neural-network
|
yh08037
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
# from torchsummary import summary
import qiskit
from qiskit.visualization import *
from qiskit.circuit.random import random_circuit
from itertools import combinations
# Select GPU when available, otherwise fall back to CPU.
if torch.cuda.is_available():
    DEVICE = torch.device('cuda')
else:
    DEVICE = torch.device('cpu')
print('Using PyTorch version:', torch.__version__, ' Device:', DEVICE)
# NOTE(review): the two prints below raise on CPU-only machines; indentation
# was lost in this extract, so they may originally have been inside the
# cuda branch — confirm against the notebook.
print('cuda index:', torch.cuda.current_device())
print('GPU μ΄λ¦:', torch.cuda.get_device_name())
# Experiment configuration.
BATCH_SIZE = 256
EPOCHS = 10 # Number of optimization epochs
n_layers = 1 # Number of random layers
n_train = 50 # Size of the train dataset
n_test = 30 # Size of the test dataset
SAVE_PATH = "quanvolution/" # Data saving folder
PREPROCESS = True # If False, skip quantum processing and load data from SAVE_PATH
seed = 47
np.random.seed(seed) # Seed for NumPy random number generator
torch.manual_seed(seed) # Seed for the PyTorch random number generator
# MNIST train/test datasets (downloaded to ./data, converted to tensors).
train_dataset = datasets.MNIST(root = "./data",
                               train = True,
                               download = True,
                               transform = transforms.ToTensor())
test_dataset = datasets.MNIST(root = "./data",
                              train = False,
                              transform = transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset = train_dataset,
                                           batch_size = BATCH_SIZE,
                                           shuffle = True)
test_loader = torch.utils.data.DataLoader(dataset = test_dataset,
                                          batch_size = BATCH_SIZE,
                                          shuffle = False)
# Peek at one batch to confirm tensor shapes and dtypes.
for (X_train, y_train) in train_loader:
    print('X_train:', X_train.size(), 'type:', X_train.type())
    print('y_train:', y_train.size(), 'type:', y_train.type())
    break
# Preview the first ten images of that batch with their labels.
pltsize = 1
plt.figure(figsize=(10 * pltsize, pltsize))
for i in range(10):
    plt.subplot(1, 10, i + 1)
    plt.axis('off')
    plt.imshow(X_train[i, :, :, :].numpy().reshape(28, 28), cmap = "gray_r")
    plt.title('Class: ' + str(y_train[i].item()))
class QuanvCircuit:
    """
    Filter (kernel) circuit of a Quanvolution layer.

    Encodes a kernel_size x kernel_size pixel patch into per-qubit Rx
    rotations, runs a fixed random circuit, measures all qubits, and decodes
    the counts into a single scalar in [0, 1].
    """
    def __init__(self, kernel_size, backend, shots, threshold):
        # --- Circuit definition start ---
        self.n_qubits = kernel_size ** 2  # one qubit per pixel of the patch
        self._circuit = qiskit.QuantumCircuit(self.n_qubits)
        # One Rx encoding parameter per qubit.
        self.theta = [qiskit.circuit.Parameter('theta{}'.format(i)) for i in range(self.n_qubits)]
        for i in range(self.n_qubits):
            self._circuit.rx(self.theta[i], i)
        self._circuit.barrier()
        # NOTE(review): += on circuits is removed in newer qiskit versions;
        # compose() is the modern equivalent — confirm the pinned version.
        self._circuit += random_circuit(self.n_qubits, 2)
        self._circuit.measure_all()
        # ---- Circuit definition end ----
        self.backend = backend
        self.shots = shots
        # Pixel value above which a qubit is encoded as |1>.
        self.threshold = threshold
    def run(self, data):
        """
        Execute the filter on one patch of shape (1, kernel_size, kernel_size).

        Pixels above self.threshold are encoded as rx(pi) (|1>), others as
        rx(0) (|0>). Returns the total number of measured 1s normalised by
        shots * n_qubits, i.e. a float in [0, 1].
        """
        # reshape [1, kernel_size, kernel_size] -> [1, n_qubits]
        data = torch.reshape(data, (1, self.n_qubits))
        # Threshold-encode pixel values to rotation angles {0, pi}.
        thetas = []
        for dat in data:
            theta = []
            for val in dat:
                if val > self.threshold:
                    theta.append(np.pi)
                else:
                    theta.append(0)
            thetas.append(theta)
        # Bind every encoding parameter to its angle (single parameter set).
        param_dict = dict()
        for theta in thetas:
            for i in range(self.n_qubits):
                param_dict[self.theta[i]] = theta[i]
        param_binds = [param_dict]
        # execute random quantum circuit
        job = qiskit.execute(self._circuit,
                             self.backend,
                             shots = self.shots,
                             parameter_binds = param_binds)
        result = job.result().get_counts(self._circuit)
        # Decode: total number of 1s across all bitstrings, weighted by counts.
        counts = 0
        for key, val in result.items():
            cnt = sum([int(char) for char in key])
            counts += cnt * val
        # Average number of 1s per qubit per shot.
        probabilities = counts / (self.shots * self.n_qubits)
        # probabilities = counts / self.shots
        return probabilities
# Smoke test: run a 2x2 filter on a single patch and draw the circuit.
backend = qiskit.Aer.get_backend('qasm_simulator')
filter_size = 2
circ = QuanvCircuit(filter_size, backend, 100, 127)  # 100 shots, threshold 127
data = torch.tensor([[0, 200], [100, 255]])
print(data.size())
print(circ.run(data))
circ._circuit.draw(output='mpl')
# def quanv_feed(image):
# """
# Convolves the input image with many applications
# of the same quantum circuit.
# In the standard language of CNN, this would correspond to
# a convolution with a 5Γ5 kernel and a stride equal to 1.
# """
# out = np.zeros((24, 24, 25))
# # Loop over the coordinates of the top-left pixel of 5X5 squares
# for j in range(24):
# for k in range(24):
# # Process a squared 5x5 region of the image with a quantum circuit
# circuit_input = []
# for a in range(5):
# for b in range(5):
# circuit_input.append(image[j + a, k + b, 0])
# q_results = circuit(circuit_input)
# # Assign expectation values to different channels of the output pixel (j/2, k/2)
# for c in range(25):
# out[j, k, c] = q_results[c]
# return out
class QuanvFunction(Function):
    """
    Custom autograd Function for the quanvolution layer.

    forward slides each quantum filter circuit over the input images
    (stride 1, no padding); backward estimates input gradients with a
    finite-difference shift rule.
    """
    @staticmethod
    def forward(ctx, inputs, in_channels, out_channels, kernel_size, quantum_circuits, shift):
        """Forward pass.

        inputs           : tensor of shape (batch, 1, H, W)
        quantum_circuits : one object per output channel, each exposing
                           .run(patch) -> float
        Returns a tensor of shape (batch, len(quantum_circuits), H-k+1, W-k+1).
        """
        ctx.in_channels = in_channels
        ctx.out_channels = out_channels
        ctx.kernel_size = kernel_size
        ctx.quantum_circuits = quantum_circuits
        ctx.shift = shift
        _, _, len_x, len_y = inputs.size()
        # Output spatial size for a stride-1, no-padding sliding window.
        len_x = len_x - kernel_size + 1
        len_y = len_y - kernel_size + 1
        features = []
        for sample in inputs:
            feature = []
            for circuit in quantum_circuits:
                xys = []
                for x in range(len_x):
                    ys = []
                    for y in range(len_y):
                        # One kernel_size x kernel_size patch per output pixel.
                        patch = sample[0, x:x + kernel_size, y:y + kernel_size]
                        ys.append(circuit.run(patch))
                    xys.append(ys)
                feature.append(xys)
            features.append(feature)
        result = torch.tensor(features)
        ctx.save_for_backward(inputs, result)
        return result
    @staticmethod
    def backward(ctx, grad_output):
        """Backward pass via finite differences (not fully verified)."""
        inputs, _expectation_z = ctx.saved_tensors
        input_list = np.array(inputs.tolist())
        shift_right = input_list + np.ones(input_list.shape) * ctx.shift
        shift_left = input_list - np.ones(input_list.shape) * ctx.shift
        # BUG FIX: the original read ctx.quantum_circuit (singular), which was
        # never stored by forward and raised AttributeError. Use the first
        # stored circuit. TODO(review): a complete implementation should
        # differentiate through every channel's circuit, not just the first.
        circuit = ctx.quantum_circuits[0]
        gradients = []
        for i in range(len(input_list)):
            expectation_right = circuit.run(shift_right[i])
            expectation_left = circuit.run(shift_left[i])
            gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
            gradients.append(gradient)
        gradients = np.array([gradients]).T
        # BUG FIX: forward receives 6 arguments besides ctx, so backward must
        # return 6 gradients (None for the non-tensor arguments); the original
        # returned only 2, which autograd rejects.
        return torch.tensor([gradients]).float() * grad_output.float(), None, None, None, None, None
class Quanv(nn.Module):
    """
    Quanvolution (quantum convolution) layer.

    Holds one QuanvCircuit per output channel and delegates the computation
    to QuanvFunction.
    """
    def __init__(self, in_channels, out_channels, kernel_size,
                 backend=None, shots=100, shift=np.pi/2):
        """
        backend: qiskit backend; defaults to the local qasm_simulator.
        BUG FIX: the default used to be backend=qiskit.Aer.get_backend(...),
        which is evaluated once at class-definition time (an import-time side
        effect shared by all instances); it is now resolved lazily per call.
        """
        super(Quanv, self).__init__()
        if backend is None:
            backend = qiskit.Aer.get_backend('qasm_simulator')
        self.quantum_circuits = [QuanvCircuit(kernel_size=kernel_size,
                                              backend=backend, shots=shots, threshold=127)
                                 for i in range(out_channels)]
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.shift = shift
    def forward(self, inputs):
        return QuanvFunction.apply(inputs, self.in_channels, self.out_channels, self.kernel_size,
                                   self.quantum_circuits, self.shift)
class Net(nn.Module):
    """Hybrid quantum-classical CNN: Quanv -> Conv2d -> two FC layers."""
    def __init__(self):
        super(Net, self).__init__()
        self.quanv = Quanv(1, 6, kernel_size=5)    # (1, 28, 28) -> (6, 24, 24)
        self.conv = nn.Conv2d(6, 16, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(256, 64)              # 16 * 4 * 4 = 256 after two poolings
        self.fc2 = nn.Linear(64, 10)
    def forward(self, x):
        x = F.relu(self.quanv(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout(x)
        x = torch.flatten(x, start_dim=1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        # BUG FIX: the original returned F.softmax(x) with no dim argument
        # (deprecated) while the training script feeds the output to
        # nn.CrossEntropyLoss, which applies log-softmax internally —
        # softmaxing twice flattens the loss. Return raw logits instead;
        # argmax-based predictions are unaffected.
        return x
# --- Training loop ---
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
epochs = 20
loss_list = []  # mean training loss per epoch
model.train()
for epoch in range(epochs):
    total_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        total_loss.append(loss.item())
    loss_list.append(sum(total_loss)/len(total_loss))
    print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
        100. * (epoch + 1) / epochs, loss_list[-1]))
# --- Evaluation on the test set ---
model.eval()
# BUG FIX: the original appended test losses to the training loop's
# leftover `total_loss`, mixing train and test losses in the report.
test_loss = []
correct = 0
with torch.no_grad():
    for batch_idx, (data, target) in enumerate(test_loader):
        # BUG FIX: the original unconditionally called .cuda() on data/target
        # (and, as a no-op, on the model output) even though the model was
        # never moved to the GPU — crashing on CPU-only machines and
        # mismatching devices otherwise. Evaluate on the model's device (CPU).
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        test_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
    sum(test_loss) / len(test_loss),
    # BUG FIX: accuracy used the undefined name `batch_size` (NameError) and
    # len(test_loader) (batch count); divide by the dataset size instead.
    correct / len(test_loader.dataset) * 100)
)
|
https://github.com/BP-2/SimonsAlgorithm
|
BP-2
|
# importing Qiskit
from qiskit import Aer
from qiskit.providers.aer import AerSimulator
from qiskit.providers.ibmq import least_busy
from qiskit import *
# import basic plot tools
from qiskit.visualization import plot_histogram
from qiskit_textbook.tools import simon_oracle
from IPython.display import display
# This will be our hidden bitstring for Simon's problem.
b = '110'
n = len(b)
# Two registers of n qubits each (oracle input + output); only the first n
# qubits are measured.
simon_circuit = QuantumCircuit(n*2, n)
# Apply Hadamard gates before querying the oracle
simon_circuit.h(range(n))
# Apply barrier for visual separation
simon_circuit.barrier()
# Append the black-box oracle for b across all 2n qubits.
simon_circuit.append(simon_oracle(b), [0,1,2,3,4,5])
# Apply barrier for visual separation
simon_circuit.barrier()
# Apply Hadamard gates to the input register
simon_circuit.h(range(n))
# Measure qubits
simon_circuit.measure(range(n), range(n))
display(simon_circuit.draw())
# Sample on the local QASM simulator; each measured string z satisfies
# z . b = 0 (mod 2), from which b can be recovered.
sim = Aer.get_backend('qasm_simulator')
result = execute(simon_circuit, backend = sim, shots = 1024).result()
counts = result.get_counts()
print(counts)
display(plot_histogram(counts))
|
https://github.com/2lambda123/Qiskit-qiskit
|
2lambda123
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Tests for Layer2Q implementation.
"""
import unittest
from random import randint
import test.python.transpiler.aqc.fast_gradient.utils_for_testing as tut
import numpy as np
import qiskit.transpiler.synthesis.aqc.fast_gradient.layer as lr
from qiskit.transpiler.synthesis.aqc.fast_gradient.pmatrix import PMatrix
from qiskit.test import QiskitTestCase
class TestLayer2q(QiskitTestCase):
    """
    Tests for Layer2Q class.
    """

    max_num_qubits = 5  # maximum number of qubits in tests
    num_repeats = 50  # number of repetitions in tests

    def setUp(self):
        super().setUp()
        # Fixed seed so the random test matrices are reproducible.
        np.random.seed(0x0696969)

    def test_layer2q_matrix(self):
        """
        Tests: (1) the correctness of Layer2Q matrix construction;
        (2) matrix multiplication interleaved with permutations.
        """
        mat_kind = "complex"
        _eps = 100.0 * np.finfo(float).eps  # tolerance: 100 machine epsilons
        max_rel_err = 0.0
        for n in range(2, self.max_num_qubits + 1):
            dim = 2**n
            iden = tut.eye_int(n)
            # All ordered pairs of distinct qubit indices (j, k).
            for j in range(n):
                for k in range(n):
                    if j == k:
                        continue
                    m_mat = tut.rand_matrix(dim=dim, kind=mat_kind)
                    t_mat, g_mat = tut.make_test_matrices4x4(n=n, j=j, k=k, kind=mat_kind)
                    lmat = lr.Layer2Q(num_qubits=n, j=j, k=k, g4x4=g_mat)
                    g2, perm, inv_perm = lmat.get_attr()
                    self.assertTrue(m_mat.dtype == t_mat.dtype == g_mat.dtype == g2.dtype)
                    self.assertTrue(np.all(g_mat == g2))
                    # perm and inv_perm must be mutually inverse permutations.
                    self.assertTrue(np.all(iden[perm].T == iden[inv_perm]))
                    g_mat = np.kron(tut.eye_int(n - 2), g_mat)
                    # T == P^t @ G @ P.
                    err = tut.relative_error(t_mat, iden[perm].T @ g_mat @ iden[perm])
                    self.assertLess(err, _eps, "err = {:0.16f}".format(err))
                    max_rel_err = max(max_rel_err, err)
                    # Multiplication by permutation matrix of the left can be
                    # replaced by row permutations.
                    tm = t_mat @ m_mat
                    err1 = tut.relative_error(iden[perm].T @ g_mat @ m_mat[perm], tm)
                    err2 = tut.relative_error((g_mat @ m_mat[perm])[inv_perm], tm)
                    # Multiplication by permutation matrix of the right can be
                    # replaced by column permutations.
                    mt = m_mat @ t_mat
                    err3 = tut.relative_error(m_mat @ iden[perm].T @ g_mat @ iden[perm], mt)
                    err4 = tut.relative_error((m_mat[:, perm] @ g_mat)[:, inv_perm], mt)
                    self.assertTrue(
                        err1 < _eps and err2 < _eps and err3 < _eps and err4 < _eps,
                        "err1 = {:f}, err2 = {:f}, "
                        "err3 = {:f}, err4 = {:f}".format(err1, err2, err3, err4),
                    )
                    max_rel_err = max(max_rel_err, err1, err2, err3, err4)

    def test_pmatrix_class(self):
        """
        Test the class PMatrix.
        """
        _eps = 100.0 * np.finfo(float).eps
        mat_kind = "complex"
        max_rel_err = 0.0
        for n in range(2, self.max_num_qubits + 1):
            dim = 2**n
            # Scratch buffers reused across repetitions.
            tmp1 = np.ndarray((dim, dim), dtype=np.cfloat)
            tmp2 = tmp1.copy()
            for _ in range(self.num_repeats):
                # Five random qubit pairs (j, k) with j != k guaranteed.
                j0 = randint(0, n - 1)
                k0 = (j0 + randint(1, n - 1)) % n
                j1 = randint(0, n - 1)
                k1 = (j1 + randint(1, n - 1)) % n
                j2 = randint(0, n - 1)
                k2 = (j2 + randint(1, n - 1)) % n
                j3 = randint(0, n - 1)
                k3 = (j3 + randint(1, n - 1)) % n
                j4 = randint(0, n - 1)
                k4 = (j4 + randint(1, n - 1)) % n
                t0, g0 = tut.make_test_matrices4x4(n=n, j=j0, k=k0, kind=mat_kind)
                t1, g1 = tut.make_test_matrices4x4(n=n, j=j1, k=k1, kind=mat_kind)
                t2, g2 = tut.make_test_matrices4x4(n=n, j=j2, k=k2, kind=mat_kind)
                t3, g3 = tut.make_test_matrices4x4(n=n, j=j3, k=k3, kind=mat_kind)
                t4, g4 = tut.make_test_matrices4x4(n=n, j=j4, k=k4, kind=mat_kind)
                c0 = lr.Layer2Q(num_qubits=n, j=j0, k=k0, g4x4=g0)
                c1 = lr.Layer2Q(num_qubits=n, j=j1, k=k1, g4x4=g1)
                c2 = lr.Layer2Q(num_qubits=n, j=j2, k=k2, g4x4=g2)
                c3 = lr.Layer2Q(num_qubits=n, j=j3, k=k3, g4x4=g3)
                c4 = lr.Layer2Q(num_qubits=n, j=j4, k=k4, g4x4=g4)
                m_mat = tut.rand_matrix(dim=dim, kind=mat_kind)
                # Reference product: T0 @ T1 @ M @ T2^dagger @ T3^dagger.
                ttmtt = t0 @ t1 @ m_mat @ np.conj(t2).T @ np.conj(t3).T
                pmat = PMatrix(n)
                pmat.set_matrix(m_mat)
                pmat.mul_left_q2(layer=c1, temp_mat=tmp1)
                pmat.mul_left_q2(layer=c0, temp_mat=tmp1)
                pmat.mul_right_q2(layer=c2, temp_mat=tmp1)
                pmat.mul_right_q2(layer=c3, temp_mat=tmp1)
                alt_ttmtt = pmat.finalize(temp_mat=tmp1)
                err1 = tut.relative_error(alt_ttmtt, ttmtt)
                self.assertLess(err1, _eps, "relative error: {:f}".format(err1))
                # Trace-product against a fifth layer.
                prod = np.cfloat(np.trace(ttmtt @ t4))
                alt_prod = pmat.product_q2(layer=c4, tmp1=tmp1, tmp2=tmp2)
                err2 = abs(alt_prod - prod) / abs(prod)
                self.assertLess(err2, _eps, "relative error: {:f}".format(err2))
                max_rel_err = max(max_rel_err, err1, err2)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
https://github.com/swe-bench/Qiskit__qiskit
|
swe-bench
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-function-docstring
"""
Tests for the default UnitarySynthesis transpiler pass.
"""
from test import combine
import unittest
import numpy as np
from ddt import ddt, data
from qiskit import transpile
from qiskit.test import QiskitTestCase
from qiskit.providers.fake_provider import FakeVigo, FakeMumbaiFractionalCX, FakeBelemV2
from qiskit.providers.fake_provider.fake_backend_v2 import FakeBackendV2, FakeBackend5QV2
from qiskit.circuit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.circuit.library import QuantumVolume
from qiskit.converters import circuit_to_dag, dag_to_circuit
from qiskit.transpiler.passes import UnitarySynthesis
from qiskit.quantum_info.operators import Operator
from qiskit.quantum_info.random import random_unitary
from qiskit.transpiler import PassManager, CouplingMap, Target, InstructionProperties
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.exceptions import QiskitError
from qiskit.transpiler.passes import (
Collect2qBlocks,
ConsolidateBlocks,
Optimize1qGates,
SabreLayout,
Unroll3qOrMore,
CheckMap,
BarrierBeforeFinalMeasurements,
SabreSwap,
TrivialLayout,
)
from qiskit.circuit.library import (
IGate,
CXGate,
RZGate,
RXGate,
SXGate,
XGate,
iSwapGate,
ECRGate,
UGate,
ZGate,
RYYGate,
RZZGate,
RXXGate,
)
from qiskit.circuit import Measure
from qiskit.circuit.controlflow import IfElseOp
from qiskit.circuit import Parameter, Gate
@ddt
class TestUnitarySynthesis(QiskitTestCase):
"""Test UnitarySynthesis pass."""
def test_empty_basis_gates(self):
    """Verify when basis_gates is None, we do not synthesize unitaries."""
    circuit = QuantumCircuit(3)
    # Random unitaries of width 1, 2 and 3; none should be synthesized.
    for qubits in ([0], [0, 1], [0, 1, 2]):
        op = random_unitary(2 ** len(qubits), seed=0)
        circuit.unitary(op.data, qubits)
    result = UnitarySynthesis(basis_gates=None, min_qubits=2)(circuit)
    self.assertEqual(result.count_ops(), {"unitary": 3})
@data(
    ["u3", "cx"],
    ["u1", "u2", "u3", "cx"],
    ["rx", "ry", "rxx"],
    ["rx", "rz", "iswap"],
    ["u3", "rx", "rz", "cz", "iswap"],
)
def test_two_qubit_synthesis_to_basis(self, basis_gates):
    """Verify two qubit unitaries are synthesized to match basis gates."""
    # Build a Bell-state preparation and wrap its operator as a unitary gate.
    bell = QuantumCircuit(2)
    bell.h(0)
    bell.cx(0, 1)
    bell_op = Operator(bell)
    qc = QuantumCircuit(2)
    qc.unitary(bell_op, [0, 1])
    dag = circuit_to_dag(qc)
    # After synthesis, only gates from the parametrized basis may remain.
    out = UnitarySynthesis(basis_gates).run(dag)
    self.assertTrue(set(out.count_ops()).issubset(basis_gates))
def test_two_qubit_synthesis_to_directional_cx_from_gate_errors(self):
    """Verify two qubit unitaries are synthesized to match basis gates."""
    # TODO: should make check more explicit e.g. explicitly set gate
    # direction in test instead of using specific fake backend
    backend = FakeVigo()
    conf = backend.configuration()
    qr = QuantumRegister(2)
    coupling_map = CouplingMap(conf.coupling_map)
    triv_layout_pass = TrivialLayout(coupling_map)
    qc = QuantumCircuit(qr)
    qc.unitary(random_unitary(4, seed=12), [0, 1])
    # Synthesis with natural_direction disabled ...
    unisynth_pass = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=None,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=False,
    )
    pm = PassManager([triv_layout_pass, unisynth_pass])
    qc_out = pm.run(qc)
    # ... and enabled; both variants must preserve the overall unitary.
    unisynth_pass_nat = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=None,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=True,
    )
    pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])
    qc_out_nat = pm_nat.run(qc)
    self.assertEqual(Operator(qc), Operator(qc_out))
    self.assertEqual(Operator(qc), Operator(qc_out_nat))
def test_swap_synthesis_to_directional_cx(self):
    """Verify two qubit unitaries are synthesized to match basis gates."""
    # TODO: should make check more explicit e.g. explicitly set gate
    # direction in test instead of using specific fake backend
    backend = FakeVigo()
    conf = backend.configuration()
    qr = QuantumRegister(2)
    coupling_map = CouplingMap(conf.coupling_map)
    triv_layout_pass = TrivialLayout(coupling_map)
    # A plain SWAP is the unitary under test here.
    qc = QuantumCircuit(qr)
    qc.swap(qr[0], qr[1])
    unisynth_pass = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=None,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=False,
    )
    pm = PassManager([triv_layout_pass, unisynth_pass])
    qc_out = pm.run(qc)
    # Same synthesis with natural gate direction enabled.
    unisynth_pass_nat = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=None,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=True,
    )
    pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])
    qc_out_nat = pm_nat.run(qc)
    # Both variants must preserve the SWAP unitary.
    self.assertEqual(Operator(qc), Operator(qc_out))
    self.assertEqual(Operator(qc), Operator(qc_out_nat))
def test_two_qubit_synthesis_to_directional_cx_multiple_registers(self):
    """Verify two qubit unitaries are synthesized to match basis gates
    across multiple registers."""
    # TODO: should make check more explicit e.g. explicitly set gate
    # direction in test instead of using specific fake backend
    backend = FakeVigo()
    conf = backend.configuration()
    # Two single-qubit registers so the unitary spans register boundaries.
    qr0 = QuantumRegister(1)
    qr1 = QuantumRegister(1)
    coupling_map = CouplingMap(conf.coupling_map)
    triv_layout_pass = TrivialLayout(coupling_map)
    qc = QuantumCircuit(qr0, qr1)
    qc.unitary(random_unitary(4, seed=12), [qr0[0], qr1[0]])
    unisynth_pass = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=None,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=False,
    )
    pm = PassManager([triv_layout_pass, unisynth_pass])
    qc_out = pm.run(qc)
    # Same synthesis with natural gate direction enabled.
    unisynth_pass_nat = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=None,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=True,
    )
    pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])
    qc_out_nat = pm_nat.run(qc)
    # Both variants must preserve the original unitary.
    self.assertEqual(Operator(qc), Operator(qc_out))
    self.assertEqual(Operator(qc), Operator(qc_out_nat))
def test_two_qubit_synthesis_to_directional_cx_from_coupling_map(self):
    """Verify natural cx direction is used when specified in coupling map."""
    # TODO: should make check more explicit e.g. explicitly set gate
    # direction in test instead of using specific fake backend
    backend = FakeVigo()
    conf = backend.configuration()
    qr = QuantumRegister(2)
    # Explicit coupling map with a [0, 1] edge (differs from the decomposer
    # default direction, see assertions below).
    coupling_map = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])
    triv_layout_pass = TrivialLayout(coupling_map)
    qc = QuantumCircuit(qr)
    qc.unitary(random_unitary(4, seed=12), [0, 1])
    unisynth_pass = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=coupling_map,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=False,
    )
    pm = PassManager([triv_layout_pass, unisynth_pass])
    qc_out = pm.run(qc)
    # Same synthesis with natural gate direction enabled.
    unisynth_pass_nat = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=coupling_map,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=True,
    )
    pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])
    qc_out_nat = pm_nat.run(qc)
    # the decomposer defaults to the [1, 0] direction but the coupling
    # map specifies a [0, 1] direction. Check that this is respected.
    self.assertTrue(
        all(((qr[1], qr[0]) == instr.qubits for instr in qc_out.get_instructions("cx")))
    )
    self.assertTrue(
        all(((qr[0], qr[1]) == instr.qubits for instr in qc_out_nat.get_instructions("cx")))
    )
    self.assertEqual(Operator(qc), Operator(qc_out))
    self.assertEqual(Operator(qc), Operator(qc_out_nat))
def test_two_qubit_synthesis_to_directional_cx_from_coupling_map_natural_none(self):
    """Verify natural cx direction is used when specified in coupling map
    when natural_direction is None."""
    # TODO: should make check more explicit e.g. explicitly set gate
    # direction in test instead of using specific fake backend
    backend = FakeVigo()
    conf = backend.configuration()
    qr = QuantumRegister(2)
    coupling_map = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])
    triv_layout_pass = TrivialLayout(coupling_map)
    qc = QuantumCircuit(qr)
    qc.unitary(random_unitary(4, seed=12), [0, 1])
    unisynth_pass = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=coupling_map,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=False,
    )
    pm = PassManager([triv_layout_pass, unisynth_pass])
    qc_out = pm.run(qc)
    # natural_direction=None should behave like True whenever direction
    # information is available from the coupling map.
    unisynth_pass_nat = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=coupling_map,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=None,
    )
    pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])
    qc_out_nat = pm_nat.run(qc)
    # the decomposer defaults to the [1, 0] direction but the coupling
    # map specifies a [0, 1] direction. Check that this is respected.
    self.assertTrue(
        all(((qr[1], qr[0]) == instr.qubits for instr in qc_out.get_instructions("cx")))
    )
    self.assertTrue(
        all(((qr[0], qr[1]) == instr.qubits for instr in qc_out_nat.get_instructions("cx")))
    )
    self.assertEqual(Operator(qc), Operator(qc_out))
    self.assertEqual(Operator(qc), Operator(qc_out_nat))
def test_two_qubit_synthesis_to_directional_cx_from_coupling_map_natural_false(self):
    """Verify the decomposer's own [1, 0] cx direction is kept when
    natural_direction is False, even though the coupling map specifies
    the [0, 1] direction."""
    # TODO: should make check more explicit e.g. explicitly set gate
    # direction in test instead of using specific fake backend
    backend = FakeVigo()
    conf = backend.configuration()
    qr = QuantumRegister(2)
    coupling_map = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])
    triv_layout_pass = TrivialLayout(coupling_map)
    qc = QuantumCircuit(qr)
    qc.unitary(random_unitary(4, seed=12), [0, 1])
    unisynth_pass = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=coupling_map,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=False,
    )
    pm = PassManager([triv_layout_pass, unisynth_pass])
    qc_out = pm.run(qc)
    unisynth_pass_nat = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=coupling_map,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=False,
    )
    pm_nat = PassManager([triv_layout_pass, unisynth_pass_nat])
    qc_out_nat = pm_nat.run(qc)
    # With natural_direction=False in BOTH passes, each synthesis keeps the
    # decomposer's default [1, 0] direction; the coupling map's [0, 1]
    # direction is deliberately ignored.
    self.assertTrue(
        all(((qr[1], qr[0]) == instr.qubits for instr in qc_out.get_instructions("cx")))
    )
    self.assertTrue(
        all(((qr[1], qr[0]) == instr.qubits for instr in qc_out_nat.get_instructions("cx")))
    )
    self.assertEqual(Operator(qc), Operator(qc_out))
    self.assertEqual(Operator(qc), Operator(qc_out_nat))
def test_two_qubit_synthesis_not_pulse_optimal(self):
    """Verify not attempting pulse optimal decomposition when pulse_optimize==False."""
    device = FakeVigo()
    cmap = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])
    circuit = QuantumCircuit(QuantumRegister(2))
    circuit.append(random_unitary(4, seed=12).to_instruction(), [0, 1]) if False else circuit.unitary(random_unitary(4, seed=12), [0, 1])
    synth = UnitarySynthesis(
        basis_gates=device.configuration().basis_gates,
        coupling_map=cmap,
        backend_props=device.properties(),
        pulse_optimize=False,
        natural_direction=True,
    )
    result = PassManager([TrivialLayout(cmap), synth]).run(circuit)
    # pm.run may hand back a single circuit or a list of circuits.
    if isinstance(result, QuantumCircuit):
        counts = result.count_ops()
    else:
        counts = result[0].count_ops()
    # The non-pulse-optimal decomposition is expected to use many sx gates.
    self.assertIn("sx", counts)
    self.assertGreaterEqual(counts["sx"], 16)
def test_two_qubit_pulse_optimal_true_raises(self):
    """Verify raises if pulse optimal==True but cx is not in the backend basis."""
    device = FakeVigo()
    config = device.configuration()
    # Swap cx for iswap; no pulse-optimal iswap decomposition is assumed to exist.
    config.basis_gates = ["iswap" if gate == "cx" else gate for gate in config.basis_gates]
    cmap = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])
    circuit = QuantumCircuit(QuantumRegister(2))
    circuit.unitary(random_unitary(4, seed=12), [0, 1])
    synth = UnitarySynthesis(
        basis_gates=config.basis_gates,
        coupling_map=cmap,
        backend_props=device.properties(),
        pulse_optimize=True,
        natural_direction=True,
    )
    pipeline = PassManager([TrivialLayout(cmap), synth])
    with self.assertRaises(QiskitError):
        pipeline.run(circuit)
def test_two_qubit_natural_direction_true_duration_fallback(self):
    """Verify natural_direction=True falls back to backend duration/error
    data to choose the cx direction when the coupling map is bidirectional
    and therefore does not single out a direction."""
    backend = FakeVigo()
    conf = backend.configuration()
    qr = QuantumRegister(2)
    # Bidirectional [0, 1] / [1, 0] edge: no unique hardware direction.
    coupling_map = CouplingMap([[0, 1], [1, 0], [1, 2], [1, 3], [3, 4]])
    triv_layout_pass = TrivialLayout(coupling_map)
    qc = QuantumCircuit(qr)
    qc.unitary(random_unitary(4, seed=12), [0, 1])
    unisynth_pass = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=coupling_map,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=True,
    )
    pm = PassManager([triv_layout_pass, unisynth_pass])
    qc_out = pm.run(qc)
    # Backend properties should steer the synthesis to the (0, 1) direction.
    self.assertTrue(
        all(((qr[0], qr[1]) == instr.qubits for instr in qc_out.get_instructions("cx")))
    )
def test_two_qubit_natural_direction_true_gate_length_raises(self):
    """Verify natural_direction=True raises a TranspilerError when no
    coupling map is given to the synthesis pass and the backend's cx
    duration/error data (made identical here) cannot break the tie."""
    backend = FakeVigo()
    conf = backend.configuration()
    # Force the same gate_length / gate_error onto every cx direction so
    # neither direction is preferable.
    for _, nduv in backend.properties()._gates["cx"].items():
        nduv["gate_length"] = (4e-7, nduv["gate_length"][1])
        nduv["gate_error"] = (7e-3, nduv["gate_error"][1])
    qr = QuantumRegister(2)
    coupling_map = CouplingMap([[0, 1], [1, 0], [1, 2], [1, 3], [3, 4]])
    triv_layout_pass = TrivialLayout(coupling_map)
    qc = QuantumCircuit(qr)
    qc.unitary(random_unitary(4, seed=12), [0, 1])
    # NOTE: coupling_map is intentionally NOT passed to the synthesis pass.
    unisynth_pass = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        backend_props=backend.properties(),
        pulse_optimize=True,
        natural_direction=True,
    )
    pm = PassManager([triv_layout_pass, unisynth_pass])
    with self.assertRaises(TranspilerError):
        pm.run(qc)
def test_two_qubit_pulse_optimal_none_optimal(self):
    """Verify pulse optimal decomposition when pulse_optimize==None."""
    backend = FakeVigo()
    conf = backend.configuration()
    qr = QuantumRegister(2)
    coupling_map = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])
    triv_layout_pass = TrivialLayout(coupling_map)
    qc = QuantumCircuit(qr)
    qc.unitary(random_unitary(4, seed=12), [0, 1])
    unisynth_pass = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=coupling_map,
        backend_props=backend.properties(),
        pulse_optimize=None,
        natural_direction=True,
    )
    pm = PassManager([triv_layout_pass, unisynth_pass])
    qc_out = pm.run(qc)
    # pm.run may return a single circuit or a list of circuits.
    if isinstance(qc_out, QuantumCircuit):
        num_ops = qc_out.count_ops()
    else:
        num_ops = qc_out[0].count_ops()
    # A pulse-optimal decomposition should need comparatively few sx gates.
    self.assertIn("sx", num_ops)
    self.assertLessEqual(num_ops["sx"], 12)
def test_two_qubit_pulse_optimal_none_no_raise(self):
    """Verify pulse optimal decomposition when pulse_optimize==None doesn't
    raise when pulse optimal decomposition unknown."""
    # this assumes an iswap pulse optimal decomposition doesn't exist
    backend = FakeVigo()
    conf = backend.configuration()
    conf.basis_gates = [gate if gate != "cx" else "iswap" for gate in conf.basis_gates]
    qr = QuantumRegister(2)
    coupling_map = CouplingMap([[0, 1], [1, 2], [1, 3], [3, 4]])
    triv_layout_pass = TrivialLayout(coupling_map)
    qc = QuantumCircuit(qr)
    qc.unitary(random_unitary(4, seed=12), [0, 1])
    unisynth_pass = UnitarySynthesis(
        basis_gates=conf.basis_gates,
        coupling_map=coupling_map,
        backend_props=backend.properties(),
        pulse_optimize=None,
        natural_direction=True,
    )
    pm = PassManager([triv_layout_pass, unisynth_pass])
    # pulse_optimize=None is best-effort: unknown decompositions must not raise.
    try:
        qc_out = pm.run(qc)
    except QiskitError:
        self.fail("pulse_optimize=None raised exception unexpectedly")
    if isinstance(qc_out, QuantumCircuit):
        num_ops = qc_out.count_ops()
    else:
        num_ops = qc_out[0].count_ops()
    self.assertIn("sx", num_ops)
    self.assertLessEqual(num_ops["sx"], 14)
def test_qv_natural(self):
    """check that quantum volume circuit compiles for natural direction"""
    qv64 = QuantumVolume(5, seed=15)
    def construct_passmanager(basis_gates, coupling_map, synthesis_fidelity, pulse_optimize):
        # Minimal layout -> route -> synthesize pipeline shared by both
        # pulse_optimize variants below.
        seed = 2
        _map = [SabreLayout(coupling_map, max_iterations=2, seed=seed)]
        _unroll3q = Unroll3qOrMore()
        _swap_check = CheckMap(coupling_map)
        _swap = [
            BarrierBeforeFinalMeasurements(),
            SabreSwap(coupling_map, heuristic="lookahead", seed=seed),
        ]
        _optimize = [
            Collect2qBlocks(),
            ConsolidateBlocks(basis_gates=basis_gates),
            UnitarySynthesis(
                basis_gates,
                synthesis_fidelity,
                coupling_map,
                pulse_optimize=pulse_optimize,
                natural_direction=True,
            ),
            Optimize1qGates(basis_gates),
        ]
        pm = PassManager()
        pm.append(_map)  # map to hardware by inserting swaps
        pm.append(_unroll3q)
        pm.append(_swap_check)
        pm.append(_swap)
        pm.append(_optimize)
        return pm
    coupling_map = CouplingMap([[0, 1], [1, 2], [3, 2], [3, 4], [5, 4]])
    basis_gates = ["rz", "sx", "cx"]
    pm1 = construct_passmanager(
        basis_gates=basis_gates,
        coupling_map=coupling_map,
        synthesis_fidelity=0.99,
        pulse_optimize=True,
    )
    pm2 = construct_passmanager(
        basis_gates=basis_gates,
        coupling_map=coupling_map,
        synthesis_fidelity=0.99,
        pulse_optimize=False,
    )
    qv64_1 = pm1.run(qv64.decompose())
    qv64_2 = pm2.run(qv64.decompose())
    # Every cx must sit on a directed coupling-map edge, and both
    # compilations must implement the same unitary.
    edges = [list(edge) for edge in coupling_map.get_edges()]
    self.assertTrue(
        all(
            [qv64_1.qubits.index(qubit) for qubit in instr.qubits] in edges
            for instr in qv64_1.get_instructions("cx")
        )
    )
    self.assertEqual(Operator(qv64_1), Operator(qv64_2))
@data(1, 2, 3)
def test_coupling_map_transpile(self, opt):
    """test natural_direction works with transpile/execute"""
    qr = QuantumRegister(2)
    circ = QuantumCircuit(qr)
    circ.append(random_unitary(4, seed=1), [0, 1])
    # Transpile against two single-edge coupling maps with opposite
    # directions; every cx must follow the map's direction in each case.
    circ_01 = transpile(
        circ, basis_gates=["rz", "sx", "cx"], optimization_level=opt, coupling_map=[[0, 1]]
    )
    circ_10 = transpile(
        circ, basis_gates=["rz", "sx", "cx"], optimization_level=opt, coupling_map=[[1, 0]]
    )
    circ_01_index = {qubit: index for index, qubit in enumerate(circ_01.qubits)}
    circ_10_index = {qubit: index for index, qubit in enumerate(circ_10.qubits)}
    self.assertTrue(
        all(
            (
                (1, 0) == (circ_10_index[instr.qubits[0]], circ_10_index[instr.qubits[1]])
                for instr in circ_10.get_instructions("cx")
            )
        )
    )
    self.assertTrue(
        all(
            (
                (0, 1) == (circ_01_index[instr.qubits[0]], circ_01_index[instr.qubits[1]])
                for instr in circ_01.get_instructions("cx")
            )
        )
    )
@combine(
    opt_level=[0, 1, 2, 3],
    bidirectional=[True, False],
    dsc=(
        "test natural_direction works with transpile using opt_level {opt_level} on"
        " target with multiple 2q gates with bidirectional={bidirectional}"
    ),
    name="opt_level_{opt_level}_bidirectional_{bidirectional}",
)
def test_coupling_map_transpile_with_backendv2(self, opt_level, bidirectional):
    """Verify natural direction is respected when transpiling against a
    BackendV2 target (see the @combine dsc for the parameter sweep)."""
    backend = FakeBackend5QV2(bidirectional)
    qr = QuantumRegister(2)
    circ = QuantumCircuit(qr)
    circ.append(random_unitary(4, seed=1), [0, 1])
    circ_01 = transpile(
        circ, backend=backend, optimization_level=opt_level, layout_method="trivial"
    )
    circ_01_index = {qubit: index for index, qubit in enumerate(circ_01.qubits)}
    # At least one cx must be emitted, and every cx must use the (0, 1) direction.
    self.assertGreaterEqual(len(circ_01.get_instructions("cx")), 1)
    for instr in circ_01.get_instructions("cx"):
        self.assertEqual(
            (0, 1), (circ_01_index[instr.qubits[0]], circ_01_index[instr.qubits[1]])
        )
@data(1, 2, 3)
def test_coupling_map_unequal_durations(self, opt):
    """Test direction with transpile/execute with backend durations."""
    qr = QuantumRegister(2)
    circ = QuantumCircuit(qr)
    # Unitary is appended on [1, 0]; the backend's durations should still
    # flip the synthesized cx gates to the (0, 1) direction.
    circ.append(random_unitary(4, seed=1), [1, 0])
    backend = FakeVigo()
    tqc = transpile(
        circ,
        backend=backend,
        optimization_level=opt,
        translation_method="synthesis",
        layout_method="trivial",
    )
    tqc_index = {qubit: index for index, qubit in enumerate(tqc.qubits)}
    self.assertTrue(
        all(
            (
                (0, 1) == (tqc_index[instr.qubits[0]], tqc_index[instr.qubits[1]])
                for instr in tqc.get_instructions("cx")
            )
        )
    )
@combine(
    opt_level=[0, 1, 2, 3],
    bidirectional=[True, False],
    dsc=(
        "Test direction with transpile using opt_level {opt_level} on"
        " target with multiple 2q gates with bidirectional={bidirectional}"
        "direction [0, 1] is lower error and should be picked."
    ),
    name="opt_level_{opt_level}_bidirectional_{bidirectional}",
)
def test_coupling_unequal_duration_with_backendv2(self, opt_level, bidirectional):
    """Verify the lower-error (0, 1) direction is picked on a BackendV2
    target (see the @combine dsc for the parameter sweep)."""
    qr = QuantumRegister(2)
    circ = QuantumCircuit(qr)
    circ.append(random_unitary(4, seed=1), [1, 0])
    backend = FakeBackend5QV2(bidirectional)
    tqc = transpile(
        circ,
        backend=backend,
        optimization_level=opt_level,
        translation_method="synthesis",
        layout_method="trivial",
    )
    tqc_index = {qubit: index for index, qubit in enumerate(tqc.qubits)}
    self.assertGreaterEqual(len(tqc.get_instructions("cx")), 1)
    for instr in tqc.get_instructions("cx"):
        self.assertEqual((0, 1), (tqc_index[instr.qubits[0]], tqc_index[instr.qubits[1]]))
@combine(
    opt_level=[0, 1, 2, 3],
    dsc=(
        "Test direction with transpile using opt_level {opt_level} on"
        " target with multiple 2q gates"
    ),
    name="opt_level_{opt_level}",
)
def test_non_overlapping_kak_gates_with_backendv2(self, opt_level):
    """Verify synthesis on FakeBackendV2 emits ecr gates in the target's
    supported (1, 0) direction."""
    qr = QuantumRegister(2)
    circ = QuantumCircuit(qr)
    circ.append(random_unitary(4, seed=1), [1, 0])
    backend = FakeBackendV2()
    tqc = transpile(
        circ,
        backend=backend,
        optimization_level=opt_level,
        translation_method="synthesis",
        layout_method="trivial",
    )
    tqc_index = {qubit: index for index, qubit in enumerate(tqc.qubits)}
    self.assertGreaterEqual(len(tqc.get_instructions("ecr")), 1)
    for instr in tqc.get_instructions("ecr"):
        self.assertEqual((1, 0), (tqc_index[instr.qubits[0]], tqc_index[instr.qubits[1]]))
def test_fractional_cx_with_backendv2(self):
    """Test fractional CX gets used if present in target."""
    circuit = QuantumCircuit(QuantumRegister(2))
    circuit.append(random_unitary(4, seed=1), [0, 1])
    synth = UnitarySynthesis(target=FakeMumbaiFractionalCX().target)
    synthesized = synth(circuit)
    index_of = {qubit: idx for idx, qubit in enumerate(synthesized.qubits)}
    rzx_instrs = synthesized.get_instructions("rzx")
    # At least one rzx must appear, and each must use the (0, 1) direction.
    self.assertGreaterEqual(len(rzx_instrs), 1)
    for instr in rzx_instrs:
        self.assertEqual((0, 1), (index_of[instr.qubits[0]], index_of[instr.qubits[1]]))
@combine(
    opt_level=[0, 1, 2, 3],
    dsc=(
        "Test direction with transpile using opt_level {opt_level} on"
        "target with multiple 2q gates available in reverse direction"
    ),
    name="opt_level_{opt_level}",
)
def test_reverse_direction(self, opt_level):
    """Verify the lower-error ecr gate on (0, 1) is chosen even though the
    input unitary is applied on the reversed [1, 0] qubit order."""
    target = Target(2)
    # ecr has an order of magnitude lower error than cx on the same edge.
    target.add_instruction(CXGate(), {(0, 1): InstructionProperties(error=1.2e-6)})
    target.add_instruction(ECRGate(), {(0, 1): InstructionProperties(error=1.2e-7)})
    target.add_instruction(
        UGate(Parameter("theta"), Parameter("phi"), Parameter("lam")), {(0,): None, (1,): None}
    )
    qr = QuantumRegister(2)
    circ = QuantumCircuit(qr)
    circ.append(random_unitary(4, seed=1), [1, 0])
    tqc = transpile(
        circ,
        target=target,
        optimization_level=opt_level,
        translation_method="synthesis",
        layout_method="trivial",
    )
    tqc_index = {qubit: index for index, qubit in enumerate(tqc.qubits)}
    self.assertGreaterEqual(len(tqc.get_instructions("ecr")), 1)
    for instr in tqc.get_instructions("ecr"):
        self.assertEqual((0, 1), (tqc_index[instr.qubits[0]], tqc_index[instr.qubits[1]]))
@combine(
    opt_level=[0, 1, 2, 3],
    dsc=("Test controlled but not supercontrolled basis"),
    name="opt_level_{opt_level}",
)
def test_controlled_basis(self, opt_level):
    """Verify synthesis succeeds and is exact with a controlled (but not
    supercontrolled) two-qubit basis gate, RYY(pi/8)."""
    target = Target(2)
    target.add_instruction(RYYGate(np.pi / 8), {(0, 1): InstructionProperties(error=1.2e-6)})
    target.add_instruction(
        UGate(Parameter("theta"), Parameter("phi"), Parameter("lam")), {(0,): None, (1,): None}
    )
    qr = QuantumRegister(2)
    circ = QuantumCircuit(qr)
    circ.append(random_unitary(4, seed=1), [1, 0])
    tqc = transpile(
        circ,
        target=target,
        optimization_level=opt_level,
        translation_method="synthesis",
        layout_method="trivial",
    )
    # ryy must actually be used, and the result must be unitarily exact.
    self.assertGreaterEqual(len(tqc.get_instructions("ryy")), 1)
    self.assertEqual(Operator(tqc), Operator(circ))
def test_approximation_controlled(self):
    """Check approximate synthesis is no deeper than exact synthesis for a
    controlled-basis target, while exact synthesis stays unitarily exact."""
    tgt = Target(2)
    tgt.add_instruction(RZZGate(np.pi / 10), {(0, 1): InstructionProperties(error=0.006)})
    tgt.add_instruction(RXXGate(np.pi / 3), {(0, 1): InstructionProperties(error=0.01)})
    tgt.add_instruction(
        UGate(Parameter("theta"), Parameter("phi"), Parameter("lam")),
        {(0,): InstructionProperties(error=0.001), (1,): InstructionProperties(error=0.002)},
    )
    circuit = QuantumCircuit(2)
    circuit.append(random_unitary(4, seed=7), [1, 0])
    dag = circuit_to_dag(circuit)
    exact = UnitarySynthesis(target=tgt, approximation_degree=1.0).run(dag)
    approximate = UnitarySynthesis(target=tgt, approximation_degree=0.99).run(dag)
    self.assertGreaterEqual(exact.depth(), approximate.depth())
    self.assertEqual(Operator(dag_to_circuit(exact)), Operator(circuit))
def test_if_simple(self):
    """Test a simple if statement."""
    basis_gates = {"u", "cx"}
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    # Bell-state unitary used as the body of the conditional block.
    qc_uni = QuantumCircuit(2)
    qc_uni.h(0)
    qc_uni.cx(0, 1)
    qc_uni_mat = Operator(qc_uni)
    qc_true_body = QuantumCircuit(2)
    qc_true_body.unitary(qc_uni_mat, [0, 1])
    qc = QuantumCircuit(qr, cr)
    qc.if_test((cr, 1), qc_true_body, [0, 1], [])
    dag = circuit_to_dag(qc)
    cdag = UnitarySynthesis(basis_gates=basis_gates).run(dag)
    cqc = dag_to_circuit(cdag)
    # The if_test's body circuit is stored as the operation's first param;
    # its unitary must be synthesized to the basis while staying equivalent.
    cbody = cqc.data[0].operation.params[0]
    self.assertEqual(cbody.count_ops().keys(), basis_gates)
    self.assertEqual(qc_uni_mat, Operator(cbody))
def test_nested_control_flow(self):
    """Test unrolling nested control flow blocks."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(1)
    qc_uni1 = QuantumCircuit(2)
    qc_uni1.swap(0, 1)
    qc_uni1_mat = Operator(qc_uni1)
    qc = QuantumCircuit(qr, cr)
    # Unitary sits two control-flow levels deep: for_loop -> while_loop.
    with qc.for_loop(range(3)):
        with qc.while_loop((cr, 0)):
            qc.unitary(qc_uni1_mat, [0, 1])
    dag = circuit_to_dag(qc)
    cdag = UnitarySynthesis(basis_gates=["u", "cx"]).run(dag)
    cqc = dag_to_circuit(cdag)
    # Dig the innermost body out: for_loop body is params[2], while_loop
    # body is that body's first operation's params[0].
    cbody = cqc.data[0].operation.params[2].data[0].operation.params[0]
    self.assertEqual(cbody.count_ops().keys(), {"u", "cx"})
    self.assertEqual(qc_uni1_mat, Operator(cbody))
def test_mapping_control_flow(self):
    """Test that inner dags use proper qubit mapping."""
    qr = QuantumRegister(3, "q")
    qc = QuantumCircuit(qr)
    # Create target that supports CX only between 0 and 2.
    fake_target = Target()
    fake_target.add_instruction(CXGate(), {(0, 2): None})
    fake_target.add_instruction(
        UGate(Parameter("t"), Parameter("p"), Parameter("l")),
        {
            (0,): None,
            (1,): None,
            (2,): None,
        },
    )
    qc_uni1 = QuantumCircuit(2)
    qc_uni1.swap(0, 1)
    qc_uni1_mat = Operator(qc_uni1)
    loop_body = QuantumCircuit(2)
    loop_body.unitary(qc_uni1_mat, [0, 1])
    # Loop body uses qubits 0 and 2, mapped to 0 and 1 in the block.
    # If synthesis doesn't handle recursive mapping, it'll incorrectly
    # look for a CX on (0, 1) instead of on (0, 2).
    qc.for_loop((0,), None, loop_body, [0, 2], [])
    dag = circuit_to_dag(qc)
    # Success criterion is simply that this run does not raise.
    UnitarySynthesis(basis_gates=["u", "cx"], target=fake_target).run(dag)
def test_single_qubit_with_target(self):
    """Test input circuit with only 1q works with target."""
    circuit = QuantumCircuit(1)
    circuit.append(ZGate(), [circuit.qubits[0]])
    synth = UnitarySynthesis(target=FakeBelemV2().target)
    # A plain 1q gate (not a unitary) should pass through unchanged.
    output = dag_to_circuit(synth.run(circuit_to_dag(circuit)))
    self.assertEqual(circuit, output)
def test_single_qubit_identity_with_target(self):
    """Test input single qubit identity works with target."""
    circuit = QuantumCircuit(1)
    circuit.unitary([[1.0, 0.0], [0.0, 1.0]], 0)
    synth = UnitarySynthesis(target=FakeBelemV2().target)
    output = dag_to_circuit(synth.run(circuit_to_dag(circuit)))
    # The identity unitary should synthesize away to an empty circuit.
    self.assertEqual(output, QuantumCircuit(1))
def test_unitary_synthesis_with_ideal_and_variable_width_ops(self):
    """Test unitary synthesis works with a target that contains ideal and variadic ops."""
    qc = QuantumCircuit(2)
    qc.unitary(np.eye(4), [0, 1])
    dag = circuit_to_dag(qc)
    target = FakeBelemV2().target
    # Variadic (if_else) and globally-ideal (no-properties) instructions
    # must not break synthesis.
    target.add_instruction(IfElseOp, name="if_else")
    target.add_instruction(ZGate())
    target.add_instruction(ECRGate())
    unitary_synth_pass = UnitarySynthesis(target=target)
    result_dag = unitary_synth_pass.run(dag)
    result_qc = dag_to_circuit(result_dag)
    # The identity unitary synthesizes to an empty circuit.
    self.assertEqual(result_qc, QuantumCircuit(2))
def test_unitary_synthesis_custom_gate_target(self):
    """Verify the unitary is left untouched when the target's only two-qubit
    gate is an opaque custom gate the synthesizer cannot decompose to."""
    qc = QuantumCircuit(2)
    qc.unitary(np.eye(4), [0, 1])
    dag = circuit_to_dag(qc)
    class CustomGate(Gate):
        """Custom Opaque Gate"""
        def __init__(self):
            super().__init__("custom", 2, [])
    target = Target(num_qubits=2)
    target.add_instruction(
        UGate(Parameter("t"), Parameter("p"), Parameter("l")), {(0,): None, (1,): None}
    )
    target.add_instruction(CustomGate(), {(0, 1): None, (1, 0): None})
    unitary_synth_pass = UnitarySynthesis(target=target)
    result_dag = unitary_synth_pass.run(dag)
    result_qc = dag_to_circuit(result_dag)
    # No usable decomposition exists, so the circuit is returned unchanged.
    self.assertEqual(result_qc, qc)
def test_default_does_not_fail_on_no_syntheses(self):
    """Unknown basis gates must leave the unitary untouched, not error."""
    circuit = QuantumCircuit(1)
    circuit.unitary(np.eye(2), [0])
    synth = UnitarySynthesis(["unknown", "gates"])
    self.assertEqual(circuit, synth(circuit))
def test_iswap_no_cx_synthesis_succeeds(self):
    """Test basis set with iswap but no cx can synthesize a circuit"""
    target = Target()
    theta = Parameter("theta")
    # 1q basis: id, rz, sx, x on both qubits with realistic properties.
    i_props = {
        (0,): InstructionProperties(duration=35.5e-9, error=0.000413),
        (1,): InstructionProperties(duration=35.5e-9, error=0.000502),
    }
    target.add_instruction(IGate(), i_props)
    rz_props = {
        (0,): InstructionProperties(duration=0, error=0),
        (1,): InstructionProperties(duration=0, error=0),
    }
    target.add_instruction(RZGate(theta), rz_props)
    sx_props = {
        (0,): InstructionProperties(duration=35.5e-9, error=0.000413),
        (1,): InstructionProperties(duration=35.5e-9, error=0.000502),
    }
    target.add_instruction(SXGate(), sx_props)
    x_props = {
        (0,): InstructionProperties(duration=35.5e-9, error=0.000413),
        (1,): InstructionProperties(duration=35.5e-9, error=0.000502),
    }
    target.add_instruction(XGate(), x_props)
    # The only 2q entangler available is iswap (both directions).
    iswap_props = {
        (0, 1): InstructionProperties(duration=519.11e-9, error=0.01201),
        (1, 0): InstructionProperties(duration=554.66e-9, error=0.01201),
    }
    target.add_instruction(iSwapGate(), iswap_props)
    measure_props = {
        (0,): InstructionProperties(duration=5.813e-6, error=0.0751),
        (1,): InstructionProperties(duration=5.813e-6, error=0.0225),
    }
    target.add_instruction(Measure(), measure_props)
    qc = QuantumCircuit(2)
    cxmat = Operator(CXGate()).to_matrix()
    qc.unitary(cxmat, [0, 1])
    unitary_synth_pass = UnitarySynthesis(target=target)
    dag = circuit_to_dag(qc)
    result_dag = unitary_synth_pass.run(dag)
    result_qc = dag_to_circuit(result_dag)
    # The synthesized circuit must still implement a CX unitary.
    self.assertTrue(np.allclose(Operator(result_qc.to_gate()).to_matrix(), cxmat))
def test_parameterized_basis_gate_in_target(self):
    """Test synthesis with parameterized RXX gate."""
    theta = Parameter("ΞΈ")
    lam = Parameter("Ξ»")
    target = Target(num_qubits=2)
    # Basis gates are added globally (no qargs) with free parameters.
    target.add_instruction(RZGate(lam))
    target.add_instruction(RXGate(theta))
    target.add_instruction(RXXGate(theta))
    qc = QuantumCircuit(2)
    qc.cp(np.pi / 2, 0, 1)
    qc_transpiled = transpile(qc, target=target, optimization_level=3, seed_transpiler=42)
    opcount = qc_transpiled.count_ops()
    # Only the parameterized basis may appear, and equivalence must hold.
    self.assertTrue(set(opcount).issubset({"rz", "rx", "rxx"}))
    self.assertTrue(np.allclose(Operator(qc_transpiled), Operator(qc)))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
https://github.com/qBraid/qiskit-fall-fest-algiers
|
qBraid
|
!qbraid
!qbraid --version
!qbraid jobs enable qbraid_sdk
import qbraid
qbraid.__version__
IBMQ.save_account('YOUR_IBM_KEY')
from qbraid import get_devices
get_devices()
get_devices(
filters={
"type": "Simulator",
"name": {"$regex": "State"},
"vendor": {"$in": ["AWS", "IBM"]},
}
)
get_devices(
filters={
"paradigm": "gate-based",
"type": "QPU",
"numberQubits": {"$gte": 5},
"status": "ONLINE",
}
)
get_devices(filters={"runPackage": "qiskit", "requiresCred": "false"})
from qbraid import device_wrapper, job_wrapper, get_jobs
from qbraid.api import ibmq_least_busy_qpu, verify_config
qbraid_device = device_wrapper("ibm_aer_qasm_sim")
qbraid_device.info
verify_config("IBM")
least_busy = ibmq_least_busy_qpu() # requires credential
least_busy
ibmq_id = "ibm_q_qasm_sim"
qbraid_ibmq_device = device_wrapper(ibmq_id)
qbraid_ibmq_device.vendor_dlo
from qiskit import QuantumCircuit
import numpy as np
qiskit_circuit = QuantumCircuit(1, 1)
qiskit_circuit.h(0)
qiskit_circuit.ry(np.pi / 4, 0)
qiskit_circuit.rz(np.pi / 2, 0)
qiskit_circuit.measure(0, 0)
qiskit_circuit.draw()
shots = 2
# qbraid_ibmq_job = qbraid_ibmq_device.run(qiskit_circuit, shots=shots)
# qbraid_ibmq_job.status()
google_id = "google_cirq_dm_sim"
qbraid_google_device = device_wrapper(google_id)
aws_id = "aws_dm_sim"
qbraid_aws_device = device_wrapper(aws_id) # Credential handled by qBraid Quantum Jobs
qbraid_google_job = qbraid_google_device.run(qiskit_circuit, shots=shots)
qbraid_aws_job = qbraid_aws_device.run(qiskit_circuit, shots=shots)
get_jobs()
# qbraid_ibmq_job.wait_for_final_state()
jobs = [qbraid_google_job, qbraid_aws_job]
google_result, aws_result = [job.result() for job in jobs]
print(f"{qbraid_google_device.name} counts: {google_result.measurement_counts()}")
print(f"{qbraid_aws_device.name} counts: {aws_result.measurement_counts()}")
# print(f"{qbraid_ibmq_device.name} counts: {ibmq_result.measurement_counts()}")
google_result.plot_counts()
aws_result.plot_counts()
ibmq_result.plot_counts()
|
https://github.com/PavanCyborg/Quantum-Algorithms-Benchmarking
|
PavanCyborg
|
import numpy as np
from numpy import pi
# importing Qiskit
from qiskit import QuantumCircuit, transpile, assemble, Aer
from qiskit.visualization import plot_histogram, plot_bloch_multivector
# Hand-built 3-qubit QFT: H plus controlled phases, then a final swap
# to reverse the qubit order.
qc = QuantumCircuit(3)
qc.h(2)
qc.cp(pi/2, 1, 2)  # controlled phase pi/2 between qubits 1 and 2
qc.cp(pi/4, 0, 2)  # controlled phase pi/4 between qubits 0 and 2
qc.h(1)
qc.cp(pi/2, 0, 1)
qc.h(0)
qc.swap(0, 2)  # reverse qubit order to match the QFT output convention
qc.draw()
def qft_rotations(circuit, n):
    """Recursively apply the QFT rotation stage to the first n qubits.

    Fix: the original returned the circuit only from the n == 0 base case
    and implicitly returned None otherwise; now every path returns the
    circuit, so calls can be chained consistently.

    Args:
        circuit: circuit-like object exposing ``h`` and ``cp``.
        n (int): number of qubits to process.

    Returns:
        The same circuit object, for chaining.
    """
    if n == 0:
        return circuit
    n -= 1
    circuit.h(n)
    for qubit in range(n):
        # Controlled phase shrinks by powers of two with qubit distance.
        circuit.cp(pi/2**(n-qubit), qubit, n)
    return qft_rotations(circuit, n)
def swap_registers(circuit, n):
    """Reverse the order of the first n qubits by swapping mirrored pairs."""
    left, right = 0, n - 1
    while left < right:
        circuit.swap(left, right)
        left += 1
        right -= 1
    return circuit
def qft(circuit, n):
    """Apply a full n-qubit QFT in place: rotation stage, then the
    order-reversing swaps. Returns the circuit for chaining."""
    qft_rotations(circuit, n)
    swap_registers(circuit, n)
    return circuit
# Build and draw a 4-qubit QFT using the helpers above.
qc = QuantumCircuit(4)
qft(qc,4)
qc.draw()
# Create the circuit
qc = QuantumCircuit(3)
# Encode the state 5 (101 in binary)
qc.x(0)
qc.x(2)
qc.draw()
sim = Aer.get_backend("aer_simulator")
# Snapshot the statevector of the encoded basis state before the QFT.
qc_init = qc.copy()
qc_init.save_statevector()
statevector = sim.run(qc_init).result().get_statevector()
plot_bloch_multivector(statevector)
# Apply the QFT and visualize the transformed state.
qft(qc, 3)
qc.draw()
qc.save_statevector()
statevector = sim.run(qc).result().get_statevector()
plot_bloch_multivector(statevector)
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import qiskit.qasm3
program = """
OPENQASM 3.0;
include "stdgates.inc";
input float[64] a;
qubit[3] q;
bit[2] mid;
bit[3] out;
let aliased = q[0:1];
gate my_gate(a) c, t {
gphase(a / 2);
ry(a) c;
cx c, t;
}
gate my_phase(a) c {
ctrl @ inv @ gphase(a) c;
}
my_gate(a * 2) aliased[0], q[{1, 2}][0];
measure q[0] -> mid[0];
measure q[1] -> mid[1];
while (mid == "00") {
reset q[0];
reset q[1];
my_gate(a) q[0], q[1];
my_phase(a - pi/2) q[1];
mid[0] = measure q[0];
mid[1] = measure q[1];
}
if (mid[0]) {
let inner_alias = q[{0, 1}];
reset inner_alias;
}
out = measure q;
"""
circuit = qiskit.qasm3.loads(program)
circuit.draw("mpl")
|
https://github.com/greatdevaks/geopython-qiskit
|
greatdevaks
|
import numpy as np
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, transpile, assemble, Aer, IBMQ
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from ibm_quantum_widgets import *
# Importing basic math libraries
from math import pi, sqrt
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
# Select the Aer backend used by get_plot below:
# "statevector_simulator" or "qasm_simulator".
simulator = "statevector_simulator"
# Function to plot the Quantum Circuit / Quantum State
def get_plot(quantum_circuit):
    """Run *quantum_circuit* on the globally selected simulator and plot it.

    Returns a Bloch-multivector plot when ``simulator`` is
    "statevector_simulator", a measurement histogram when it is
    "qasm_simulator", and None for any other backend name.
    """
    backend = Aer.get_backend(simulator)
    payload = assemble(quantum_circuit)
    if simulator == "statevector_simulator":
        state = backend.run(payload).result().get_statevector()
        return plot_bloch_multivector(state)
    elif simulator == "qasm_simulator":
        counts = backend.run(payload).result().get_counts()
        return plot_histogram(counts)
# Creating a single qubit Quantum Circuit
num_qubits = 1
qc = QuantumCircuit(num_qubits) # Creates a Quantum Circuit with 1 qubit
# Visualizing the Quantum Wire
qc.draw() # Drawing the Quantum Circuit
# Visualizing the Quantum Circuit using Simulator (Bloch sphere of |0>)
get_plot(qc) # Drawing the Quantum Circuit
# Pauli-X Gate Demonstration
# Let's do an X-gate on a |0> qubit
qc.x(0) # Applies Pauli-X Gate to the |0> qubit, flipping it to |1>
qc.draw() # Drawing the Quantum Circuit
# Visualize the Quantum Circuit using Simulator
get_plot(qc)
|
https://github.com/BOBO1997/osp_solutions
|
BOBO1997
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import numpy as np
from qiskit import compiler, BasicAer, QuantumRegister
from qiskit.converters import circuit_to_dag
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
def convert_to_basis_gates(circuit):
    """Unroll *circuit* to the u1/u2/u3/cx/id basis with a dedicated Unroller pass.

    NOTE(review): the ``pass_manager`` keyword of ``compiler.transpile`` is a
    legacy API removed in later Qiskit releases — verify against the pinned
    Qiskit version before upgrading.
    """
    # unroll the circuit using the basis u1, u2, u3, cx, and id gates
    unroller = Unroller(basis=['u1', 'u2', 'u3', 'cx', 'id'])
    pm = PassManager(passes=[unroller])
    qc = compiler.transpile(circuit, BasicAer.get_backend('qasm_simulator'), pass_manager=pm)
    return qc
def is_qubit(qb):
    """Return True iff qb has the legacy (QuantumRegister, int) qubit form."""
    if not isinstance(qb, tuple):
        return False
    return isinstance(qb[0], QuantumRegister) and isinstance(qb[1], int)
def is_qubit_list(qbs):
    """Return True iff every element of qbs passes is_qubit (True for empty)."""
    return all(is_qubit(qb) for qb in qbs)
def summarize_circuits(circuits):
    """Summarize circuits based on QuantumCircuit, and four metrics are summarized.
    Number of qubits and classical bits, and number of operations and depth of circuits.
    The average statistic is provided if multiple circuits are inputed.
    Args:
        circuits (QuantumCircuit or [QuantumCircuit]): the to-be-summarized circuits
    """
    if not isinstance(circuits, list):
        circuits = [circuits]
    # Build the report as a list of fragments and join once at the end.
    lines = ["Submitting {} circuits.\n".format(len(circuits))]
    lines.append("============================================================================\n")
    # Accumulated metrics: width, classical bits, size, depth.
    totals = np.zeros(4)
    for idx, circ in enumerate(circuits):
        dag = circuit_to_dag(circ)
        width = dag.width()
        cbits = dag.num_cbits()
        size = dag.size()
        depth = dag.depth()
        totals += np.array([width, cbits, size, depth])
        lines.append(
            "{}-th circuit: {} qubits, {} classical bits and {} operations with depth {}\n op_counts: {}\n".format(
                idx, width, cbits, size, depth, dag.count_ops()))
    if len(circuits) > 1:
        totals /= len(circuits)
        lines.append(
            "Average: {:.2f} qubits, {:.2f} classical bits and {:.2f} operations with depth {:.2f}\n".format(
                totals[0], totals[1], totals[2], totals[3]))
    lines.append("============================================================================\n")
    return ''.join(lines)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
# Jupyter shell magic: install qiskit into the kernel environment.
!pip install qiskit
# check if CUDA is available
import torch
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('CUDA is not available. Training on CPU ...')
else:
    print('CUDA is available! Training on GPU ...')
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
def to_numbers(tensor_list):
    """Convert a list of scalar (0-d) tensors to plain Python numbers.

    Args:
        tensor_list: iterable of scalar torch tensors.

    Returns:
        list: the Python number held by each tensor, in the same order.
    """
    # Comprehension replaces the original append loop (same behavior).
    return [tensor.item() for tensor in tensor_list]
class QuantumCircuit:
    """Parameterized single-rotation circuit executed on a Qiskit backend.

    The circuit applies H to every qubit, then a shared RY(theta) rotation,
    then measures all qubits; `theta` is bound at execution time.

    NOTE(review): this class shadows qiskit's own ``QuantumCircuit``, which
    is imported by name earlier in this file — consider renaming.
    """
    def __init__(self, n_qubits, backend, shots):
        # --- Circuit definition ---
        self._circuit = qiskit.QuantumCircuit(n_qubits)
        all_qubits = [i for i in range(n_qubits)]
        self.theta = qiskit.circuit.Parameter('theta')
        self._circuit.h(all_qubits)
        self._circuit.barrier()
        self._circuit.ry(self.theta, all_qubits)
        self._circuit.measure_all()
        # ---------------------------
        self.backend = backend  # backend the bound circuits are run on
        self.shots = shots      # shots per execution
    def run(self, thetas):
        """Execute the circuit for each angle in *thetas* and return the
        measured expectation value as a 1-element numpy array.

        NOTE(review): ``get_counts()`` is treated as a single counts dict and
        its bitstring keys are cast directly to float — this only works for a
        one-qubit circuit with a single bound parameter; confirm callers
        never pass more than one theta or more than one qubit.
        """
        t_qc = transpile(self._circuit,
                         self.backend)
        qobj = assemble(t_qc,
                        shots=self.shots,
                        parameter_binds = [{self.theta: theta} for theta in thetas])
        job = self.backend.run(qobj)
        result = job.result().get_counts()
        counts = np.array(list(result.values()))
        states = np.array(list(result.keys())).astype(float)
        # Compute probabilities for each state
        probabilities = counts / self.shots
        # Get state expectation
        expectation = np.sum(states * probabilities)
        return np.array([expectation])
class HybridFunction(Function):
    """ Hybrid quantum - classical function definition.

    Custom autograd Function whose forward pass evaluates the quantum
    circuit and whose backward pass estimates gradients with a
    finite-difference / parameter-shift style two-point evaluation.
    """
    @staticmethod
    def forward(ctx, input, quantum_circuit, shift):
        """ Forward pass computation """
        # Stash what backward() needs: the shift used for the two-point
        # gradient estimate and the circuit to re-evaluate.
        ctx.shift = shift
        ctx.quantum_circuit = quantum_circuit
        # assumes `input` is a (1, k) tensor of angles — TODO confirm
        expectation_z = ctx.quantum_circuit.run(input[0].tolist())
        result = torch.tensor([expectation_z])
        ctx.save_for_backward(input, result)
        return result
    @staticmethod
    def backward(ctx, grad_output):
        """ Backward pass computation """
        input, expectation_z = ctx.saved_tensors
        input_list = np.array(input.tolist())
        # Evaluate the circuit at theta + shift and theta - shift per row.
        shift_right = input_list + np.ones(input_list.shape) * ctx.shift
        shift_left = input_list - np.ones(input_list.shape) * ctx.shift
        gradients = []
        for i in range(len(input_list)):
            expectation_right = ctx.quantum_circuit.run(shift_right[i])
            expectation_left = ctx.quantum_circuit.run(shift_left[i])
            # NOTE(review): the difference E(+s) - E(-s) is not rescaled;
            # confirm the unscaled estimate is intended here.
            gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
            gradients.append(gradient)
        gradients = np.array([gradients]).T
        # Gradients are returned for `input` only; the circuit and shift
        # arguments are non-differentiable (hence the two None values).
        return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
    """Hybrid quantum-classical layer: wraps a one-qubit parameterized
    circuit so it can be dropped into an nn.Module pipeline."""

    def __init__(self, backend, shots, shift):
        super().__init__()
        # Single-qubit circuit evaluated on `backend` with `shots` shots.
        self.quantum_circuit = QuantumCircuit(1, backend, shots)
        # Shift used by the custom autograd backward pass.
        self.shift = shift

    def forward(self, input):
        return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
# Build a binary (airplane vs automobile) subset of CIFAR-10 and split it
# into train/validation loaders with batch size 1.
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
#cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
# 200 filtered samples split 150 train / 50 validation.
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
@torch.no_grad()
def get_all_preds(model, test_loader):
    """Run `model` over every batch in `test_loader` (gradients disabled)
    and return all predictions concatenated along dim 0."""
    collected = []
    for images, _labels in test_loader:
        collected.append(model(images))
    if not collected:
        return torch.tensor([])
    return torch.cat(collected, dim=0)
import numpy as np
import matplotlib.pyplot as plt
# Preview the first few training samples with their labels. Axes are
# filled right-to-left because the counter is decremented each iteration.
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
    images, targets = data_iter.__next__()
    images=images.squeeze()
    # NOTE(review): images[0] is the first channel of an RGB CIFAR image
    # rendered with a gray colormap — confirm this preview is intended.
    axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
    axes[n_samples_show - 1].set_xticks([])
    axes[n_samples_show - 1].set_yticks([])
    axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
    n_samples_show -= 1
import torchvision
# Build the matching binary (airplane vs automobile) subset of the CIFAR-10
# test split; no shuffling so evaluation order is deterministic.
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (n automobiles and n aeroplanes in the test set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
class Net(nn.Module):
    """CNN feature extractor followed by a single-qubit quantum layer.

    Outputs a two-column tensor (p, 1 - p) so downstream code can treat it
    as a two-class prediction. Expects batch size 1 (hard-coded view).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(500, 500)
        self.fc2 = nn.Linear(500, 1)
        self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)
        out = self.dropout(out)
        out = out.view(1, -1)
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        out = self.hybrid(out)
        return torch.cat((out, 1 - out), -1)
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment: train the hybrid Net for 20 epochs, run the validation
# loop, checkpoint on improvement, then score the test set. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): this loop still calls backward() and optimizer.step(), so
# the model keeps being optimized on the validation split — the reported
# validation loss is not a held-out evaluation.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
    # Checkpoint when the last batch's validation loss improves.
    if (validation_loss)<=(valid_loss_min):
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            validation_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment: same train/validate/test script, 30 epochs. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): backward() and optimizer.step() run here too, so the model
# is still being optimized on the validation split.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
    # Checkpoint when the last batch's validation loss improves.
    if (validation_loss)<=(valid_loss_min):
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            validation_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment: repeated identical run, 30 epochs. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): backward() and optimizer.step() run here too, so the model
# is still being optimized on the validation split.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
    # Checkpoint when the last batch's validation loss improves.
    if (validation_loss)<=(valid_loss_min):
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            validation_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment: same train/validate/test script, 40 epochs. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): backward() and optimizer.step() run here too, so the model
# is still being optimized on the validation split.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
    # Checkpoint when the last batch's validation loss improves.
    if (validation_loss)<=(valid_loss_min):
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            validation_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment: same train/validate/test script, 50 epochs. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 50
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): backward() and optimizer.step() run here too, so the model
# is still being optimized on the validation split.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
    # Checkpoint when the last batch's validation loss improves.
    if (validation_loss)<=(valid_loss_min):
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            validation_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
###This is CPU run not GPU
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment: same train/validate/test script, 30 epochs, CPU. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): backward() and optimizer.step() run here too, so the model
# is still being optimized on the validation split.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
    # Checkpoint when the last batch's validation loss improves.
    if (validation_loss)<=(valid_loss_min):
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            validation_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
class Net(nn.Module):
    """Variant of the hybrid CNN with extra pooling/dropout stages; fc1
    therefore takes only 20 flattened features. Expects batch size 1."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(20, 500)
        self.fc2 = nn.Linear(500, 1)
        self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)
        out = F.max_pool2d(out, 2)               # Added layer
        out = self.dropout(F.relu(self.conv2(out)))  # Added layer (dropout)
        out = F.max_pool2d(out, 2)
        out = self.dropout(out)
        out = out.view(1, -1)
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        out = self.hybrid(out)
        return torch.cat((out, 1 - out), -1)
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment with the second Net variant: 5 epochs. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): backward() and optimizer.step() run here too, so the model
# is still being optimized on the validation split.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
    # Checkpoint when the last batch's validation loss improves.
    if (validation_loss)<=(valid_loss_min):
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            validation_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment with the second Net variant: 20 epochs. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): backward() and optimizer.step() run here too, so the model
# is still being optimized on the validation split.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
    # Checkpoint when the last batch's validation loss improves.
    if (validation_loss)<=(valid_loss_min):
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            validation_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment with the second Net variant: 30 epochs. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): backward() and optimizer.step() run here too, so the model
# is still being optimized on the validation split.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
    # Checkpoint when the last batch's validation loss improves.
    if (validation_loss)<=(valid_loss_min):
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            validation_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment with the second Net variant: 40 epochs. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): backward() and optimizer.step() run here too, so the model
# is still being optimized on the validation split.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
    # Checkpoint when the last batch's validation loss improves.
    if (validation_loss)<=(valid_loss_min):
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            validation_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
class Net(nn.Module):
    """Third hybrid-CNN variant: adds an explicit flatten before the final
    dropout/view. fc1 takes 20 flattened features; batch size 1 assumed."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(20, 500)
        self.fc2 = nn.Linear(500, 1)
        self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)
        out = F.max_pool2d(out, 2)
        out = self.dropout(F.relu(self.conv2(out)))
        out = F.max_pool2d(out, 2)
        out = torch.flatten(out, 1)  # Added layer
        out = self.dropout(out)
        out = out.view(1, -1)
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        out = self.hybrid(out)
        return torch.cat((out, 1 - out), -1)
%matplotlib inline
import matplotlib.pyplot as plt
# --- Experiment with the third Net variant: 5 epochs. Unlike the other
# runs, this one has no checkpoint-on-improvement block. ---
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
# NOTE(review): backward() and optimizer.step() run here too, so the model
# is still being optimized on the validation split.
model.eval()
for epoch in range(epochs):
    valid_loss = []
    for batch_idx, (data, target) in enumerate(valid_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        validation_loss = loss_func(output, target)
        # Backward pass
        validation_loss.backward()
        # Optimize the weights
        optimizer.step()
        valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    #print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
# Final evaluation on the test set with gradients disabled.
total_loss=[]
model.eval()
with torch.no_grad():
    correct = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
    """Convolutional classifier ending in a single-qubit hybrid quantum layer.

    The hybrid layer produces one scalar x; the network returns the pair
    (x, 1 - x) as a two-class output.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)   # 3-channel input -> 10 feature maps
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)  # 10 -> 20 feature maps
        self.dropout = nn.Dropout2d()                  # defined but intentionally skipped in forward()
        # assumes the flattened conv output has exactly 500 features -- TODO confirm input image size
        self.fc1 = nn.Linear(500, 500)
        self.fc2 = nn.Linear(500, 1)
        # quantum head: qasm simulator backend, 100 shots, pi/2 shift parameter
        self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        #x = self.dropout(x) omitting this layer
        # NOTE(review): view(1, -1) hard-codes batch size 1 -- confirm the data loaders use batch_size=1
        x = x.view(1, -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        x = self.hybrid(x)
        # two-element output (x, 1-x) so a 2-class loss can be applied
        return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 50
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.CrossEntropyLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/mmetcalf14/Hamiltonian_Downfolding_IBM
|
mmetcalf14
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import numpy as np
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.aqua.components.variational_forms import VariationalForm
class SwapRZ(VariationalForm):
    """Layers of Swap+Z rotations followed by entangling gates."""

    # Pluggable configuration consumed by the Aqua component framework
    # (JSON schema for the constructor arguments plus default dependencies).
    CONFIGURATION = {
        'name': 'SWAPRZ',
        'description': 'SWAPRZ Variational Form',
        'input_schema': {
            '$schema': 'http://json-schema.org/schema#',
            'id': 'swaprz_schema',
            'type': 'object',
            'properties': {
                'depth': {
                    'type': 'integer',
                    'default': 3,
                    'minimum': 1
                },
                'entanglement': {
                    'type': 'string',
                    'default': 'full',
                    'oneOf': [
                        {'enum': ['full', 'linear']}
                    ]
                },
                'entangler_map': {
                    'type': ['array', 'null'],
                    'default': None
                },
                'skip_unentangled_qubits': {
                    'type': 'boolean',
                    'default': False
                }
            },
            'additionalProperties': False
        },
        'depends': [
            {
                'pluggable_type': 'initial_state',
                'default': {
                    'name': 'ZERO',
                }
            },
        ],
    }

    def __init__(self, num_qubits, depth=3, entangler_map=None,
                 entanglement='full', initial_state=None, skip_unentangled_qubits=False):
        """Constructor.

        Args:
            num_qubits (int) : number of qubits
            depth (int) : number of rotation layers
            entangler_map (list[list]): describe the connectivity of qubits, each list describes
                                        [source, target], or None for full entanglement.
                                        Note that the order is the list is the order of
                                        applying the two-qubit gate.
            entanglement (str): 'full' or 'linear'
            initial_state (InitialState): an initial state object
            skip_unentangled_qubits (bool): skip the qubits not in the entangler_map
        """
        self.validate(locals())
        super().__init__()
        self._num_qubits = num_qubits
        self._depth = depth
        # Build the entangler map from the named pattern unless one was given explicitly.
        if entangler_map is None:
            self._entangler_map = VariationalForm.get_entangler_map(entanglement, num_qubits)
        else:
            self._entangler_map = VariationalForm.validate_entangler_map(entangler_map, num_qubits)
        # determine the entangled qubits
        all_qubits = []
        for src, targ in self._entangler_map:
            all_qubits.extend([src, targ])
        self._entangled_qubits = sorted(list(set(all_qubits)))
        self._initial_state = initial_state
        self._skip_unentangled_qubits = skip_unentangled_qubits
        # for the first layer: one rz parameter per (entangled) qubit
        self._num_parameters = len(self._entangled_qubits) if self._skip_unentangled_qubits \
            else self._num_qubits
        # for repeated block: per depth, one shared parameter per two-qubit
        # XX+YY interaction plus one rz parameter per entangled qubit
        self._num_parameters += (len(self._entangled_qubits) + len(self._entangler_map)) * depth
        self._bounds = [(-np.pi, np.pi)] * self._num_parameters

    def construct_circuit(self, parameters, q=None):
        """
        Construct the variational form, given its parameters.

        Args:
            parameters (numpy.ndarray): circuit parameters
            q (QuantumRegister): Quantum Register for the circuit.

        Returns:
            QuantumCircuit: a quantum circuit with given `parameters`

        Raises:
            ValueError: the number of parameters is incorrect.
        """
        if len(parameters) != self._num_parameters:
            raise ValueError('The number of parameters has to be {}'.format(self._num_parameters))
        if q is None:
            q = QuantumRegister(self._num_qubits, name='q')
        # Start from the provided initial state, or an empty circuit on q.
        if self._initial_state is not None:
            circuit = self._initial_state.construct_circuit('circuit', q)
        else:
            circuit = QuantumCircuit(q)

        # First layer: single-qubit Z rotations (u1 == rz up to global phase).
        param_idx = 0
        for qubit in range(self._num_qubits):
            if not self._skip_unentangled_qubits or qubit in self._entangled_qubits:
                circuit.u1(parameters[param_idx], q[qubit])  # rz
                param_idx += 1
        # Repeated blocks: for each pair, an XX then a YY interaction sharing
        # one parameter, followed by a layer of Z rotations.
        for block in range(self._depth):
            circuit.barrier(q)
            for src, targ in self._entangler_map:
                # XX: basis change with u2(0, pi) (Hadamard up to phase) around a ZZ core
                circuit.u2(0, np.pi, q[src])
                circuit.u2(0, np.pi, q[targ])
                circuit.cx(q[src], q[targ])
                circuit.u1(parameters[param_idx], q[targ])
                circuit.cx(q[src], q[targ])
                circuit.u2(0, np.pi, q[src])
                circuit.u2(0, np.pi, q[targ])
                # YY: basis change with u3 rotations around the same ZZ core
                circuit.u3(np.pi / 2, -np.pi / 2, np.pi / 2, q[src])
                circuit.u3(np.pi / 2, -np.pi / 2, np.pi / 2, q[targ])
                circuit.cx(q[src], q[targ])
                circuit.u1(parameters[param_idx], q[targ])
                circuit.cx(q[src], q[targ])
                circuit.u3(-np.pi / 2, -np.pi / 2, np.pi / 2, q[src])
                circuit.u3(-np.pi / 2, -np.pi / 2, np.pi / 2, q[targ])
                # XX and YY deliberately share one parameter per pair
                param_idx += 1
            circuit.barrier(q)
            for qubit in self._entangled_qubits:
                circuit.u1(parameters[param_idx], q[qubit])  # rz
                param_idx += 1
        circuit.barrier(q)
        return circuit
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# Electronic-structure problem: H2 at 0.735 Angstrom bond length, STO-3G basis.
from qiskit_nature.units import DistanceUnit
from qiskit_nature.second_q.drivers import PySCFDriver

driver = PySCFDriver(
    atom="H 0 0 0; H 0 0 0.735",
    basis="sto3g",
    charge=0,
    spin=0,
    unit=DistanceUnit.ANGSTROM,
)
es_problem = driver.run()

# Map fermionic operators to qubit operators.
from qiskit_nature.second_q.mappers import JordanWignerMapper

mapper = JordanWignerMapper()

# Classical reference solver (exact diagonalization).
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver

numpy_solver = NumPyMinimumEigensolver()

# VQE with a chemistry-inspired UCCSD ansatz on a Hartree-Fock initial state.
from qiskit.algorithms.minimum_eigensolvers import VQE
from qiskit.algorithms.optimizers import SLSQP
from qiskit.primitives import Estimator
from qiskit_nature.second_q.circuit.library import HartreeFock, UCCSD

ansatz = UCCSD(
    es_problem.num_spatial_orbitals,
    es_problem.num_particles,
    mapper,
    initial_state=HartreeFock(
        es_problem.num_spatial_orbitals,
        es_problem.num_particles,
        mapper,
    ),
)
vqe_solver = VQE(Estimator(), ansatz, SLSQP())
# Start the optimization from the Hartree-Fock point (all parameters zero).
vqe_solver.initial_point = [0.0] * ansatz.num_parameters

# Alternative VQE with a hardware-efficient TwoLocal ansatz (constructed but not run below).
from qiskit.algorithms.minimum_eigensolvers import VQE
from qiskit.circuit.library import TwoLocal

tl_circuit = TwoLocal(
    rotation_blocks=["h", "rx"],
    entanglement_blocks="cz",
    entanglement="full",
    reps=2,
    parameter_prefix="y",
)
another_solver = VQE(Estimator(), tl_circuit, SLSQP())

# Solve the ground state with VQE, then with the exact classical solver for comparison.
from qiskit_nature.second_q.algorithms import GroundStateEigensolver

calc = GroundStateEigensolver(mapper, vqe_solver)
res = calc.solve(es_problem)
print(res)
calc = GroundStateEigensolver(mapper, numpy_solver)
res = calc.solve(es_problem)
print(res)

# Vibrational-structure problem: CO2 force constants from a Gaussian log file,
# truncated to 2-body terms in a harmonic basis with 2 modals per mode.
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver
from qiskit_nature.second_q.drivers import GaussianForcesDriver
from qiskit_nature.second_q.mappers import DirectMapper
from qiskit_nature.second_q.problems import HarmonicBasis

driver = GaussianForcesDriver(logfile="aux_files/CO2_freq_B3LYP_631g.log")
basis = HarmonicBasis([2, 2, 2, 2])
vib_problem = driver.run(basis=basis)
vib_problem.hamiltonian.truncation_order = 2
mapper = DirectMapper()

# Compare solving with and without the problem's default eigenstate filter
# (the filter restricts the search to physically valid occupation states).
solver_without_filter = NumPyMinimumEigensolver()
solver_with_filter = NumPyMinimumEigensolver(
    filter_criterion=vib_problem.get_default_filter_criterion()
)
gsc_wo = GroundStateEigensolver(mapper, solver_without_filter)
result_wo = gsc_wo.solve(vib_problem)
gsc_w = GroundStateEigensolver(mapper, solver_with_filter)
result_w = gsc_w.solve(vib_problem)
print(result_wo)
print("\n\n")
print(result_w)

import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.circuit.quantumcircuitdata import CircuitInstruction
from qiskit.circuit import Measure
from qiskit.circuit.library import HGate, CXGate

# Build a measured Bell-state circuit from a flat list of CircuitInstruction
# records (H on q0, CX q0->q1, then measure both qubits into the classical bits).
qubits = QuantumRegister(2)
clbits = ClassicalRegister(2)
ops = [
    (HGate(), [qubits[0]], []),
    (CXGate(), [qubits[0], qubits[1]], []),
    (Measure(), [qubits[0]], [clbits[0]]),
    (Measure(), [qubits[1]], [clbits[1]]),
]
instructions = [CircuitInstruction(gate, qargs, cargs) for gate, qargs, cargs in ops]
circuit = QuantumCircuit.from_instructions(instructions)
circuit.draw("mpl")
|
https://github.com/Raunak-Singh-Inventor/quantum_algorithms_in_qiskit
|
Raunak-Singh-Inventor
|
my_list = [1, 3, 5, 6, 7, 9, 10, 3]

def the_oracle(my_input):
    """Return True iff my_input equals the winning value (7)."""
    winner = 7
    # BUG FIX: use '==', not 'is' -- identity comparison on ints only works
    # by accident of CPython's small-integer caching and is not guaranteed.
    return my_input == winner

# Linear search, counting how many oracle calls were needed.
for index, trial_number in enumerate(my_list):
    if the_oracle(trial_number):  # no need to compare a bool with 'is True'
        print("Winner found at index: %i" % index)
        print("%i calls to the Oracle used" % (index + 1))
        break
# Two-qubit Grover search demo: oracle marks |11>, one diffusion step suffices.
from qiskit import *
import matplotlib.pyplot as plt
import numpy as np

# define the oracle circuit
# CZ flips the phase of |11> only, marking it as the "winner" state.
oracle = QuantumCircuit(2, name="oracle")
oracle.cz(0, 1)
oracle.to_gate()
oracle.draw()

# Inspect the statevector after superposition + oracle (no measurement yet).
backend = Aer.get_backend("statevector_simulator")
grover_circ = QuantumCircuit(2, 2)
grover_circ.h([0, 1])  # uniform superposition over all four basis states
grover_circ.append(oracle, [0, 1])
grover_circ.draw()
job = execute(grover_circ, backend)
result = job.result()
sv = result.get_statevector()
np.around(sv, 2)  # |11> now carries a negative amplitude

# Diffusion operator: reflection about the mean amplitude (H Z CZ H sandwich).
reflection = QuantumCircuit(2, name="reflection")
reflection.h([0, 1])
reflection.z([0, 1])
reflection.cz(0, 1)
reflection.h([0, 1])
reflection.to_gate()
reflection.draw()

# Full Grover circuit with measurement; one iteration finds |11> with certainty
# for N=4 and a single marked item.
backend = Aer.get_backend("qasm_simulator")
grover_circ = QuantumCircuit(2, 2)
grover_circ.h([0, 1])
grover_circ.append(oracle, [0, 1])
grover_circ.append(reflection, [0, 1])
grover_circ.measure([0, 1], [0, 1])
grover_circ.draw()
job = execute(grover_circ, backend, shots=10000)
result = job.result()
result.get_counts()  # expect (essentially) all 10000 shots on '11'
|
https://github.com/swe-train/qiskit__qiskit
|
swe-train
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Generic BackendV2 class that with a simulated ``run``."""
from __future__ import annotations
import warnings
from collections.abc import Iterable
import numpy as np
from qiskit import pulse
from qiskit.pulse.instruction_schedule_map import InstructionScheduleMap
from qiskit.circuit import QuantumCircuit, Instruction
from qiskit.circuit.controlflow import (
IfElseOp,
WhileLoopOp,
ForLoopOp,
SwitchCaseOp,
BreakLoopOp,
ContinueLoopOp,
)
from qiskit.circuit.library.standard_gates import get_standard_gate_name_mapping
from qiskit.exceptions import QiskitError
from qiskit.transpiler import CouplingMap, Target, InstructionProperties, QubitProperties
from qiskit.providers import Options
from qiskit.providers.basic_provider import BasicSimulator
from qiskit.providers.backend import BackendV2
from qiskit.providers.models import (
PulseDefaults,
Command,
)
from qiskit.qobj import PulseQobjInstruction, PulseLibraryItem
from qiskit.utils import optionals as _optionals
# Noise default values/ranges for duration and error of supported
# instructions. There are two possible formats:
# - (min_duration, max_duration, min_error, max_error),
# if the defaults are ranges.
# - (duration, error), if the defaults are fixed values.
# Durations are in seconds (converted to dt units downstream in
# _add_noisy_instruction_to_target); errors are probabilities.
_NOISE_DEFAULTS = {
    "cx": (7.992e-08, 8.99988e-07, 1e-5, 5e-3),
    "ecr": (7.992e-08, 8.99988e-07, 1e-5, 5e-3),
    "cz": (7.992e-08, 8.99988e-07, 1e-5, 5e-3),
    "id": (2.997e-08, 5.994e-08, 9e-5, 1e-4),
    "rz": (0.0, 0.0),  # virtual gate: zero duration, zero error (fixed values)
    "sx": (2.997e-08, 5.994e-08, 9e-5, 1e-4),
    "x": (2.997e-08, 5.994e-08, 9e-5, 1e-4),
    "measure": (6.99966e-07, 1.500054e-06, 1e-5, 5e-3),
    "delay": (None, None),  # no noise properties attached
    "reset": (None, None),
}
# Fallback values for gates with unknown noise default ranges.
_NOISE_DEFAULTS_FALLBACK = {
"1-q": (2.997e-08, 5.994e-08, 9e-5, 1e-4),
"multi-q": (7.992e-08, 8.99988e-07, 5e-3),
}
# Ranges to sample qubit properties from.
# dt is a fixed value (seconds); t1/t2 (seconds) and frequency (Hz) are
# (low, high) ranges sampled uniformly in _build_generic_target.
_QUBIT_PROPERTIES = {
    "dt": 0.222e-9,
    "t1": (100e-6, 200e-6),
    "t2": (100e-6, 200e-6),
    "frequency": (5e9, 5.5e9),
}
# The number of samples determines the pulse durations of the corresponding
# instructions. This default defines pulses with durations in multiples of
# 16 dt for consistency with the pulse granularity of real IBM devices, but
# keeps the number smaller than what would be realistic for
# manageability. If needed, more realistic durations could be added in the
# future (order of 160dt for 1q gates, 1760dt for 2q gates and measure).
# Three default pulses of increasing length; which one is used per gate is
# decided in _get_calibration_sequence (shortest for 1q, longer for 2q/measure).
_PULSE_LIBRARY = [
    PulseLibraryItem(name="pulse_1", samples=np.linspace(0, 1.0, 16, dtype=np.complex128)),  # 16dt
    PulseLibraryItem(name="pulse_2", samples=np.linspace(0, 1.0, 32, dtype=np.complex128)),  # 32dt
    PulseLibraryItem(name="pulse_3", samples=np.linspace(0, 1.0, 64, dtype=np.complex128)),  # 64dt
]
class GenericBackendV2(BackendV2):
    """Generic :class:`~.BackendV2` implementation with a configurable constructor. This class will
    return a :class:`~.BackendV2` instance that runs on a local simulator (in the spirit of fake
    backends) and contains all the necessary information to test backend-interfacing components, such
    as the transpiler. A :class:`.GenericBackendV2` instance can be constructed from as little as a
    specified ``num_qubits``, but users can additionally configure the basis gates, coupling map,
    ability to run dynamic circuits (control flow instructions), instruction calibrations and dtm.
    The remainder of the backend properties are generated by randomly sampling
    from default ranges extracted from historical IBM backend data. The seed for this random
    generation can be fixed to ensure the reproducibility of the backend output.
    This backend only supports gates in the standard library, if you need a more flexible backend,
    there is always the option to directly instantiate a :class:`.Target` object to use for
    transpilation.
    """
    def __init__(
        self,
        num_qubits: int,
        basis_gates: list[str] | None = None,
        *,
        coupling_map: list[list[int]] | CouplingMap | None = None,
        control_flow: bool = False,
        calibrate_instructions: bool | InstructionScheduleMap | None = None,
        dtm: float | None = None,
        seed: int | None = None,
    ):
        """
        Args:
            num_qubits: Number of qubits that will be used to construct the backend's target.
                Note that, while there is no limit in the size of the target that can be
                constructed, this backend runs on local noisy simulators, and these might
                present limitations in the number of qubits that can be simulated.
            basis_gates: List of basis gate names to be supported by
                the target. These must be part of the standard qiskit circuit library.
                The default set of basis gates is ``["id", "rz", "sx", "x", "cx"]``
                The ``"reset"``, ``"delay"``, and ``"measure"`` instructions are
                always supported by default, even if not specified via ``basis_gates``.
            coupling_map: Optional coupling map
                for the backend. Multiple formats are supported:
                #. :class:`~.CouplingMap` instance
                #. List, must be given as an edge list representing the two qubit interactions
                supported by the backend, for example:
                ``[[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]``
                If ``coupling_map`` is specified, it must match the number of qubits
                specified in ``num_qubits``. If ``coupling_map`` is not specified,
                a fully connected coupling map will be generated with ``num_qubits``
                qubits.
            control_flow: Flag to enable control flow directives on the target
                (defaults to False).
            calibrate_instructions: Instruction calibration settings, this argument
                supports both boolean and :class:`.InstructionScheduleMap` as
                input types, and is ``None`` by default:
                #. If ``calibrate_instructions==None``, no calibrations will be added to the target.
                #. If ``calibrate_instructions==True``, all gates will be calibrated for all
                qubits using the default pulse schedules generated internally.
                #. If ``calibrate_instructions==False``, all gates will be "calibrated" for
                all qubits with an empty pulse schedule.
                #. If an :class:`.InstructionScheduleMap` instance is given, the calibrations
                in this instruction schedule map will be appended to the target
                instead of the default pulse schedules (this allows for custom calibrations).
            dtm: System time resolution of output signals in nanoseconds.
                None by default.
            seed: Optional seed for generation of default values.
        """
        super().__init__(
            provider=None,
            name=f"generic_backend_{num_qubits}q",
            description=f"This is a device with {num_qubits} qubits and generic settings.",
            backend_version="",
        )
        self._sim = None  # simulator is created lazily in _setup_sim() on first run()
        self._rng = np.random.default_rng(seed=seed)
        self._dtm = dtm
        self._num_qubits = num_qubits
        self._control_flow = control_flow
        self._calibrate_instructions = calibrate_instructions
        self._supported_gates = get_standard_gate_name_mapping()
        if coupling_map is None:
            self._coupling_map = CouplingMap().from_full(num_qubits)
        else:
            if isinstance(coupling_map, CouplingMap):
                self._coupling_map = coupling_map
            else:
                self._coupling_map = CouplingMap(coupling_map)
            if num_qubits != self._coupling_map.size():
                raise QiskitError(
                    f"The number of qubits (got {num_qubits}) must match "
                    f"the size of the provided coupling map (got {self._coupling_map.size()})."
                )
        self._basis_gates = (
            basis_gates if basis_gates is not None else ["cx", "id", "rz", "sx", "x"]
        )
        # These instructions are always supported regardless of the basis set.
        for name in ["reset", "delay", "measure"]:
            if name not in self._basis_gates:
                self._basis_gates.append(name)
        self._build_generic_target()
        self._build_default_channels()
    @property
    def target(self):
        return self._target
    @property
    def max_circuits(self):
        return None
    @property
    def dtm(self) -> float:
        """Return the system time resolution of output signals"""
        # converting `dtm` from nanoseconds to seconds
        return self._dtm * 1e-9 if self._dtm is not None else None
    @property
    def meas_map(self) -> list[list[int]]:
        return self._target.concurrent_measurements
    def _build_default_channels(self) -> None:
        # One acquire/drive/measure channel per qubit; one control channel per
        # coupling-map edge, indexed by that edge's position in the map.
        channels_map = {
            "acquire": {(i,): [pulse.AcquireChannel(i)] for i in range(self.num_qubits)},
            "drive": {(i,): [pulse.DriveChannel(i)] for i in range(self.num_qubits)},
            "measure": {(i,): [pulse.MeasureChannel(i)] for i in range(self.num_qubits)},
            "control": {
                (edge): [pulse.ControlChannel(i)] for i, edge in enumerate(self._coupling_map)
            },
        }
        setattr(self, "channels_map", channels_map)
    def _get_noise_defaults(self, name: str, num_qubits: int) -> tuple:
        """Return noise default values/ranges for duration and error of supported
        instructions. There are two possible formats:
        - (min_duration, max_duration, min_error, max_error),
        if the defaults are ranges.
        - (duration, error), if the defaults are fixed values.
        """
        if name in _NOISE_DEFAULTS:
            return _NOISE_DEFAULTS[name]
        if num_qubits == 1:
            return _NOISE_DEFAULTS_FALLBACK["1-q"]
        return _NOISE_DEFAULTS_FALLBACK["multi-q"]
    def _get_calibration_sequence(
        self, inst: str, num_qubits: int, qargs: tuple[int]
    ) -> list[PulseQobjInstruction]:
        """Return calibration pulse sequence for given instruction (defined by name and num_qubits)
        acting on qargs.
        """
        pulse_library = _PULSE_LIBRARY
        # Note that the calibration pulses are different for
        # 1q gates vs 2q gates vs measurement instructions.
        if inst == "measure":
            sequence = [
                PulseQobjInstruction(
                    name="acquire",
                    duration=1792,
                    t0=0,
                    qubits=qargs,
                    memory_slot=qargs,
                )
            ] + [PulseQobjInstruction(name=pulse_library[1].name, ch=f"m{i}", t0=0) for i in qargs]
            return sequence
        if num_qubits == 1:
            return [
                PulseQobjInstruction(name="fc", ch=f"u{qargs[0]}", t0=0, phase="-P0"),
                PulseQobjInstruction(name=pulse_library[0].name, ch=f"d{qargs[0]}", t0=0),
            ]
        return [
            PulseQobjInstruction(name=pulse_library[1].name, ch=f"d{qargs[0]}", t0=0),
            PulseQobjInstruction(name=pulse_library[2].name, ch=f"u{qargs[0]}", t0=0),
            PulseQobjInstruction(name=pulse_library[1].name, ch=f"d{qargs[1]}", t0=0),
            PulseQobjInstruction(name="fc", ch=f"d{qargs[1]}", t0=0, phase=2.1),
        ]
    def _generate_calibration_defaults(self) -> PulseDefaults:
        """Generate pulse calibration defaults as specified with `self._calibrate_instructions`.
        If `self._calibrate_instructions` is True, the pulse schedules will be generated from
        a series of default calibration sequences. If `self._calibrate_instructions` is False,
        the pulse schedules will contain empty calibration sequences, but still be generated and
        added to the target.
        """
        # If self._calibrate_instructions==True, this method
        # will generate default pulse schedules for all gates in self._basis_gates,
        # except for `delay` and `reset`.
        calibration_buffer = self._basis_gates.copy()
        for inst in ["delay", "reset"]:
            calibration_buffer.remove(inst)
        # List of calibration commands (generated from sequences of PulseQobjInstructions)
        # corresponding to each calibrated instruction. Note that the calibration pulses
        # are different for 1q gates vs 2q gates vs measurement instructions.
        cmd_def = []
        for inst in calibration_buffer:
            num_qubits = self._supported_gates[inst].num_qubits
            qarg_set = self._coupling_map if num_qubits > 1 else list(range(self.num_qubits))
            if inst == "measure":
                cmd_def.append(
                    Command(
                        name=inst,
                        qubits=qarg_set,
                        sequence=(
                            self._get_calibration_sequence(inst, num_qubits, qarg_set)
                            if self._calibrate_instructions
                            else []
                        ),
                    )
                )
            else:
                for qarg in qarg_set:
                    qubits = [qarg] if num_qubits == 1 else qarg
                    cmd_def.append(
                        Command(
                            name=inst,
                            qubits=qubits,
                            sequence=(
                                self._get_calibration_sequence(inst, num_qubits, qubits)
                                if self._calibrate_instructions
                                else []
                            ),
                        )
                    )
        # NOTE(review): these frequency estimates use the global np.random
        # module rather than self._rng, so they are NOT controlled by the
        # `seed` constructor argument — presumably an oversight; confirm.
        qubit_freq_est = np.random.normal(4.8, scale=0.01, size=self.num_qubits).tolist()
        meas_freq_est = np.linspace(6.4, 6.6, self.num_qubits).tolist()
        return PulseDefaults(
            qubit_freq_est=qubit_freq_est,
            meas_freq_est=meas_freq_est,
            buffer=0,
            pulse_library=_PULSE_LIBRARY,
            cmd_def=cmd_def,
        )
    def _build_generic_target(self):
        """This method generates a :class:`~.Target` instance with
        default qubit, instruction and calibration properties.
        """
        # the qubit properties are sampled from default ranges
        properties = _QUBIT_PROPERTIES
        self._target = Target(
            description=f"Generic Target with {self._num_qubits} qubits",
            num_qubits=self._num_qubits,
            dt=properties["dt"],
            qubit_properties=[
                QubitProperties(
                    t1=self._rng.uniform(properties["t1"][0], properties["t1"][1]),
                    t2=self._rng.uniform(properties["t2"][0], properties["t2"][1]),
                    frequency=self._rng.uniform(
                        properties["frequency"][0], properties["frequency"][1]
                    ),
                )
                for _ in range(self._num_qubits)
            ],
            concurrent_measurements=[list(range(self._num_qubits))],
        )
        # Generate instruction schedule map with calibrations to add to target.
        calibration_inst_map = None
        if self._calibrate_instructions is not None:
            if isinstance(self._calibrate_instructions, InstructionScheduleMap):
                calibration_inst_map = self._calibrate_instructions
            else:
                defaults = self._generate_calibration_defaults()
                calibration_inst_map = defaults.instruction_schedule_map
        # Iterate over gates, generate noise params from defaults,
        # and add instructions, noise and calibrations to target.
        for name in self._basis_gates:
            if name not in self._supported_gates:
                raise QiskitError(
                    f"Provided basis gate {name} is not an instruction "
                    f"in the standard qiskit circuit library."
                )
            gate = self._supported_gates[name]
            noise_params = self._get_noise_defaults(name, gate.num_qubits)
            self._add_noisy_instruction_to_target(gate, noise_params, calibration_inst_map)
        if self._control_flow:
            self._target.add_instruction(IfElseOp, name="if_else")
            self._target.add_instruction(WhileLoopOp, name="while_loop")
            self._target.add_instruction(ForLoopOp, name="for_loop")
            self._target.add_instruction(SwitchCaseOp, name="switch_case")
            self._target.add_instruction(BreakLoopOp, name="break")
            self._target.add_instruction(ContinueLoopOp, name="continue")
    def _add_noisy_instruction_to_target(
        self,
        instruction: Instruction,
        noise_params: tuple[float, ...] | None,
        calibration_inst_map: InstructionScheduleMap | None,
    ) -> None:
        """Add instruction properties to target for specified instruction.
        Args:
            instruction: Instance of instruction to be added to the target
            noise_params: Error and duration noise values/ranges to
                include in instruction properties.
            calibration_inst_map: Instruction schedule map with calibration defaults
        """
        qarg_set = self._coupling_map if instruction.num_qubits > 1 else range(self.num_qubits)
        props = {}
        for qarg in qarg_set:
            try:
                qargs = tuple(qarg)
            except TypeError:
                qargs = (qarg,)
            # 2-tuples are fixed (duration, error) values; longer tuples are
            # (min_dur, max_dur, min_err, max_err) ranges to sample from.
            duration, error = (
                noise_params
                if len(noise_params) == 2
                else (self._rng.uniform(*noise_params[:2]), self._rng.uniform(*noise_params[2:]))
            )
            if (
                calibration_inst_map is not None
                and instruction.name not in ["reset", "delay"]
                and qarg in calibration_inst_map.qubits_with_instruction(instruction.name)
            ):
                # Do NOT call .get method. This parses Qobj immediately.
                # This operation is computationally expensive and should be bypassed.
                calibration_entry = calibration_inst_map._get_calibration_entry(
                    instruction.name, qargs
                )
            else:
                calibration_entry = None
            if duration is not None and len(noise_params) > 2:
                # Ensure exact conversion of duration from seconds to dt
                dt = _QUBIT_PROPERTIES["dt"]
                rounded_duration = round(duration / dt) * dt
                # Clamp rounded duration to be between min and max values
                duration = max(noise_params[0], min(rounded_duration, noise_params[1]))
            props.update({qargs: InstructionProperties(duration, error, calibration_entry)})
        self._target.add_instruction(instruction, props)
        # The "measure" instruction calibrations need to be added qubit by qubit, once the
        # instruction has been added to the target.
        if calibration_inst_map is not None and instruction.name == "measure":
            for qarg in calibration_inst_map.qubits_with_instruction(instruction.name):
                try:
                    qargs = tuple(qarg)
                except TypeError:
                    qargs = (qarg,)
                # Do NOT call .get method. This parses Qobj immediately.
                # This operation is computationally expensive and should be bypassed.
                calibration_entry = calibration_inst_map._get_calibration_entry(
                    instruction.name, qargs
                )
                for qubit in qargs:
                    if qubit < self.num_qubits:
                        self._target[instruction.name][(qubit,)].calibration = calibration_entry
    def run(self, run_input, **options):
        """Run on the backend using a simulator.
        This method runs circuit jobs (an individual or a list of :class:`~.QuantumCircuit`
        ) and pulse jobs (an individual or a list of :class:`~.Schedule` or
        :class:`~.ScheduleBlock`) using :class:`~.BasicSimulator` or Aer simulator and returns a
        :class:`~qiskit.providers.Job` object.
        If qiskit-aer is installed, jobs will be run using the ``AerSimulator`` with
        noise model of the backend. Otherwise, jobs will be run using the
        ``BasicSimulator`` simulator without noise.
        Noisy simulations of pulse jobs are not yet supported in :class:`~.GenericBackendV2`.
        Args:
            run_input (QuantumCircuit or Schedule or ScheduleBlock or list): An
                individual or a list of
                :class:`~qiskit.circuit.QuantumCircuit`,
                :class:`~qiskit.pulse.ScheduleBlock`, or
                :class:`~qiskit.pulse.Schedule` objects to run on the backend.
            options: Any kwarg options to pass to the backend for running the
                config. If a key is also present in the options
                attribute/object, then the expectation is that the value
                specified will be used instead of what's set in the options
                object.
        Returns:
            Job: The job object for the run
        Raises:
            QiskitError: If a pulse job is supplied and qiskit_aer is not installed.
        """
        circuits = run_input
        pulse_job = None
        # Classify the input: True -> pulse job, False -> circuit job,
        # None -> unrecognized (raises below).
        if isinstance(circuits, (pulse.Schedule, pulse.ScheduleBlock)):
            pulse_job = True
        elif isinstance(circuits, QuantumCircuit):
            pulse_job = False
        elif isinstance(circuits, list):
            if circuits:
                if all(isinstance(x, (pulse.Schedule, pulse.ScheduleBlock)) for x in circuits):
                    pulse_job = True
                elif all(isinstance(x, QuantumCircuit) for x in circuits):
                    pulse_job = False
        if pulse_job is None:  # submitted job is invalid
            raise QiskitError(
                "Invalid input object %s, must be either a "
                "QuantumCircuit, Schedule, or a list of either" % circuits
            )
        if pulse_job:  # pulse job
            raise QiskitError("Pulse simulation is currently not supported for V2 backends.")
        # circuit job
        if not _optionals.HAS_AER:
            warnings.warn("Aer not found using BasicSimulator and no noise", RuntimeWarning)
        if self._sim is None:
            self._setup_sim()
        self._sim._options = self._options
        job = self._sim.run(circuits, **options)
        return job
    def _setup_sim(self) -> None:
        # Prefer a noisy Aer simulation when available; fall back to the
        # noiseless BasicSimulator otherwise.
        if _optionals.HAS_AER:
            from qiskit_aer import AerSimulator
            from qiskit_aer.noise import NoiseModel
            self._sim = AerSimulator()
            noise_model = NoiseModel.from_backend(self)
            self._sim.set_options(noise_model=noise_model)
            # Update backend default too to avoid overwriting
            # it when run() is called
            self.set_options(noise_model=noise_model)
        else:
            self._sim = BasicSimulator()
    @classmethod
    def _default_options(cls) -> Options:
        with warnings.catch_warnings():  # TODO remove catch once aer release without Provider ABC
            warnings.filterwarnings(
                "ignore",
                category=DeprecationWarning,
                message=".+abstract Provider and ProviderV1.+",
            )
            if _optionals.HAS_AER:
                from qiskit_aer import AerSimulator
                return AerSimulator._default_options()
            else:
                return BasicSimulator._default_options()
    def drive_channel(self, qubit: int):
        drive_channels_map = getattr(self, "channels_map", {}).get("drive", {})
        qubits = (qubit,)
        if qubits in drive_channels_map:
            return drive_channels_map[qubits][0]
        return None
    def measure_channel(self, qubit: int):
        measure_channels_map = getattr(self, "channels_map", {}).get("measure", {})
        qubits = (qubit,)
        if qubits in measure_channels_map:
            return measure_channels_map[qubits][0]
        return None
    def acquire_channel(self, qubit: int):
        acquire_channels_map = getattr(self, "channels_map", {}).get("acquire", {})
        qubits = (qubit,)
        if qubits in acquire_channels_map:
            return acquire_channels_map[qubits][0]
        return None
    def control_channel(self, qubits: Iterable[int]):
        control_channels_map = getattr(self, "channels_map", {}).get("control", {})
        qubits = tuple(qubits)
        if qubits in control_channels_map:
            return control_channels_map[qubits]
        return []
|
https://github.com/AMevans12/Quantum-Codes-Qiskit-Module-
|
AMevans12
|
# Quantum teleportation notebook cell: teleport the state of qubit 0 to qubit 2.
# NOTE: lines starting with `!` and `%` are IPython magics, not valid plain Python.
from qiskit import *
circuit = QuantumCircuit(3,3)
%matplotlib inline
circuit.draw(output='mpl')
circuit.x(0)  # prepare |1> on qubit 0 (the state to teleport)
circuit.barrier()
circuit.draw(output='mpl')
# Create a Bell pair between qubits 1 and 2
circuit.h(1)
circuit.cx(1,2)
circuit.barrier()
circuit.draw(output='mpl')
# Bell-basis measurement preparation on qubits 0 and 1
circuit.cx(0,1)
circuit.h(0)
circuit.barrier()
circuit.draw(output='mpl')
circuit.measure([0, 1], [0, 1])
circuit.barrier()
circuit.draw(output='mpl')
# Conditional corrections (done coherently here rather than classically controlled)
circuit.cx(1, 2)
circuit.cz(0, 2)
circuit.measure([2], [2])
circuit.draw(output='mpl')
simulator = Aer.get_backend('qasm_simulator')
result = execute(circuit, backend=simulator, shots=1024).result()
from qiskit.visualization import plot_histogram
plot_histogram(result.get_counts(circuit))
# Quantum Computer
from qiskit import IBMQ
# SECURITY(review): a hard-coded IBM Quantum API token is committed to source
# below. It should be revoked immediately and loaded from a config file or
# environment variable instead of being embedded in code.
IBMQ.save_account('e9857a49c124d22110b0c577754d160e984ce3f01e5ef110ae6295f3f6c4a6f337a129dfdcb5a1191a9b8c69317e9ae32ed4d196b744571d45453db7fb56d933')
IBMQ.load_account()
provider = IBMQ.get_provider(hub = 'ibm-q')
qcomp = provider.get_backend('ibm_brisbane')
import qiskit.tools.jupyter
%qiskit_job_watcher
job = execute(circuit, backend=qcomp)
result = job.result()
plot_histogram(result.get_counts(circuit))
# Duplicate of the teleportation cell above (repeated verbatim in the notebook).
# NOTE: lines starting with `!` and `%` are IPython magics, not valid plain Python.
from qiskit import *
circuit = QuantumCircuit(3,3)
%matplotlib inline
circuit.draw(output='mpl')
circuit.x(0)  # prepare |1> on qubit 0 (the state to teleport)
circuit.barrier()
circuit.draw(output='mpl')
# Create a Bell pair between qubits 1 and 2
circuit.h(1)
circuit.cx(1,2)
circuit.barrier()
circuit.draw(output='mpl')
# Bell-basis measurement preparation on qubits 0 and 1
circuit.cx(0,1)
circuit.h(0)
circuit.barrier()
circuit.draw(output='mpl')
circuit.measure([0, 1], [0, 1])
circuit.barrier()
circuit.draw(output='mpl')
# Conditional corrections (done coherently here rather than classically controlled)
circuit.cx(1, 2)
circuit.cz(0, 2)
circuit.measure([2], [2])
circuit.draw(output='mpl')
simulator = Aer.get_backend('qasm_simulator')
result = execute(circuit, backend=simulator, shots=1024).result()
from qiskit.visualization import plot_histogram
plot_histogram(result.get_counts(circuit))
# Quantum Computer
from qiskit import IBMQ
# SECURITY(review): a hard-coded IBM Quantum API token is committed to source
# below. It should be revoked immediately and loaded from a config file or
# environment variable instead of being embedded in code.
IBMQ.save_account('e9857a49c124d22110b0c577754d160e984ce3f01e5ef110ae6295f3f6c4a6f337a129dfdcb5a1191a9b8c69317e9ae32ed4d196b744571d45453db7fb56d933')
IBMQ.load_account()
provider = IBMQ.get_provider(hub = 'ibm-q')
qcomp = provider.get_backend('ibm_brisbane')
import qiskit.tools.jupyter
%qiskit_job_watcher
job = execute(circuit, backend=qcomp)
result = job.result()
plot_histogram(result.get_counts(circuit))
|
https://github.com/qiskit-community/qiskit-jku-provider
|
qiskit-community
|
# -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Test JKU backend."""
from qiskit.circuit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import execute
from qiskit_jku_provider import QasmSimulator
from .common import QiskitTestCase
class JKUBackendTestCase(QiskitTestCase):
    """Tests for the JKU backend."""
    def setUp(self):
        """Create a silent JKU simulator backend for each test."""
        super().setUp()
        self.backend = QasmSimulator(silent=True)
    def test_configuration(self):
        """Test backend.configuration()."""
        configuration = self.backend.configuration()
        # Fix: test methods should assert, not return values (pytest treats a
        # non-None return from a test as a deprecation/error, and the original
        # `return` made this test vacuous).
        self.assertIsNotNone(configuration)
    def test_properties(self):
        """Test backend.properties()."""
        properties = self.backend.properties()
        # assertIsNone gives a clearer failure message than assertEqual(x, None)
        self.assertIsNone(properties)
    def test_status(self):
        """Test backend.status()."""
        status = self.backend.status()
        self.assertIsNotNone(status)
    def test_run_circuit(self):
        """Test running a single circuit."""
        result = execute(bell(), self.backend, seed_transpiler=34342).result()
        self.assertEqual(result.success, True)
def bell():
    """Build and return a two-qubit Bell-state circuit with measurement."""
    qreg = QuantumRegister(2, name='qr')
    creg = ClassicalRegister(2, name='qc')
    bell_circuit = QuantumCircuit(qreg, creg, name='bell')
    # H on qubit 0 followed by CX entangles the pair into (|00> + |11>)/sqrt(2)
    bell_circuit.h(qreg[0])
    bell_circuit.cx(qreg[0], qreg[1])
    bell_circuit.measure(qreg, creg)
    return bell_circuit
|
https://github.com/swe-train/qiskit__qiskit
|
swe-train
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Depth pass testing"""
import unittest
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.converters import circuit_to_dag
from qiskit.transpiler.passes import CountOpsLongestPath
from qiskit.test import QiskitTestCase
class TestCountOpsLongestPathPass(QiskitTestCase):
    """Tests for CountOpsLongestPath analysis methods."""
    def test_empty_dag(self):
        """Empty DAG has empty counts."""
        empty_dag = circuit_to_dag(QuantumCircuit())
        analysis_pass = CountOpsLongestPath()
        _ = analysis_pass.run(empty_dag)
        self.assertDictEqual(analysis_pass.property_set["count_ops_longest_path"], {})
    def test_just_qubits(self):
        """A dag with 9 operations (3 CXs, 2 Xs, 2 Ys and 2 Hs) on the longest
        path.
        """
        qubits = QuantumRegister(2)
        circ = QuantumCircuit(qubits)
        # Longest path is: cx, x(q0), y(q0), h(q0), cx, x(q1), y(q1), h(q1), cx
        circ.cx(qubits[0], qubits[1])
        for single_qubit_gate in (circ.x, circ.y, circ.h):
            single_qubit_gate(qubits[0])
        circ.cx(qubits[0], qubits[1])
        for single_qubit_gate in (circ.x, circ.y, circ.h):
            single_qubit_gate(qubits[1])
        circ.cx(qubits[0], qubits[1])
        analysis_pass = CountOpsLongestPath()
        _ = analysis_pass.run(circuit_to_dag(circ))
        longest_path_counts = analysis_pass.property_set["count_ops_longest_path"]
        self.assertDictEqual(longest_path_counts, {"cx": 3, "x": 2, "y": 2, "h": 2})
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
https://github.com/sebasmos/QuantumVE
|
sebasmos
|
# Define the model name
# NOTE: `!` and `%` lines below are IPython/Jupyter magics (shell commands and
# directory change), not valid plain Python — this is a notebook cell.
model_name = "efficientnet_v2_m" #EfficientNet_B7_Weights.IMAGENET1K_V1
!pwd
%cd Vector_Embeddings
!pwd
import torchvision.models as models
import torch
# Registry mapping a model-name string to its torchvision constructor, used to
# instantiate the embedding backbone selected via `model_name`.
MODEL_CONSTRUCTORS = {
    'alexnet': models.alexnet,
    'convnext_base': models.convnext_base,
    'convnext_large': models.convnext_large,
    'convnext_small': models.convnext_small,
    'convnext_tiny': models.convnext_tiny,
    'densenet121': models.densenet121,
    'densenet161': models.densenet161,
    'densenet169': models.densenet169,
    'densenet201': models.densenet201,
    'efficientnet_b0': models.efficientnet_b0,
    'efficientnet_b1': models.efficientnet_b1,
    'efficientnet_b2': models.efficientnet_b2,
    'efficientnet_b3': models.efficientnet_b3,
    'efficientnet_b4': models.efficientnet_b4,
    'efficientnet_b5': models.efficientnet_b5,
    'efficientnet_b6': models.efficientnet_b6,
    'efficientnet_b7': models.efficientnet_b7,
    'efficientnet_v2_l': models.efficientnet_v2_l,
    'efficientnet_v2_m': models.efficientnet_v2_m,
    'efficientnet_v2_s': models.efficientnet_v2_s,
    'googlenet': models.googlenet,
    'inception_v3': models.inception_v3,
    'maxvit_t': models.maxvit_t,
    'mnasnet0_5': models.mnasnet0_5,
    'mnasnet0_75': models.mnasnet0_75,
    'mnasnet1_0': models.mnasnet1_0,
    'mnasnet1_3': models.mnasnet1_3,
    'mobilenet_v2': models.mobilenet_v2,
    'mobilenet_v3_large': models.mobilenet_v3_large,
    'mobilenet_v3_small': models.mobilenet_v3_small,
    'regnet_x_16gf': models.regnet_x_16gf,
    'regnet_x_1_6gf': models.regnet_x_1_6gf,
    'regnet_x_32gf': models.regnet_x_32gf,
    'regnet_x_3_2gf': models.regnet_x_3_2gf,
    'regnet_x_400mf': models.regnet_x_400mf,
    'regnet_x_800mf': models.regnet_x_800mf,
    'regnet_x_8gf': models.regnet_x_8gf,
    'regnet_y_128gf': models.regnet_y_128gf,# check this regnet_y_128gf: no weigthts avaialble
    'regnet_y_16gf': models.regnet_y_16gf,
    'regnet_y_1_6gf': models.regnet_y_1_6gf,
    'regnet_y_32gf': models.regnet_y_32gf,
    'regnet_y_3_2gf': models.regnet_y_3_2gf,
    'regnet_y_400mf': models.regnet_y_400mf,
    'regnet_y_800mf': models.regnet_y_800mf,
    'regnet_y_8gf': models.regnet_y_8gf,
    'resnet101': models.resnet101,
    'resnet152': models.resnet152,
    'resnet18': models.resnet18,
    'resnet34': models.resnet34,
    'resnet50': models.resnet50,
    'resnext101_32x8d': models.resnext101_32x8d,
    'resnext101_64x4d': models.resnext101_64x4d,
    'resnext50_32x4d': models.resnext50_32x4d,
    'shufflenet_v2_x0_5': models.shufflenet_v2_x0_5,
    'shufflenet_v2_x1_0': models.shufflenet_v2_x1_0,
    'shufflenet_v2_x1_5': models.shufflenet_v2_x1_5,
    'shufflenet_v2_x2_0': models.shufflenet_v2_x2_0,
    'squeezenet1_0': models.squeezenet1_0,
    'squeezenet1_1': models.squeezenet1_1,
    'swin_b': models.swin_b,
    'swin_s': models.swin_s,
    'swin_t': models.swin_t,
    'swin_v2_b': models.swin_v2_b,
    'swin_v2_s': models.swin_v2_s,
    'swin_v2_t': models.swin_v2_t,
    'vgg11': models.vgg11,
    'vgg11_bn': models.vgg11_bn,
    'vgg13': models.vgg13,
    'vgg13_bn': models.vgg13_bn,
    'vgg16': models.vgg16,
    'vgg16_bn': models.vgg16_bn,
    'vgg19': models.vgg19,
    'vgg19_bn': models.vgg19_bn,
    'vit_b_16': models.vit_b_16,
    'vit_b_32': models.vit_b_32,
    'vit_h_14': models.vit_h_14,# and this..no weigthts avaialble
    'vit_l_16': models.vit_l_16,
    'vit_l_32': models.vit_l_32,
    'wide_resnet101_2': models.wide_resnet101_2,
    'wide_resnet50_2': models.wide_resnet50_2
}
# Create experiment directory
# Output layout: <model_name>_embeddings/{train,val} for the extracted vectors.
EXPERIMENT_NAME = f"{model_name}_embeddings"
import os
os.makedirs(EXPERIMENT_NAME, exist_ok=True)
train_path = f"{EXPERIMENT_NAME}/train"
val_path = f"{EXPERIMENT_NAME}/val"
os.makedirs(train_path, exist_ok=True)
os.makedirs(val_path, exist_ok=True)
import sys
# Make the parent directory importable (project-local modules live there).
sys.path.insert(0,'../')
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import random_split
from torch.utils.data import Subset, DataLoader, random_split
from torchvision import datasets, transforms
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
import pandas as pd
# from MAE code
from util.datasets import build_dataset
import argparse
import util.misc as misc
import argparse
import datetime
import json
import numpy as np
import os
import time
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import timm
assert timm.__version__ == "0.3.2" # version check
from timm.models.layers import trunc_normal_
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
import util.lr_decay as lrd
import util.misc as misc
from util.datasets import build_dataset
from util.pos_embed import interpolate_pos_embed
from util.misc import NativeScalerWithGradNormCount as NativeScaler
import models_vit
import sys
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import models_mae
import torch; print(f'numpy version: {np.__version__}\nCUDA version: {torch.version.cuda} - Torch versteion: {torch.__version__} - device count: {torch.cuda.device_count()}')
from engine_finetune import train_one_epoch, evaluate
from timm.data import Mixup
from timm.utils import accuracy
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from itertools import cycle
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score
import torch.optim as optim
import torchvision.models as models
import torch.nn as nn
import torch
import pandas as pd
import torch
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, fbeta_score
from sklearn.metrics import precision_score, recall_score, f1_score, fbeta_score
import numpy as np
# Standard ImageNet channel statistics used to undo input normalization.
imagenet_mean = np.array([0.485, 0.456, 0.406])
imagenet_std = np.array([0.229, 0.224, 0.225])
def show_image(image, title=''):
    """Display a normalized [H, W, 3] image tensor after undoing ImageNet normalization."""
    # image is [H, W, 3]
    assert image.shape[2] == 3
    denormalized = (image * imagenet_std + imagenet_mean) * 255
    plt.imshow(torch.clip(denormalized, 0, 255).int())
    plt.title(title, fontsize=16)
    plt.axis('off')
    return
def prepare_model(chkpt_dir, arch='mae_vit_large_patch16'):
    """Instantiate an MAE model by architecture name and load checkpoint weights."""
    # build model by looking up the constructor on the models_mae module
    constructor = getattr(models_mae, arch)
    model = constructor()
    # load checkpoint on CPU; strict=False tolerates missing/extra keys
    checkpoint = torch.load(chkpt_dir, map_location='cpu')
    load_report = model.load_state_dict(checkpoint['model'], strict=False)
    print(load_report)
    return model
def plot_multiclass_roc_curve(all_labels, all_predictions, EXPERIMENT_NAME="."):
    """Plot one-vs-rest ROC curves: per class plus micro/macro averages.

    Saves the figure to ``{EXPERIMENT_NAME}/roc_curve.png`` and shows it.

    Args:
        all_labels: ground-truth class ids, shape (N,).
        all_predictions: predicted class ids, shape (N,).
            NOTE(review): these are hard labels (binarized below), not scores,
            so each per-class "curve" has a single operating point — confirm
            that score-based ROC curves are not wanted here.
        EXPERIMENT_NAME: directory the figure is written into.
    """
    # Step 1: Label Binarization (one-hot encode both labels and predictions)
    label_binarizer = LabelBinarizer()
    y_onehot = label_binarizer.fit_transform(all_labels)
    all_predictions_hot = label_binarizer.transform(all_predictions)
    # Step 2: Calculate per-class ROC curves and AUCs
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    unique_classes = range(y_onehot.shape[1])
    for i in unique_classes:
        fpr[i], tpr[i], _ = roc_curve(y_onehot[:, i], all_predictions_hot[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Step 3: Plot ROC curves
    fig, ax = plt.subplots(figsize=(8, 8))
    # Micro-average ROC curve: pools all (class, sample) decisions together.
    fpr_micro, tpr_micro, _ = roc_curve(y_onehot.ravel(), all_predictions_hot.ravel())
    roc_auc_micro = auc(fpr_micro, tpr_micro)
    plt.plot(
        fpr_micro,
        tpr_micro,
        label=f"micro-average ROC curve (AUC = {roc_auc_micro:.2f})",
        color="deeppink",
        linestyle=":",
        linewidth=4,
    )
    # Macro-average ROC curve: mean TPR across classes on a shared FPR grid.
    all_fpr = np.unique(np.concatenate([fpr[i] for i in unique_classes]))
    mean_tpr = np.zeros_like(all_fpr)
    for i in unique_classes:
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    mean_tpr /= len(unique_classes)
    fpr_macro = all_fpr
    tpr_macro = mean_tpr
    roc_auc_macro = auc(fpr_macro, tpr_macro)
    plt.plot(
        fpr_macro,
        tpr_macro,
        label=f"macro-average ROC curve (AUC = {roc_auc_macro:.2f})",
        color="navy",
        linestyle=":",
        linewidth=4,
    )
    # Individual class ROC curves with unique colors
    colors = plt.cm.rainbow(np.linspace(0, 1, len(unique_classes)))
    for class_id, color in zip(unique_classes, colors):
        plt.plot(
            fpr[class_id],
            tpr[class_id],
            color=color,
            label=f"ROC curve for Class {class_id} (AUC = {roc_auc[class_id]:.2f})",
            linewidth=2,
        )
    plt.plot([0, 1], [0, 1], color='gray', linestyle='--', linewidth=2)  # Add diagonal line for reference
    plt.axis("equal")
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.title("Extension of Receiver Operating Characteristic\n to One-vs-Rest multiclass")
    plt.legend()
    plt.savefig(f'{EXPERIMENT_NAME}/roc_curve.png')
    plt.show()
# Set the seed for PyTorch
torch.manual_seed(42)

# NOTE(review): `argparse` was used below without ever being imported in the
# visible header; imported here so this section is self-contained.
import argparse

# Command-line configuration for MAE fine-tuning / evaluation.
parser = argparse.ArgumentParser('MAE fine-tuning for image classification', add_help=False)
parser.add_argument('--batch_size', default=64, type=int,
                    help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus')
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--accum_iter', default=4, type=int,
                    help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')

# Model parameters
parser.add_argument('--model', default='mobilenet_v3', type=str, metavar='MODEL',
                    help='Name of model to train')
parser.add_argument('--input_size', default=224, type=int,
                    help='images input size')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
                    help='Drop path rate (default: 0.1)')

# Optimizer parameters
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
                    help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--weight_decay', type=float, default=0.05,
                    help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
                    help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=5e-4, metavar='LR',
                    help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--layer_decay', type=float, default=0.65,
                    help='layer-wise lr decay from ELECTRA/BEiT')
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
                    help='lower lr bound for cyclic schedulers that hit 0')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
                    help='epochs to warmup LR')

# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT',
                    help='Color jitter factor (enabled only when not using Auto/RandAug)')
# NOTE(review): the original statement ended with a stray trailing comma,
# which silently built a throwaway 1-tuple; removed.
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
                    help='Label smoothing (default: 0.1)')

# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                    help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
                    help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
                    help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
                    help='Do not random erase first (clean) augmentation split')

# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
                    help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=1.0,
                    help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
                    help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
                    help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
                    help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
                    help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')

# * Finetuning params
parser.add_argument('--finetune', default='mae_pretrain_vit_base.pth',
                    help='finetune from checkpoint')
parser.add_argument('--global_pool', action='store_true')
parser.set_defaults(global_pool=True)
parser.add_argument('--cls_token', action='store_false', dest='global_pool',
                    help='Use class token instead of global pool for classification')

# Dataset parameters
parser.add_argument('--data_path', default='/media/enc/vera1/sebastian/data/ABGQI_mel_spectrograms', type=str,
                    help='dataset path')
parser.add_argument('--nb_classes', default=5, type=int,
                    help='number of the classification types')
parser.add_argument('--output_dir', default='quinn_5_classes',
                    help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default='/media/enc/vera1/sebastian/codes/classifiers/mae/MobileNet/output_dir',
                    help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
                    help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default="/media/enc/vera1/sebastian/codes/classifiers/mae/MobileNet/quinn_5_classes/checkpoint-49.pth",
                    help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                    help='start epoch')
parser.add_argument('--eval', default=True, action='store_true',
                    help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
                    help='Enabling distributed evaluation (recommended during training for faster monitor')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
                    help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)

# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
                    help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
                    help='url used to set up distributed training')

# parse_known_args tolerates unrecognized flags (e.g. when run inside a notebook).
args, unknown = parser.parse_known_args()
# NOTE(review): this section originally ran init_distributed_mode, the args
# print and torch.device TWICE in a row (a notebook concatenation artifact);
# the duplicates are removed.  `misc`, `cudnn` and `SummaryWriter` were used
# without any visible import; they are imported here (the header already
# imports from `util.misc`, so that module path is grounded — confirm against
# the original notebook).
import util.misc as misc
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter

misc.init_distributed_mode(args)
print("{}".format(args).replace(', ', ',\n'))
os.makedirs(args.output_dir, exist_ok=True)
device = torch.device(args.device)

# Fix the seed for reproducibility, offset by rank so workers differ.
seed = args.seed + misc.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True

dataset_train = build_dataset(is_train=True, args=args)
dataset_val = build_dataset(is_train=False, args=args)

if True:  # args.distributed:
    num_tasks = misc.get_world_size()
    global_rank = misc.get_rank()
    sampler_train = torch.utils.data.DistributedSampler(
        dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
    )
    print("Sampler_train = %s" % str(sampler_train))
    if args.dist_eval:
        if len(dataset_val) % num_tasks != 0:
            print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                  'This will slightly alter validation results as extra duplicate entries are added to achieve '
                  'equal num of samples per-process.')
        sampler_val = torch.utils.data.DistributedSampler(
            dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)  # shuffle=True to reduce monitor bias
    else:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
    sampler_train = torch.utils.data.RandomSampler(dataset_train)
    sampler_val = torch.utils.data.SequentialSampler(dataset_val)

# Tensorboard logging only on the main process, and only when training.
if global_rank == 0 and args.log_dir is not None and not args.eval:
    os.makedirs(args.log_dir, exist_ok=True)
    log_writer = SummaryWriter(log_dir=args.log_dir)
else:
    log_writer = None

data_loader_train = torch.utils.data.DataLoader(
    dataset_train, sampler=sampler_train,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    pin_memory=args.pin_mem,
    drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
    dataset_val, sampler=sampler_val,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    pin_memory=args.pin_mem,
    drop_last=False
)
def count_parameters(model, message=""):
    """Print and return the parameter counts of *model*.

    Args:
        model: any ``torch.nn.Module``.
        message: optional prefix for the printed summary line.

    Returns:
        tuple[int, int]: ``(trainable_params, total_params)``.  The original
        version only printed the counts and returned None; returning them
        keeps the print but makes the function usable programmatically.
    """
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total_params = sum(p.numel() for p in model.parameters())
    print(f"{message} Trainable params: {trainable_params} of {total_params}")
    return trainable_params, total_params
def extract_embeddings(model, data_loader, save_path, device, preprocess=None):
    """Run *model* over *data_loader* and save embeddings plus labels to CSV.

    Args:
        model: feature extractor; invoked as ``model(images)``.
        data_loader: yields ``(images, targets)`` batches.
        save_path: destination CSV path (columns feat_0..feat_{D-1}, label).
        device: device the image batches are moved to before the forward pass.
        preprocess: optional transform applied to each raw image batch first.
    """
    embeddings_list = []
    targets_list = []
    total_batches = len(data_loader)
    # No gradients needed for feature extraction; tqdm tracks batch progress.
    with torch.no_grad(), tqdm(total=total_batches) as pbar:
        model.eval()  # Set the model to evaluation mode
        for images, targets in data_loader:
            if preprocess:
                print("required processing")
                # squeeze drops the extra dim some weight transforms introduce
                images = preprocess(images).squeeze()
            images = images.to(device)
            # print("image shape is: ", images.shape)
            embeddings = model(images)
            embeddings_list.append(embeddings.cpu().detach().numpy())  # Move to CPU and convert to NumPy
            targets_list.append(targets.numpy())  # Convert targets to NumPy
            pbar.update(1)
    # Concatenate embeddings and targets from all batches
    embeddings = np.concatenate(embeddings_list).squeeze()
    targets = np.concatenate(targets_list)
    num_embeddings = embeddings.shape[1]
    # One named column per feature dimension, plus a trailing label column.
    column_names = [f"feat_{i}" for i in range(num_embeddings)]
    column_names.append("label")
    embeddings_with_targets = np.hstack((embeddings, np.expand_dims(targets, axis=1)))
    # Create a DataFrame with column names
    df = pd.DataFrame(embeddings_with_targets, columns=column_names)
    df.to_csv(save_path, index=False)
# Collect the lowercase, callable model constructors exposed by
# torchvision.models, skipping dunder names and the get_*/list_* accessors.
def _is_model_constructor(name):
    if not name.islower():
        return False
    if name.startswith("__") or name.startswith('get_') or name.startswith('list_'):
        return False
    return callable(models.__dict__[name])

model_names = sorted(name for name in models.__dict__ if _is_model_constructor(name))
model_names  # notebook-style echo; no effect when run as a script
# Load the backbone used as a frozen feature extractor.
# NOTE(review): the original used three independent `if` statements followed
# by an `else` bound only to the LAST one, so for "vit_h_14" and
# "regnet_y_128gf" the else-branch ALSO ran and silently overwrote the freshly
# built model (and set preprocess=None).  Rewritten as a single if/elif chain
# so exactly one branch runs.  Unused `from torchvision.io import read_image`
# imports removed.
if model_name in MODEL_CONSTRUCTORS:
    model_constructor = MODEL_CONSTRUCTORS[model_name]
    if model_name == "vit_h_14":
        from torchvision.models import vit_h_14, ViT_H_14_Weights
        # Initialize model with the best available weights.
        # (was `...IMAGENET1K_SWAG_E2E_V1.DEFAULT`: accessing one enum member
        # through another is removed in Python 3.12, and DEFAULT aliases this
        # same member for ViT_H_14_Weights anyway.)
        weights = ViT_H_14_Weights.IMAGENET1K_SWAG_E2E_V1
        model = vit_h_14(weights=weights)
        # Drop the classification head; keep the feature trunk.
        model = torch.nn.Sequential(*(list(model.children())[:-1]))
        # Inference transforms matching the pretrained weights.
        preprocess = weights.transforms()
    elif model_name == "regnet_y_128gf":
        from torchvision.models import regnet_y_128gf, RegNet_Y_128GF_Weights
        weights = RegNet_Y_128GF_Weights.IMAGENET1K_SWAG_E2E_V1
        model = regnet_y_128gf(weights=weights)
        model = torch.nn.Sequential(*(list(model.children())[:-1]))
        preprocess = weights.transforms()
    elif model_name == "mobilenet_v3_large":
        from torchvision.models import mobilenet_v3_large, MobileNet_V3_Large_Weights
        weights = MobileNet_V3_Large_Weights.IMAGENET1K_V2
        model = mobilenet_v3_large(weights=weights)
        model = torch.nn.Sequential(*(list(model.children())[:-1]))
        preprocess = weights.transforms()
    else:
        # Generic torchvision constructor; no weight-specific transforms.
        model = model_constructor(pretrained=True, progress=True)
        model = torch.nn.Sequential(*(list(model.children())[:-1]))
        preprocess = None
else:
    raise ValueError("Invalid model type specified.")

# Evaluation mode, moved to the target device.
model.eval()
model.to(device)

# Extract embeddings for training data
extract_embeddings(model, data_loader_train, f'{train_path}/train_embeddings.csv', device, preprocess)
# Extract embeddings for validation data
extract_embeddings(model, data_loader_val, f'{val_path}/val_embeddings.csv', device, preprocess)
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit

# One-qubit circuit containing a single X gate.
top = QuantumCircuit(1)
top.x(0)
# Two-qubit circuit containing a controlled-RY rotation.
bottom = QuantumCircuit(2)
bottom.cry(0.2, 0, 1)
# Tensor the one-qubit circuit with the two-qubit one and draw the result.
tensored = bottom.tensor(top)
tensored.draw('mpl')
|
https://github.com/qiskit-community/community.qiskit.org
|
qiskit-community
|
%matplotlib inline
# useful additional packages
#import math tools
import numpy as np
# We import the tools to handle general Graphs
import networkx as nx
# We import plotting tools
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
# importing Qiskit
from qiskit import Aer, IBMQ
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute
from qiskit.providers.ibmq import least_busy
from qiskit.tools.monitor import job_monitor
from qiskit.visualization import plot_histogram
# Build the 5-node butterfly graph with unit edge weights.
n = 5
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 4, 1.0), (4, 2, 1.0)]

G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)

# Draw the graph with every node colored red on a spring layout.
colors = ['r'] * len(G.nodes())
default_axes = plt.axes(frameon=True)
pos = nx.spring_layout(G)
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=1, ax=default_axes, pos=pos)
# Evaluate the p=1 QAOA expectation value on a (gamma, beta) grid.
step_size = 0.1;
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma,a_beta)
# Closed-form expectation of the MaxCut cost for the butterfly graph.
F1 = 3-(np.sin(2*a_beta)**2*np.sin(2*a_gamma)**2-0.5*np.sin(4*a_beta)*np.sin(4*a_gamma))*(1+np.cos(4*a_gamma)**2)
# Grid search for the MAXIMIZING variables (np.amax — the original comment
# incorrectly said "minimizing").
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0],result[1]))[0]
# NOTE(review): with default meshgrid indexing, result[0] is the ROW index,
# which varies along the beta axis, and result[1] along gamma — so gamma and
# beta below look swapped.  F1 is not symmetric in its arguments; confirm
# against the expected optimum.
gamma = a[0]*step_size;
beta = a[1]*step_size;
# Plot the expectation value F1 as a 3-D surface.
fig = plt.figure()
# NOTE(review): fig.gca(projection='3d') was removed in matplotlib >= 3.6;
# on modern versions use fig.add_subplot(projection='3d') instead.
ax = fig.gca(projection='3d')
surf = ax.plot_surface(a_gamma, a_beta, F1, cmap=cm.coolwarm, linewidth=0, antialiased=True)
ax.set_zlim(1,4)
ax.zaxis.set_major_locator(LinearLocator(3))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
plt.show()
# Report the optimal parameters and the attained expectation value.
print('\n --- OPTIMAL PARAMETERS --- \n')
print('The maximal expectation value is: M1 = %.03f' % np.amax(F1))
print('This is attained for gamma = %.03f and beta = %.03f' % (gamma,beta))
# Prepare one quantum and one classical register bit per graph node.
QAOA = QuantumCircuit(len(V), len(V))

# Hadamard layer: uniform superposition over all bitstrings.
QAOA.h(range(len(V)))
QAOA.barrier()

# Cost layer: Ising-type interaction with angle gamma on every edge of E.
for k, l, _weight in E:
    QAOA.cu1(-2 * gamma, k, l)
    QAOA.u1(gamma, k)
    QAOA.u1(gamma, l)

# Mixer layer: single-qubit X rotations with angle beta on every qubit.
QAOA.barrier()
QAOA.rx(2 * beta, range(len(V)))

# Measure every qubit in the computational basis.
QAOA.barrier()
QAOA.measure(range(len(V)), range(len(V)))

# Draw the circuit for inspection.
QAOA.draw(output='mpl')
# Compute the value of the MaxCut cost function.
def cost_function_C(x, G):
    """MaxCut cost of bit assignment *x* on weighted graph *G*.

    Each edge (u, v) contributes its weight when u and v fall on opposite
    sides of the cut.  Returns NaN when *x* does not supply one bit per node.
    """
    if len(x) != len(G.nodes()):
        return np.nan
    total = 0
    for u, v in G.edges():
        w = G[u][v]['weight']
        # w * [u in cut, v out] + w * [v in cut, u out]
        total = total + w * x[u] * (1 - x[v]) + w * x[v] * (1 - x[u])
    return total
# Run the QAOA circuit on the local QASM simulator with 10k shots.
backend = Aer.get_backend("qasm_simulator")
shots = 10000
simulate = execute(QAOA, backend=backend, shots=shots)
QAOA_results = simulate.result()
# Histogram of the sampled bitstrings.
plot_histogram(QAOA_results.get_counts(),figsize = (8,6),bar_labels = False)
# Evaluate the data from the simulator
counts = QAOA_results.get_counts()

avr_C = 0
max_C = [0, 0]  # [best bitstring, best cost]
# Pre-seed one histogram bucket per possible integer cost value.
hist = {str(k): 0 for k in range(len(G.edges()) + 1)}

for sample, freq in counts.items():
    # use sampled bit string x to compute C(x)
    x = [int(num) for num in sample]
    tmp_eng = cost_function_C(x, G)
    # accumulate the expectation value and the energy distribution
    avr_C += freq * tmp_eng
    bucket = str(round(tmp_eng))
    hist[bucket] = hist.get(bucket, 0) + freq
    # keep the best bit string seen so far
    if tmp_eng > max_C[1]:
        max_C = [sample, tmp_eng]

M1_sampled = avr_C / shots

print('\n --- SIMULATION RESULTS ---\n')
print('The sampled mean value is M1_sampled = %.02f while the true value is M1 = %.02f \n' % (M1_sampled,np.amax(F1)))
print('The approximate solution is x* = %s with C(x*) = %d \n' % (max_C[0],max_C[1]))
print('The cost function is distributed as: \n')
plot_histogram(hist,figsize = (8,6),bar_labels = False)
# Run the same circuit on the real IBMQ essex device (requires saved account).
provider = IBMQ.load_account()
backend = provider.get_backend('ibmq_essex')
shots = 2048
job_exp = execute(QAOA, backend=backend, shots=shots)
# Block until the job finishes, reporting queue position/status.
job_monitor(job_exp)
exp_results = job_exp.result()
plot_histogram(exp_results.get_counts(),figsize = (10,8),bar_labels = False)
# Evaluate the data from the experiment
counts = exp_results.get_counts()

avr_C = 0
max_C = [0, 0]  # [best bitstring, best cost]
# Pre-seed one histogram bucket per possible integer cost value.
hist = {str(k): 0 for k in range(len(G.edges()) + 1)}

for sample, freq in counts.items():
    # use sampled bit string x to compute C(x)
    x = [int(num) for num in sample]
    tmp_eng = cost_function_C(x, G)
    # accumulate the expectation value and the energy distribution
    avr_C += freq * tmp_eng
    bucket = str(round(tmp_eng))
    hist[bucket] = hist.get(bucket, 0) + freq
    # keep the best bit string seen so far
    if tmp_eng > max_C[1]:
        max_C = [sample, tmp_eng]

M1_sampled = avr_C / shots

print('\n --- EXPERIMENTAL RESULTS ---\n')
print('The sampled mean value is M1_sampled = %.02f while the true value is M1 = %.02f \n' % (M1_sampled,np.amax(F1)))
print('The approximate solution is x* = %s with C(x*) = %d \n' % (max_C[0],max_C[1]))
print('The cost function is distributed as: \n')
plot_histogram(hist,figsize = (8,6),bar_labels = False)
import qiskit
qiskit.__qiskit_version__
|
https://github.com/mmetcalf14/Hamiltonian_Downfolding_IBM
|
mmetcalf14
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import logging
import math
import numpy as np
from sklearn.utils import shuffle
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.aqua import Pluggable, PluggableType, get_pluggable_class, AquaError
from qiskit.aqua.components.feature_maps import FeatureMap
from qiskit.aqua.utils import get_feature_dimension
from qiskit.aqua.utils import map_label_to_class_name
from qiskit.aqua.utils import split_dataset_to_data_and_labels
from qiskit.aqua.utils import find_regs_by_name
from qiskit.aqua.algorithms.adaptive.vq_algorithm import VQAlgorithm
logger = logging.getLogger(__name__)
def assign_label(measured_key, num_classes):
    """Map a measured bitstring to a class label.

    Classes = 2:
        - odd number of qubits: majority vote
        - even number of qubits: parity
    Classes = 3:
        - part-parity: parity of each half of the key
          (ex. for 2 qubits: [00], [01, 10], [11] are the three labels)
    Otherwise:
        - the 2**n bitstring values are carved into num_classes contiguous ranges.

    Args:
        measured_key (str): measured bitstring
        num_classes (int): number of classes
    """
    bits = np.asarray([int(k) for k in list(measured_key)])
    num_qubits = len(bits)
    if num_classes == 2:
        hamming_weight = np.sum(bits)
        if num_qubits % 2 != 0:
            # Majority vote on an odd number of qubits.
            return 1 if hamming_weight > num_qubits / 2 else 0
        # Parity on an even number of qubits.
        return hamming_weight % 2
    if num_classes == 3:
        first_half = int(np.floor(num_qubits / 2))
        modulo = num_qubits % 2
        # Parity of the (larger) first half and of the remaining half.
        parity_one = np.sum(bits[0:first_half + modulo]) % 2
        parity_two = np.sum(bits[first_half + modulo:]) % 2
        return parity_one + parity_two
    # General case: interpret the key as a binary number and bucket the
    # 2**n values into num_classes equal-width ranges (last bucket absorbs
    # any remainder).
    total_size = 2 ** num_qubits
    class_step = np.floor(total_size / num_classes)
    decimal_value = bits.dot(1 << np.arange(bits.shape[-1] - 1, -1, -1))
    key_order = int(decimal_value / class_step)
    return key_order if key_order < num_classes else num_classes - 1
def cost_estimate(probs, gt_labels, shots=None):
    """Calculate cross entropy between predicted probabilities and labels.

    # shots is kept since it may be needed in future.

    Args:
        shots (int): the number of shots used in quantum computing
        probs (numpy.ndarray): NxK array, N is the number of data and K is the number of class
        gt_labels (numpy.ndarray): Nx1 array

    Returns:
        float: cross entropy loss between estimated probs and gt_labels
    """
    # One-hot encode the ground-truth labels.
    one_hot = np.zeros(probs.shape)
    for row, label in enumerate(gt_labels):
        one_hot[row][label] = 1

    def _cross_entropy(predictions, targets, epsilon=1e-12):
        # Clip to avoid log(0) for degenerate probability estimates.
        predictions = np.clip(predictions, epsilon, 1. - epsilon)
        n_samples = predictions.shape[0]
        per_sample = np.sum(targets * np.log(predictions), axis=1)
        return -np.sum(per_sample) / n_samples

    return _cross_entropy(probs, one_hot)
def cost_estimate_sigmoid(shots, probs, gt_labels):
    """Calculate sigmoid-squashed cross entropy.

    Args:
        shots (int): the number of shots used in quantum computing
        probs (numpy.ndarray): NxK array, N is the number of data and K is the number of class
        gt_labels (numpy.ndarray): Nx1 array

    Returns:
        float: sigmoid(cross_entropy) of the estimated probs vs gt_labels
    """
    # Historical note (19 Dec 2018): the argument order of cost_estimate was
    # corrected here; it was previously called as cost_estimate(shots, ...).
    raw_cost = cost_estimate(probs, gt_labels, shots)
    return (1.) / (1. + np.exp(-raw_cost))
def return_probabilities(counts, num_classes):
    """Convert measured count dictionaries into per-class probabilities.

    Args:
        counts ([dict]): N data points, each with a dict recording the counts
        num_classes (int): number of classes

    Returns:
        numpy.ndarray: NxK array of class probabilities
    """
    probs = np.zeros((len(counts), num_classes))
    for idx, count in enumerate(counts):
        total_shots = sum(count.values())
        for bitstring, occurrences in count.items():
            # Each bitstring contributes its relative frequency to the class
            # assign_label maps it to.
            label = assign_label(bitstring, num_classes)
            probs[idx][label] += occurrences / total_shots
    return probs
class VQC(VQAlgorithm):
    """Variational Quantum Classifier.

    A parameterized circuit (feature map + variational form) is trained so
    that measurement outcomes, mapped to numeric labels by ``assign_label``,
    match the training labels under a cross-entropy cost.
    """

    # Pluggable metadata consumed by the Aqua framework: a JSON schema for
    # the algorithm's own options plus the default optimizer, feature map
    # and variational form used when none are configured explicitly.
    CONFIGURATION = {
        'name': 'VQC',
        'description': 'Variational Quantum Classifier',
        'input_schema': {
            '$schema': 'http://json-schema.org/schema#',
            'id': 'vqc_schema',
            'type': 'object',
            'properties': {
                'override_SPSA_params': {
                    'type': 'boolean',
                    'default': True
                },
                'max_evals_grouped': {
                    'type': 'integer',
                    'default': 1
                },
                'minibatch_size': {
                    'type': 'integer',
                    'default': -1
                }
            },
            'additionalProperties': False
        },
        'problems': ['classification'],
        'depends': [
            {
                'pluggable_type': 'optimizer',
                'default': {
                    'name': 'SPSA'
                },
            },
            {
                'pluggable_type': 'feature_map',
                'default': {
                    'name': 'SecondOrderExpansion',
                    'depth': 2
                },
            },
            {
                'pluggable_type': 'variational_form',
                'default': {
                    'name': 'RYRZ',
                    'depth': 3
                },
            },
        ],
    }
    def __init__(
            self,
            optimizer=None,
            feature_map=None,
            var_form=None,
            training_dataset=None,
            test_dataset=None,
            datapoints=None,
            max_evals_grouped=1,
            minibatch_size=-1,
            callback=None
    ):
        """Initialize the object
        Args:
            optimizer (Optimizer): The classical optimizer to use.
            feature_map (FeatureMap): The FeatureMap instance to use.
            var_form (VariationalForm): The variational form instance.
            training_dataset (dict): The training dataset, in the format: {'A': np.ndarray, 'B': np.ndarray, ...}.
            test_dataset (dict): The test dataset, in same format as `training_dataset`.
            datapoints (np.ndarray): NxD array, N is the number of data and D is data dimension.
            max_evals_grouped (int): The maximum number of evaluations to perform simultaneously.
            minibatch_size (int): The size of a mini-batch.
            callback (Callable): a callback that can access the intermediate data during the optimization.
                Internally, four arguments are provided as follows the index of data batch, the index of evaluation,
                parameters of variational form, evaluated value.
        Notes:
            We use `label` to denotes numeric results and `class` the class names (str).
        Raises:
            AquaError: when the feature map or the training dataset is missing.
        """
        self.validate(locals())
        super().__init__(
            var_form=var_form,
            optimizer=optimizer,
            cost_fn=self._cost_function_wrapper
        )
        self._optimizer.set_max_evals_grouped(max_evals_grouped)
        self._callback = callback
        # Feature map and training data are mandatory for a classifier.
        if feature_map is None:
            raise AquaError('Missing feature map.')
        if training_dataset is None:
            raise AquaError('Missing training dataset.')
        self._training_dataset, self._class_to_label = split_dataset_to_data_and_labels(
            training_dataset)
        # Reverse mapping so predicted numeric labels can be reported as class names.
        self._label_to_class = {label: class_name for class_name, label
                                in self._class_to_label.items()}
        self._num_classes = len(list(self._class_to_label.keys()))
        # Test data must reuse the label mapping learned from the training set.
        if test_dataset is not None:
            self._test_dataset = split_dataset_to_data_and_labels(test_dataset,
                                                                  self._class_to_label)
        else:
            self._test_dataset = test_dataset
        # Normalize raw datapoints to an ndarray for later prediction.
        if datapoints is not None and not isinstance(datapoints, np.ndarray):
            datapoints = np.asarray(datapoints)
        self._datapoints = datapoints
        self._minibatch_size = minibatch_size
        self._eval_count = 0
        self._ret = {}
        self._feature_map = feature_map
        self._num_qubits = feature_map.num_qubits
    @classmethod
    def init_params(cls, params, algo_input):
        """Build a VQC instance from an Aqua parameter dictionary.

        Args:
            params (dict): sectioned parameters (algorithm / optimizer /
                feature_map / variational_form).
            algo_input: input object carrying training_dataset, test_dataset
                and datapoints.

        Returns:
            VQC: the configured classifier instance.
        """
        algo_params = params.get(Pluggable.SECTION_KEY_ALGORITHM)
        override_spsa_params = algo_params.get('override_SPSA_params')
        max_evals_grouped = algo_params.get('max_evals_grouped')
        minibatch_size = algo_params.get('minibatch_size')
        # Set up optimizer
        opt_params = params.get(Pluggable.SECTION_KEY_OPTIMIZER)
        # If SPSA then override SPSA params as reqd to our predetermined values
        if opt_params['name'] == 'SPSA' and override_spsa_params:
            opt_params['c0'] = 4.0
            opt_params['c1'] = 0.1
            opt_params['c2'] = 0.602
            opt_params['c3'] = 0.101
            opt_params['c4'] = 0.0
            opt_params['skip_calibration'] = True
        optimizer = get_pluggable_class(PluggableType.OPTIMIZER,
                                        opt_params['name']).init_params(params)
        # Set up feature map; its width is dictated by the data dimension.
        fea_map_params = params.get(Pluggable.SECTION_KEY_FEATURE_MAP)
        feature_dimension = get_feature_dimension(algo_input.training_dataset)
        fea_map_params['feature_dimension'] = feature_dimension
        feature_map = get_pluggable_class(PluggableType.FEATURE_MAP,
                                          fea_map_params['name']).init_params(params)
        # Set up variational form, we need to add computed num qubits
        # Pass all parameters so that Variational Form can create its dependents
        var_form_params = params.get(Pluggable.SECTION_KEY_VAR_FORM)
        var_form_params['num_qubits'] = feature_map.num_qubits
        var_form = get_pluggable_class(PluggableType.VARIATIONAL_FORM,
                                       var_form_params['name']).init_params(params)
        return cls(optimizer, feature_map, var_form, algo_input.training_dataset,
                   algo_input.test_dataset, algo_input.datapoints, max_evals_grouped,
                   minibatch_size)
def construct_circuit(self, x, theta, measurement=False):
"""
Construct circuit based on data and parameters in variational form.
Args:
x (numpy.ndarray): 1-D array with D dimension
theta ([numpy.ndarray]): list of 1-D array, parameters sets for variational form
measurement (bool): flag to add measurement
Returns:
QuantumCircuit: the circuit
"""
qr = QuantumRegister(self._num_qubits, name='q')
cr = ClassicalRegister(self._num_qubits, name='c')
qc = QuantumCircuit(qr, cr)
qc += self._feature_map.construct_circuit(x, qr)
qc += self._var_form.construct_circuit(theta, qr)
if measurement:
qc.barrier(qr)
qc.measure(qr, cr)
return qc
    def _get_prediction(self, data, theta):
        """
        Make prediction on data based on each theta.
        Args:
            data (numpy.ndarray): 2-D array, NxD, N data points, each with D dimension
            theta ([numpy.ndarray]): list of 1-D array, parameters sets for variational form
        Returns:
            numpy.ndarray or [numpy.ndarray]: list of NxK array
            numpy.ndarray or [numpy.ndarray]: list of Nx1 array
        """
        # if self._quantum_instance.is_statevector:
        #     raise ValueError('Selected backend "{}" is not supported.'.format(
        #         self._quantum_instance.backend_name))
        # Build one circuit per (parameter set, datum) pair.  Measurement is
        # omitted on statevector backends, where amplitudes are read directly.
        circuits = {}
        circuit_id = 0
        num_theta_sets = len(theta) // self._var_form.num_parameters
        theta_sets = np.split(theta, num_theta_sets)
        for theta in theta_sets:
            for datum in data:
                if self._quantum_instance.is_statevector:
                    circuit = self.construct_circuit(datum, theta, measurement=False)
                else:
                    circuit = self.construct_circuit(datum, theta, measurement=True)
                circuits[circuit_id] = circuit
                circuit_id += 1
        # Execute every circuit in a single backend submission.
        results = self._quantum_instance.execute(list(circuits.values()))
        circuit_id = 0
        predicted_probs = []
        predicted_labels = []
        for _ in theta_sets:
            counts = []
            for _ in data:
                if self._quantum_instance.is_statevector:
                    # Squared amplitudes play the role of measurement counts.
                    temp = results.get_statevector(circuits[circuit_id])
                    outcome_vector = (temp * temp.conj()).real
                    # convert outcome_vector to outcome_dict, where key is a basis state and value is the count.
                    # Note: the count can be scaled linearly, i.e., it does not have to be an integer.
                    outcome_dict = {}
                    bitstr_size = int(math.log2(len(outcome_vector)))
                    for i in range(len(outcome_vector)):
                        bitstr_i = format(i, '0' + str(bitstr_size) + 'b')
                        outcome_dict[bitstr_i] = outcome_vector[i]
                else:
                    outcome_dict = results.get_counts(circuits[circuit_id])
                counts.append(outcome_dict)
                circuit_id += 1
            # Fold raw counts into per-class probabilities and hard labels.
            probs = return_probabilities(counts, self._num_classes)
            predicted_probs.append(probs)
            predicted_labels.append(np.argmax(probs, axis=1))
        # Unwrap the single-parameter-set case for caller convenience.
        if len(predicted_probs) == 1:
            predicted_probs = predicted_probs[0]
        if len(predicted_labels) == 1:
            predicted_labels = predicted_labels[0]
        return predicted_probs, predicted_labels
    # Breaks data into minibatches. Labels are optional, but will be broken into batches if included.
    def batch_data(self, data, labels=None, minibatch_size=-1):
        """Split *data* (and optionally *labels*) into shuffled batches.

        NOTE(review): ``np.array_split(x, n)`` splits into n SECTIONS, so
        passing ``batch_size`` here yields ``minibatch_size`` batches of
        roughly ``len(data)/minibatch_size`` samples each — NOT batches of
        ``minibatch_size`` samples.  Confirm this is the intended semantics.

        Returns:
            tuple: (batches, label_batches); label_batches is None when a
            valid minibatch_size is given but labels is None.
        """
        label_batches = None
        if 0 < minibatch_size < len(data):
            batch_size = min(minibatch_size, len(data))
            if labels is not None:
                # Shuffle samples and labels in lockstep before batching.
                shuffled_samples, shuffled_labels = shuffle(data, labels, random_state=self.random)
                label_batches = np.array_split(shuffled_labels, batch_size)
            else:
                shuffled_samples = shuffle(data, random_state=self.random)
            batches = np.array_split(shuffled_samples, batch_size)
        else:
            # No batching requested: a single batch containing everything.
            batches = np.asarray([data])
            label_batches = np.asarray([labels])
        return batches, label_batches
def is_gradient_really_supported(self):
return self.optimizer.is_gradient_supported and not self.optimizer.is_gradient_ignored
    def train(self, data, labels, quantum_instance=None, minibatch_size=-1):
        """Train the models, and save results.
        Args:
            data (numpy.ndarray): NxD array, N is number of data and D is dimension
            labels (numpy.ndarray): Nx1 array, N is number of data
            quantum_instance (QuantumInstance): quantum backend with all setting
            minibatch_size (int): the size of each minibatched accuracy evalutation
        """
        self._quantum_instance = self._quantum_instance if quantum_instance is None else quantum_instance
        # Fall back to the instance-level minibatch size when none is given.
        minibatch_size = minibatch_size if minibatch_size > 0 else self._minibatch_size
        self._batches, self._label_batches = self.batch_data(data, labels, minibatch_size)
        self._batch_index = 0
        # Random starting parameters unless the caller provided some.
        if self.initial_point is None:
            self.initial_point = self.random.randn(self._var_form.num_parameters)
        self._eval_count = 0
        grad_fn = None
        # With minibatching + a gradient-based optimizer, the gradient call must
        # also advance the batch pointer, hence the wrapper.
        if minibatch_size > 0 and self.is_gradient_really_supported():  # we need some wrapper
            grad_fn = self._gradient_function_wrapper
        self._ret = self.find_minimum(
            initial_point=self.initial_point,
            var_form=self.var_form,
            cost_fn=self._cost_function_wrapper,
            optimizer=self.optimizer,
            gradient_fn=grad_fn  # func for computing gradient
        )
        # The optimizer may stop early; sync the evaluation count with it.
        if self._ret['num_optimizer_evals'] is not None and self._eval_count >= self._ret['num_optimizer_evals']:
            self._eval_count = self._ret['num_optimizer_evals']
        self._eval_time = self._ret['eval_time']
        logger.info('Optimization complete in {} seconds.\nFound opt_params {} in {} evals'.format(
            self._eval_time, self._ret['opt_params'], self._eval_count))
        self._ret['eval_count'] = self._eval_count
        # Drop the per-run batching state.
        del self._batches
        del self._label_batches
        del self._batch_index
        self._ret['training_loss'] = self._ret['min_val']
    # temporary fix: this code should be unified with the gradient api in optimizer.py
    def _gradient_function_wrapper(self, theta):
        """Compute and return the gradient at the point theta.
        Args:
            theta (numpy.ndarray): 1-d array
        Returns:
            numpy.ndarray: 1-d array with the same shape as theta. The gradient computed
        """
        # Forward finite differences: perturb one coordinate at a time by epsilon
        # (theta is modified in place and restored after each evaluation).
        epsilon = 1e-8
        f_orig = self._cost_function_wrapper(theta)
        grad = np.zeros((len(theta),), float)
        for k in range(len(theta)):
            theta[k] += epsilon
            f_new = self._cost_function_wrapper(theta)
            grad[k] = (f_new - f_orig) / epsilon
            theta[k] -= epsilon  # recover to the center state
        # When gradients drive the optimization, the batch index advances here
        # rather than inside the cost-function wrapper.
        if self.is_gradient_really_supported():
            self._batch_index += 1  # increment the batch after gradient callback
        return grad
    def _cost_function_wrapper(self, theta):
        """Evaluate the training cost of the current minibatch at parameters theta.

        Args:
            theta (numpy.ndarray): 1-d parameter array; may hold several parameter
                sets concatenated back to back (one per prediction returned).
        Returns:
            list or float: one cost per parameter set, unwrapped when single.
        """
        # Cycle through minibatches; _batch_index is advanced either here or in
        # the gradient wrapper, depending on gradient support.
        batch_index = self._batch_index % len(self._batches)
        predicted_probs, predicted_labels = self._get_prediction(self._batches[batch_index], theta)
        total_cost = []
        if not isinstance(predicted_probs, list):
            predicted_probs = [predicted_probs]
        for i in range(len(predicted_probs)):
            curr_cost = cost_estimate(predicted_probs[i], self._label_batches[batch_index])
            total_cost.append(curr_cost)
            # Report progress: the slice picks the i-th parameter set out of theta.
            if self._callback is not None:
                self._callback(
                    self._eval_count,
                    theta[i * self._var_form.num_parameters:(i + 1) * self._var_form.num_parameters],
                    curr_cost,
                    self._batch_index
                )
            self._eval_count += 1
        # Without gradient support the cost evaluation itself advances the batch.
        if not self.is_gradient_really_supported():
            self._batch_index += 1  # increment the batch after eval callback
        logger.debug('Intermediate batch cost: {}'.format(sum(total_cost)))
        return total_cost if len(total_cost) > 1 else total_cost[0]
    def test(self, data, labels, quantum_instance=None, minibatch_size=-1, params=None):
        """Predict the labels for the data, and test against with ground truth labels.
        Args:
            data (numpy.ndarray): NxD array, N is number of data and D is data dimension
            labels (numpy.ndarray): Nx1 array, N is number of data
            quantum_instance (QuantumInstance): quantum backend with all setting
            minibatch_size (int): the size of each minibatched accuracy evalutation
            params (list): list of parameters to populate in the variational form
        Returns:
            float: classification accuracy
        """
        # minibatch size defaults to setting in instance variable if not set
        minibatch_size = minibatch_size if minibatch_size > 0 else self._minibatch_size
        batches, label_batches = self.batch_data(data, labels, minibatch_size)
        self.batch_num = 0
        # Default to the parameters found by training.
        if params is None:
            params = self.optimal_params
        total_cost = 0
        total_correct = 0
        total_samples = 0
        self._quantum_instance = self._quantum_instance if quantum_instance is None else quantum_instance
        for batch, label_batch in zip(batches, label_batches):
            predicted_probs, predicted_labels = self._get_prediction(batch, params)
            total_cost += cost_estimate(predicted_probs, label_batch)
            # A prediction counts as correct when the argmax class matches the label.
            total_correct += np.sum((np.argmax(predicted_probs, axis=1) == label_batch))
            total_samples += label_batch.shape[0]
            int_accuracy = np.sum((np.argmax(predicted_probs, axis=1) == label_batch)) / label_batch.shape[0]
            logger.debug('Intermediate batch accuracy: {:.2f}%'.format(int_accuracy * 100.0))
        total_accuracy = total_correct / total_samples
        logger.info('Accuracy is {:.2f}%'.format(total_accuracy * 100.0))
        # Both keys are kept for backward compatibility.
        self._ret['testing_accuracy'] = total_accuracy
        self._ret['test_success_ratio'] = total_accuracy
        self._ret['testing_loss'] = total_cost / len(batches)
        return total_accuracy
def predict(self, data, quantum_instance=None, minibatch_size=-1, params=None):
"""Predict the labels for the data.
Args:
data (numpy.ndarray): NxD array, N is number of data, D is data dimension
quantum_instance (QuantumInstance): quantum backend with all setting
minibatch_size (int): the size of each minibatched accuracy evalutation
params (list): list of parameters to populate in the variational form
Returns:
list: for each data point, generates the predicted probability for each class
list: for each data point, generates the predicted label (that with the highest prob)
"""
# minibatch size defaults to setting in instance variable if not set
minibatch_size = minibatch_size if minibatch_size > 0 else self._minibatch_size
batches, _ = self.batch_data(data, None, minibatch_size)
if params is None:
params = self.optimal_params
predicted_probs = None
predicted_labels = None
self._quantum_instance = self._quantum_instance if quantum_instance is None else quantum_instance
for i, batch in enumerate(batches):
if len(batches) > 0:
logger.debug('Predicting batch {}'.format(i))
batch_probs, batch_labels = self._get_prediction(batch, params)
if not predicted_probs and not predicted_labels:
predicted_probs = batch_probs
predicted_labels = batch_labels
else:
np.concatenate((predicted_probs, batch_probs))
np.concatenate((predicted_labels, batch_labels))
self._ret['predicted_probs'] = predicted_probs
self._ret['predicted_labels'] = predicted_labels
return predicted_probs, predicted_labels
    def _run(self):
        """Run the full train / test / predict pipeline and return the results dict."""
        self.train(self._training_dataset[0], self._training_dataset[1])
        # Optional evaluation on a held-out test set.
        if self._test_dataset is not None:
            self.test(self._test_dataset[0], self._test_dataset[1])
        # Optional prediction on unlabeled datapoints, mapped back to class names.
        if self._datapoints is not None:
            predicted_probs, predicted_labels = self.predict(self._datapoints)
            self._ret['predicted_classes'] = map_label_to_class_name(predicted_labels,
                                                                     self._label_to_class)
        return self._ret
def get_optimal_cost(self):
if 'opt_params' not in self._ret:
raise AquaError("Cannot return optimal cost before running the algorithm to find optimal params.")
return self._ret['min_val']
def get_optimal_circuit(self):
if 'opt_params' not in self._ret:
raise AquaError("Cannot find optimal circuit before running the algorithm to find optimal params.")
return self._var_form.construct_circuit(self._ret['opt_params'])
    def get_optimal_vector(self):
        """Return the state produced by the optimal circuit: a statevector on
        statevector backends, otherwise measurement counts."""
        if 'opt_params' not in self._ret:
            raise AquaError("Cannot find optimal vector before running the algorithm to find optimal params.")
        qc = self.get_optimal_circuit()
        if self._quantum_instance.is_statevector:
            ret = self._quantum_instance.execute(qc)
            self._ret['min_vector'] = ret.get_statevector(qc, decimals=16)
        else:
            # Non-statevector backends: append measurements and return counts.
            c = ClassicalRegister(qc.width(), name='c')
            q = find_regs_by_name(qc, 'q')
            qc.add_register(c)
            qc.barrier(q)
            qc.measure(q, c)
            ret = self._quantum_instance.execute(qc)
            self._ret['min_vector'] = ret.get_counts(qc)
        return self._ret['min_vector']
@property
def optimal_params(self):
if 'opt_params' not in self._ret:
raise AquaError("Cannot find optimal params before running the algorithm.")
return self._ret['opt_params']
    @property
    def ret(self):
        """Raw results dictionary accumulated by train/test/predict."""
        return self._ret
    @ret.setter
    def ret(self, new_value):
        """Replace the internal results dictionary wholesale."""
        self._ret = new_value
    @property
    def label_to_class(self):
        """Internal mapping from numeric label to class name."""
        return self._label_to_class
    @property
    def class_to_label(self):
        """Internal mapping from class name to numeric label."""
        return self._class_to_label
def load_model(self, file_path):
model_npz = np.load(file_path)
self._ret['opt_params'] = model_npz['opt_params']
def save_model(self, file_path):
model = {'opt_params': self._ret['opt_params']}
np.savez(file_path, **model)
    @property
    def test_dataset(self):
        """The held-out test dataset, if any."""
        return self._test_dataset
    @property
    def training_dataset(self):
        """The training dataset supplied at construction."""
        return self._training_dataset
    @property
    def datapoints(self):
        """Unlabeled datapoints to predict after training, if any."""
        return self._datapoints
|
https://github.com/qiskit-community/community.qiskit.org
|
qiskit-community
|
# initialization
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
# importing Qiskit
from qiskit import IBMQ, BasicAer
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
nQubits = 2 # number of physical qubits used to represent s
s = 3 # the hidden integer
# make sure that s can be represented with nQubits bits
s = s % 2**(nQubits)
# Creating registers
# qubits for querying the oracle and finding the hidden integer
qr = QuantumRegister(nQubits)
# bits for recording the measurement on qr
cr = ClassicalRegister(nQubits)
bvCircuit = QuantumCircuit(qr, cr)
barriers = True
# Apply Hadamard gates before querying the oracle
for i in range(nQubits):
    bvCircuit.h(qr[i])
# Apply barrier
if barriers:
    bvCircuit.barrier()
# Apply the inner-product oracle: a Z on qubit i where bit i of s is set,
# identity elsewhere.
for i in range(nQubits):
    if (s & (1 << i)):
        bvCircuit.z(qr[i])
    else:
        # NOTE(review): QuantumCircuit.iden was removed in newer Qiskit
        # (renamed to `i`/`id`); this only runs on old versions — confirm the
        # pinned Qiskit version before executing.
        bvCircuit.iden(qr[i])
# Apply barrier
if barriers:
    bvCircuit.barrier()
#Apply Hadamard gates after querying the oracle
for i in range(nQubits):
    bvCircuit.h(qr[i])
# Apply barrier
if barriers:
    bvCircuit.barrier()
# Measurement
bvCircuit.measure(qr, cr)
bvCircuit.draw(output='mpl')
# use local simulator
backend = BasicAer.get_backend('qasm_simulator')
shots = 1024
results = execute(bvCircuit, backend=backend, shots=shots).result()
answer = results.get_counts()
plot_histogram(answer)
# Load our saved IBMQ accounts and get the least busy backend device with less than or equal to 5 qubits
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits <= 5 and
                                       not x.configuration().simulator and x.status().operational==True))
print("least busy backend: ", backend)
# Run our circuit on the least busy backend. Monitor the execution of the job in the queue
from qiskit.tools.monitor import job_monitor
shots = 1024
job = execute(bvCircuit, backend=backend, shots=shots)
job_monitor(job, interval = 2)
# Get the results from the computation
results = job.result()
answer = results.get_counts()
plot_histogram(answer)
import qiskit
qiskit.__qiskit_version__
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit
from qiskit.quantum_info import Statevector
from qiskit.visualization import plot_bloch_multivector
# First example circuit: H on qubit 0, X on qubit 1 (overwritten below).
qc = QuantumCircuit(2)
qc.h(0)
qc.x(1)
# You can reverse the order of the qubits.
from qiskit.quantum_info import DensityMatrix
# Second circuit: build a two-qubit state and plot its Bloch spheres with the
# qubit order reversed.
qc = QuantumCircuit(2)
qc.h([0, 1])
qc.t(1)
qc.s(0)
qc.cx(0,1)
matrix = DensityMatrix(qc)
plot_bloch_multivector(matrix, title='My Bloch Spheres', reverse_bits=True)
|
https://github.com/JayRGopal/Quantum-Error-Correction
|
JayRGopal
|
from qiskit import Aer, IBMQ, transpile
from qiskit.utils import QuantumInstance
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes.calibration import RZXCalibrationBuilderNoEcho
from qiskit_nature.drivers import UnitsType, Molecule
from qiskit_nature.drivers.second_quantization import ElectronicStructureDriverType, ElectronicStructureMoleculeDriver
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.transformers.second_quantization.electronic import FreezeCoreTransformer
from qiskit_nature.mappers.second_quantization import ParityMapper
from qiskit_nature.algorithms import GroundStateEigensolver
from qiskit_nature.runtime import VQEClient
from qiskit.algorithms import NumPyMinimumEigensolver, VQE
from qiskit.algorithms.optimizers import SPSA
from qiskit.circuit import QuantumCircuit, ParameterVector
from qiskit.utils import QuantumInstance
from qiskit.providers.aer import AerSimulator
import matplotlib.pyplot as plt
import numpy as np
# Local Aer simulator used as the default backend in this script.
backend = AerSimulator()
def HEA_naive(num_q, depth):
    """Hardware-efficient ansatz without hardware awareness.

    Layout: one RX+RZ layer on every qubit, then `depth` repetitions of a
    linear CX chain followed by an RZ-RX-RZ rotation layer.

    Returns:
        (QuantumCircuit, ParameterVector): the ansatz and its parameters.
    """
    circuit = QuantumCircuit(num_q)
    params = ParameterVector("theta", length=num_q * (3 * depth + 2))
    idx = 0
    # Initial single-qubit layer: RX followed by RZ on each qubit.
    for q in range(num_q):
        circuit.rx(params[idx], q)
        circuit.rz(params[idx + 1], q)
        idx += 2
    for _ in range(depth):
        # Entangle neighbours along a linear chain.
        for q in range(num_q - 1):
            circuit.cx(q, q + 1)
        # Euler-angle rotation layer: RZ, RX, RZ on each qubit.
        for q in range(num_q):
            circuit.rz(params[idx], q)
            circuit.rx(params[idx + 1], q)
            circuit.rz(params[idx + 2], q)
            idx += 3
    return circuit, params
def HEA_aware(num_q, depth, hardware):
    """Hardware-aware variant of HEA_naive: entangling gates are RZX(pi/2)
    pulses calibrated for the given backend instead of CX gates.

    Args:
        num_q: number of qubits.
        depth: number of entangle+rotate repetitions.
        hardware: backend passed to RZXCalibrationBuilderNoEcho.
    Returns:
        (QuantumCircuit, ParameterVector): the ansatz and its parameters.
    """
    circuit = QuantumCircuit(num_q)
    params = ParameterVector("theta", length=num_q * (3 * depth + 2))
    counter = 0
    # Initial RX+RZ layer on every qubit.
    for q in range(num_q):
        circuit.rx(params[counter], q)
        counter += 1
        circuit.rz(params[counter], q)
        counter += 1
    for d in range(depth):
        for q in range(num_q - 1):
            # Build a one-gate circuit, attach a pulse calibration for the
            # RZX(pi/2) on this backend, then splice it into the ansatz.
            gate = QuantumCircuit(num_q)
            gate.rzx(np.pi/2, q, q + 1)
            pass_ = RZXCalibrationBuilderNoEcho(hardware)
            qc_cr = PassManager(pass_).run(gate)
            circuit.compose(qc_cr, inplace=True)
        # RZ-RX-RZ rotation layer on every qubit.
        for q in range(num_q):
            circuit.rz(params[counter], q)
            counter += 1
            circuit.rx(params[counter], q)
            counter += 1
            circuit.rz(params[counter], q)
            counter += 1
    return circuit, params
# Ansatz hyper-parameters for this experiment.
depth = 2
qubits = 4
circuit, _ = HEA_naive(qubits, depth)
# SPSA optimizer with 100 iterations, running on the Aer simulator.
spsa = SPSA(100)
qi = QuantumInstance(Aer.get_backend('aer_simulator'))
vqe_circuit = VQE(ansatz=circuit, quantum_instance=qi, optimizer=spsa)
print(circuit)
|
https://github.com/JouziP/MQITE
|
JouziP
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 6 12:15:52 2022
@author: pejmanjouzdani
"""
import numpy as np
from qiskit import QuantumCircuit
################################################################
################################################################
def getRandomU(nspins, num_layers=10):
    """Build a random circuit on `nspins` qubits.

    Each layer applies, `nspins` times: one random rotation (RX, RY or RZ with
    a uniform angle in [-1, 1)) on a random qubit, followed by a CNOT between
    a random pair of adjacent qubits.

    Args:
        nspins (int): number of qubits.
        num_layers (int): number of layers (default 10).
    Returns:
        QuantumCircuit: the random circuit.
    """
    circ_U = QuantumCircuit(nspins)
    for l in range(num_layers):
        for i in range(nspins):
            ##############
            q = np.random.randint(nspins)
            g = np.random.randint(1, 4)  # gate selector: 1, 2 or 3
            p = np.random.uniform(-1, 1)
            if g == 1:
                circ_U.rx(p, q)
            elif g == 2:
                circ_U.ry(p, q)
            elif g == 3:
                # Bug fix: this branch previously re-tested g == 2, so RZ was
                # applied together with RY and never selected on its own.
                circ_U.rz(p, q)
            ##############
            q = np.random.randint(nspins - 1)
            circ_U.cnot(q, q + 1)
    return circ_U
|
https://github.com/2lambda123/Qiskit-qiskit
|
2lambda123
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for drawing of timeline drawer."""
import numpy as np
import qiskit
from qiskit.test import QiskitTestCase
from qiskit.visualization.timeline import drawings, types
class TestDrawingObjects(QiskitTestCase):
    """Tests for drawings.

    Each test builds two drawing objects that differ only in their metadata
    and style dictionaries and asserts they still compare equal, i.e. equality
    of drawing objects ignores `meta` and `styles`.
    """

    def setUp(self) -> None:
        """Setup."""
        super().setUp()
        # bits
        self.qubits = list(qiskit.QuantumRegister(2))
        # metadata
        self.meta1 = {"val1": 0, "val2": 1}
        self.meta2 = {"val1": 2, "val2": 3}
        # style data
        self.style1 = {"property1": 0, "property2": 1}
        self.style2 = {"property1": 2, "property2": 3}

    def test_line_data_equivalent(self):
        """Test LineData equivalent check."""
        xs = list(np.arange(10))
        ys = list(np.ones(10))
        obj1 = drawings.LineData(
            data_type=types.LineType.BARRIER,
            bit=self.qubits[0],
            xvals=xs,
            yvals=ys,
            meta=self.meta1,
            styles=self.style1,
        )
        obj2 = drawings.LineData(
            data_type=types.LineType.BARRIER,
            bit=self.qubits[0],
            xvals=xs,
            yvals=ys,
            meta=self.meta2,
            styles=self.style2,
        )
        self.assertEqual(obj1, obj2)

    def test_line_data_equivalent_abstract_coord(self):
        """Test LineData equivalent check with abstract coordinate."""
        xs = [types.AbstractCoordinate.LEFT, types.AbstractCoordinate.RIGHT]
        ys = [types.AbstractCoordinate.BOTTOM, types.AbstractCoordinate.TOP]
        obj1 = drawings.LineData(
            data_type=types.LineType.BARRIER,
            bit=self.qubits[0],
            xvals=xs,
            yvals=ys,
            meta=self.meta1,
            styles=self.style1,
        )
        obj2 = drawings.LineData(
            data_type=types.LineType.BARRIER,
            bit=self.qubits[0],
            xvals=xs,
            yvals=ys,
            meta=self.meta2,
            styles=self.style2,
        )
        self.assertEqual(obj1, obj2)

    def test_box_data_equivalent(self):
        """Test BoxData equivalent check."""
        xs = [0, 1]
        ys = [0, 1]
        obj1 = drawings.BoxData(
            data_type=types.BoxType.SCHED_GATE,
            bit=self.qubits[0],
            xvals=xs,
            yvals=ys,
            meta=self.meta1,
            styles=self.style1,
        )
        obj2 = drawings.BoxData(
            data_type=types.BoxType.SCHED_GATE,
            bit=self.qubits[0],
            xvals=xs,
            yvals=ys,
            meta=self.meta2,
            styles=self.style2,
        )
        self.assertEqual(obj1, obj2)

    def test_box_data_equivalent_abstract_coord(self):
        """Test BoxData equivalent check with abstract coordinate."""
        xs = [types.AbstractCoordinate.LEFT, types.AbstractCoordinate.RIGHT]
        ys = [types.AbstractCoordinate.BOTTOM, types.AbstractCoordinate.TOP]
        obj1 = drawings.BoxData(
            data_type=types.BoxType.SCHED_GATE,
            bit=self.qubits[0],
            xvals=xs,
            yvals=ys,
            meta=self.meta1,
            styles=self.style1,
        )
        obj2 = drawings.BoxData(
            data_type=types.BoxType.SCHED_GATE,
            bit=self.qubits[0],
            xvals=xs,
            yvals=ys,
            meta=self.meta2,
            styles=self.style2,
        )
        self.assertEqual(obj1, obj2)

    def test_text_data_equivalent(self):
        """Test TextData equivalent check."""
        obj1 = drawings.TextData(
            data_type=types.LabelType.GATE_NAME,
            bit=self.qubits[0],
            xval=0,
            yval=0,
            text="test",
            latex="test",
            meta=self.meta1,
            styles=self.style1,
        )
        obj2 = drawings.TextData(
            data_type=types.LabelType.GATE_NAME,
            bit=self.qubits[0],
            xval=0,
            yval=0,
            text="test",
            latex="test",
            meta=self.meta2,
            styles=self.style2,
        )
        self.assertEqual(obj1, obj2)

    def test_text_data_equivalent_abstract_coord(self):
        """Test TextData equivalent check with abstract coordinate."""
        obj1 = drawings.TextData(
            data_type=types.LabelType.GATE_NAME,
            bit=self.qubits[0],
            xval=types.AbstractCoordinate.LEFT,
            yval=types.AbstractCoordinate.BOTTOM,
            text="test",
            latex="test",
            meta=self.meta1,
            styles=self.style1,
        )
        obj2 = drawings.TextData(
            data_type=types.LabelType.GATE_NAME,
            bit=self.qubits[0],
            xval=types.AbstractCoordinate.LEFT,
            yval=types.AbstractCoordinate.BOTTOM,
            text="test",
            latex="test",
            meta=self.meta2,
            styles=self.style2,
        )
        self.assertEqual(obj1, obj2)

    def test_bit_link_data_equivalent(self):
        """Test BitLinkData equivalent check."""
        obj1 = drawings.GateLinkData(
            bits=[self.qubits[0], self.qubits[1]], xval=0, styles=self.style1
        )
        obj2 = drawings.GateLinkData(
            bits=[self.qubits[0], self.qubits[1]], xval=0, styles=self.style2
        )
        self.assertEqual(obj1, obj2)

    def test_bit_link_data_equivalent_abstract_coord(self):
        """Test BitLinkData equivalent check with abstract coordinate."""
        obj1 = drawings.GateLinkData(
            bits=[self.qubits[0], self.qubits[1]],
            xval=types.AbstractCoordinate.LEFT,
            styles=self.style1,
        )
        obj2 = drawings.GateLinkData(
            bits=[self.qubits[0], self.qubits[1]],
            xval=types.AbstractCoordinate.LEFT,
            styles=self.style2,
        )
        self.assertEqual(obj1, obj2)
|
https://github.com/jonasmaziero/computacao_quantica_qiskit
|
jonasmaziero
| |
https://github.com/tomtuamnuq/compare-qiskit-ocean
|
tomtuamnuq
|
import os
import shutil
import time
from docplex.mp.error_handler import DOcplexException
from random_lp.random_qp import RandomQP
DIR = 'TEST_DATA' + "/" + time.strftime("%d_%m_%Y")
def getPath(filename = "", directory = ""):
    """Build 'DIR/<directory>/<filename>' using the module-level DIR prefix."""
    return "/".join([DIR, directory, filename])
DIR
# Recreate a clean SPARSE output directory.
shutil.rmtree(getPath(directory = "SPARSE"), ignore_errors=True)
os.makedirs(getPath(directory = "SPARSE"))
# create sparse random binary quadratic Programs
# 3 variables with 2 constraints each
max_qubits = 290
var = 3
cstr = 2
multiple = 10
# Grow `multiple` until the generated program's complexity exceeds max_qubits,
# writing each program out as an .lp file along the way.
while True:
    qp_bin = RandomQP.create_random_binary_prog("test_sparse_" + str(multiple), cstr, var, multiple=multiple)
    try:
        qp_bin.write_to_lp_file(getPath(qp_bin.name, directory = "SPARSE"))
        if qp_bin.complexity() > max_qubits :
            print(multiple)
            break
        # Take bigger steps once the problems get large.
        if qp_bin.complexity() > 100 :
            multiple = multiple + 6
        else:
            multiple = multiple + 3
    except DOcplexException as ex:
        print(ex)
print(qp_bin.complexity())
qp_bin.qubo.to_docplex().prettyprint()
|
https://github.com/abbarreto/qiskit4
|
abbarreto
|
from qiskit import *
import numpy as np
import math
import qiskit
# Default shot count for hardware runs (overridden later for the 1-shot loop).
nshots = 8192
IBMQ.load_account()
provider = qiskit.IBMQ.get_provider(hub='ibm-q', group='open', project='main')
device = provider.get_backend('ibmq_manila')
simulator = Aer.get_backend('qasm_simulator')
from qiskit.tools.monitor import job_monitor
#from qiskit.ignis.verification.tomography import state_tomography_circuits, StateTomographyFitter
#from qiskit.ignis.mitigation.measurement import complete_meas_cal, CompleteMeasFitter
from qiskit.visualization import plot_histogram
def qc_bb84():
    """Build the 4-qubit BB84-style circuit with classically conditioned gates.

    Returns:
        QuantumCircuit: the constructed circuit (qubit 0 is the signal qubit;
        measurements on qubits 1-3 condition the gates applied to it).
    """
    qreg = QuantumRegister(4)
    creg = ClassicalRegister(4)
    circ = QuantumCircuit(qreg, creg)
    # Random bits on qubits 1 and 2 condition an X and an H on qubit 0.
    circ.h([1, 2])
    circ.measure([1, 2], [1, 2])
    circ.x(0).c_if(creg[1], 1)
    circ.h(0).c_if(creg[2], 1)
    circ.barrier()
    circ.barrier()
    # Random bit on qubit 3 conditions a basis change before measuring qubit 0.
    circ.h(3)
    circ.measure(qreg[3], creg[3])
    circ.h(0).c_if(creg[3], 1)
    circ.measure(0, 0)
    return circ
qc_bb84_ = qc_bb84(); qc_bb84_.draw('mpl')
nshots = 1
N = 100
counts = []
# Run the circuit N times with a single shot each, collecting one counts dict
# per run.
for j in range(0,N):
    # Bug fix: `qc` was undefined here; the circuit built above is the one to run.
    job_sim = execute(qc_bb84_, backend=simulator, shots=nshots)
    counts_sim = job_sim.result().get_counts(qc_bb84_)
    counts.append(counts_sim)
counts[0:5]
counts_keys = [j for j in counts]
counts_keys
k=0;l=1;m=0;n=0
s= str(k)+str(l)+str(m)+str(n)
s
eo = [] # observable chosen by Alice and Bob (same = 0, different = 1)
for j in range(0,N):
    for k in range(0,2):
        for l in range(0,2):
            for m in range(0,2):
                for n in range(0,2):
                    s = str(n) + str(m) + str(l) + str(k)
                    # Bug fix: indexing counts[j][s] raised KeyError for
                    # outcomes absent from a 1-shot run (and `in counts`
                    # compared against the list, not the dict). Test
                    # membership in this run's counts dict instead.
                    if s in counts[j] and counts[j][s] == 1:
                        if l==0 and m==0:
                            eo.append(0)
                        else:
                            eo.append(1)
eo
qr = QuantumRegister(4)
|
https://github.com/indian-institute-of-science-qc/qiskit-aakash
|
indian-institute-of-science-qc
|
# Copyright 2022-2023 Ohad Lev.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0,
# or in the root directory of this package("LICENSE.txt").
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `util.py` module."""
import unittest
from datetime import datetime
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from sat_circuits_engine.util import flatten_circuit, timestamp
class UtilTest(unittest.TestCase):
    """Unit tests for the `timestamp` and `flatten_circuit` utilities."""

    def test_timestamp(self):
        """`timestamp` formats a datetime as D<dd>.<mm>.<yy>_T<HH>.<MM>.<SS>."""
        moment = datetime(2022, 12, 3, 17, 0, 45, 0)
        self.assertEqual(timestamp(moment), "D03.12.22_T17.00.45")

    def test_flatten_circuit(self):
        """`flatten_circuit` keeps all bits but collapses to one register each."""
        n_a, n_b = 2, 3
        circuit = QuantumCircuit(
            QuantumRegister(n_a),
            QuantumRegister(n_b),
            ClassicalRegister(n_a),
            ClassicalRegister(n_b),
        )
        # Sanity check on the pre-flattening layout: two registers of each kind.
        self.assertEqual(circuit.num_qubits, n_a + n_b)
        self.assertEqual(circuit.num_clbits, n_a + n_b)
        self.assertEqual(len(circuit.qregs), 2)
        self.assertEqual(len(circuit.cregs), 2)
        flat = flatten_circuit(circuit)
        # Bit counts are preserved; registers are merged into one of each kind.
        self.assertEqual(flat.num_qubits, n_a + n_b)
        self.assertEqual(flat.num_clbits, n_a + n_b)
        self.assertEqual(len(flat.qregs), 1)
        self.assertEqual(len(flat.cregs), 1)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
https://github.com/nielsaNTNU/qiskit_utilities
|
nielsaNTNU
|
import numpy as np
import os
import datetime
import time
import pickle
from qiskit import *
from qiskit.providers.jobstatus import JOB_FINAL_STATES, JobStatus
def start_or_retrieve_job(filename, backend, circuit=None, options=None):
    """function that
    1) retrieves the job from the backend if saved to file,
    2) or executes a job on a backend and saves it to file
    Parameters
    ----------
    filename : string
        The filename to write/read from. The extension ".job" is
        automatically appended to the string.
    backend : qiskit.providers.ibmq.ibmqbackend.IBMQBackend
        The backend where the job has been/is to be executed.
    circuit : qiskit.circuit.quantumcircuit.QuantumCircuit, optional
        The circuit that is to be executed.
    options: dict, optional
        The following is a list of all options and their default value
        options={'shots': 1024, 'forcererun': False, 'useapitoken': False, 'directory': 'jobs'}
        the directory is created if it does not exist
    Returns
    -------
    job : qiskit.providers.ibmq.job.ibmqjob.IBMQJob,
        qiskit.providers.aer.aerjob.AerJob
    """
    ### options parsing
    if options == None:
        options={}
    shots = options.get('shots', 1024)
    forcererun = options.get('forcererun', False)
    useapitoken = options.get('useapitoken', False)
    directory = options.get('directory', 'jobs')
    filename = filename+'.job'
    if not os.path.exists(directory):
        os.makedirs(directory)
    if not(forcererun) and os.path.isfile(directory+'/'+filename):
        #read job id from file and retrieve the job
        # The job file stores three lines: API token, backend name, job id.
        with open(directory+'/'+filename, 'r') as f:
            apitoken = f.readline().rstrip()
            backendname = f.readline().rstrip()
            job_id = f.readline().rstrip()
        if useapitoken:
            # Re-authenticate with the token stored in the job file, and make
            # sure the stored backend matches the one requested.
            IBMQ.save_account(apitoken, overwrite=True)
            IBMQ.load_account()
            provider = IBMQ.get_provider(hub='ibm-q')
            backend_tmp = provider.get_backend(backendname)
            if backend.name() != backend_tmp.name():
                raise Exception("The backend of the job was "+backend_tmp.name()+", but you requested "+backend.name())
            job = backend_tmp.retrieve_job(job_id)
        else:
            job = backend.retrieve_job(job_id)
    else:
        # otherwise start the job and write the id to file
        # Keep retrying submission; on failure wait `sec` seconds and try again
        # (error code 3458 means no credits are currently available).
        hasnotrun = True
        while hasnotrun:
            error = False
            try:
                job = execute(circuit, backend, shots=int(shots))
            except Exception as e:
                error = True
                sec = 60
                if "Error code: 3458" in str(e):
                    print(filename +' No credits available, retry in '+str(sec)+' seconds'+', time='+str(datetime.datetime.now()), end='\r')
                else:
                    print('{j} Error! Code: {c}, Message: {m}, Time {t}'.format(j=str(filename), c = type(e).__name__, m = str(e), t=str(datetime.datetime.now())), ", retry in ",str(sec),' seconds', end='\r')
                time.sleep(sec)
            if not(error):
                hasnotrun = False
        job_id = job.job_id()
        apitoken = IBMQ.active_account()['token']
        backendname = backend.name()
        # Persist token, backend and job id so the job can be retrieved later.
        if job_id != '':
            file = open(directory+'/'+filename,'w')
            file.write(apitoken+'\n')
            file.write(backendname+'\n')
            file.write(job_id)
            file.close()
    return job
def write_results(filename, job, options=None):
    """Write the results of a finished job to '<directory>/<filename>.result'.

    Parameters
    ----------
    filename : string
        Base filename; the ".result" extension is appended automatically.
    job : qiskit.providers.ibmq.job.ibmqjob.IBMQJob,
          qiskit.providers.aer.aerjob.AerJob
        The job to get the results from.
    options : dict, optional
        Defaults: {'overwrite': False, 'directory': 'results'}.

    Returns
    -------
    success : bool
        True if the results were written; False e.g. when the job has not
        finished successfully, or the file exists and overwrite is disabled.
    """
    ### options parsing
    opts = {} if options is None else options
    overwrite = opts.get('overwrite', False)
    directory = opts.get('directory', 'results')
    target = directory + '/' + filename + '.result'
    if not os.path.exists(directory):
        os.makedirs(directory)
    success = False
    # Write only when allowed (missing file, or overwrite requested) and the
    # job has actually completed.
    if overwrite or not os.path.isfile(target):
        if job.status() == JobStatus.DONE:
            with open(target, 'wb') as fh:
                pickle.dump(job.result().results, fh)
            success = True
    return success
def read_results(filename, options=None):
    """Read previously written job results from '<directory>/<filename>.result'.

    Parameters
    ----------
    filename : string
        Base filename; the ".result" extension is appended automatically.
    options : dict, optional
        Defaults: {'directory': 'results'}.

    Returns
    -------
    results : Object or None
        The unpickled `job.result().results`, or None if the file is missing.
    """
    ### options parsing
    opts = {} if options is None else options
    directory = opts.get('directory', 'results')
    target = directory + '/' + filename + '.result'
    results = None
    if os.path.isfile(target):
        with open(target, 'rb') as fh:
            results = pickle.load(fh)
    return results
def get_id_error_rate(backend):
    """Return the reported error rate of every 'id' gate on the backend."""
    return [g.parameters[0].value
            for g in backend.properties().gates
            if g.gate == 'id']
def get_U3_error_rate(backend):
    """Return the reported error rate of every 'u3' gate on the backend."""
    return [g.parameters[0].value
            for g in backend.properties().gates
            if g.gate == 'u3']
def get_T1(backend):
    """Return (values, units) of the T1 time for every qubit of the backend.

    Returns:
        tuple[list, list]: per-qubit T1 values and their units.
    """
    val = []
    unit = []
    # Fetch the device properties once instead of once per qubit, and drop the
    # previously unused `gates` local.
    qubit_props = backend.properties().qubits
    for i in range(backend.configuration().n_qubits):
        entry = qubit_props[i][0]  # T1 is the first property entry per qubit
        assert entry.name == 'T1'
        val.append(entry.value)
        unit.append(entry.unit)
    return val, unit
def get_T2(backend):
    """Return (values, units) of the T2 time for every qubit of the backend.

    Returns:
        tuple[list, list]: per-qubit T2 values and their units.
    """
    val = []
    unit = []
    # Fetch the device properties once instead of once per qubit, and drop the
    # previously unused `gates` local.
    qubit_props = backend.properties().qubits
    for i in range(backend.configuration().n_qubits):
        entry = qubit_props[i][1]  # T2 is the second property entry per qubit
        assert entry.name == 'T2'
        val.append(entry.value)
        unit.append(entry.unit)
    return val, unit
def get_readouterrors(backend):
    """Return the readout error for every qubit of the backend."""
    val = []
    # Fetch the device properties once instead of once per qubit, and drop the
    # previously unused `gates` local.
    qubit_props = backend.properties().qubits
    for i in range(backend.configuration().n_qubits):
        entry = qubit_props[i][3]  # readout_error is the 4th entry per qubit
        assert entry.name == 'readout_error'
        val.append(entry.value)
    return val
def get_prob_meas0_prep1(backend):
    """Return P(measure 0 | prepared 1) for every qubit of the backend."""
    val = []
    # Fetch the device properties once instead of once per qubit, and drop the
    # previously unused `gates` local.
    qubit_props = backend.properties().qubits
    for i in range(backend.configuration().n_qubits):
        entry = qubit_props[i][4]  # prob_meas0_prep1 is the 5th entry per qubit
        assert entry.name == 'prob_meas0_prep1'
        val.append(entry.value)
    return val
def get_prob_meas1_prep0(backend):
    """Return P(measure 1 | prepared 0) for every qubit of the backend."""
    val = []
    # Fetch the device properties once instead of once per qubit, and drop the
    # previously unused `gates` local.
    qubit_props = backend.properties().qubits
    for i in range(backend.configuration().n_qubits):
        entry = qubit_props[i][5]  # prob_meas1_prep0 is the 6th entry per qubit
        assert entry.name == 'prob_meas1_prep0'
        val.append(entry.value)
    return val
def get_cx_error_map(backend):
    """
    Return an n x n numpy array where entry [p][q] is the reported error rate
    of the CX gate from qubit p to qubit q (0.0 when no such gate exists).
    """
    num_qubits = backend.configuration().n_qubits
    two_qubit_error_map = np.zeros((num_qubits, num_qubits))
    for gate in backend.properties().gates:
        if gate.gate != 'cx':
            continue
        error = gate.parameters[0].value
        # Parse the qubit pair straight out of the gate name ('cx<p>_<q>')
        # instead of the previous O(n^2) scan over all (p, q) combinations.
        try:
            p, q = (int(s) for s in gate.name[2:].split('_'))
        except ValueError:
            continue  # unexpected name format: skip, like the old non-match case
        if 0 <= p < num_qubits and 0 <= q < num_qubits and p != q:
            two_qubit_error_map[p][q] = error
    return two_qubit_error_map
def getNumberOfControlledGates(circuit):
    """function that returns the number of CX, CY, CZ gates.
    N.B.: swap gates are counted as 3 CX gates.
    """
    tally = {"cx": 0, "cy": 0, "cz": 0}
    for instr, _qargs, _cargs in circuit.data:
        name = instr.qasm()
        if name == "swap":
            # A swap decomposes into three CX gates.
            tally["cx"] += 3
        elif name in tally:
            tally[name] += 1
    return tally["cx"], tally["cy"], tally["cz"]
def convert_to_binarystring(results, num_bits=5):
    """Convert hex-attribute count objects into binary-string count dicts.

    Each entry of `results` is expected to expose `.data.counts`, an object
    whose attributes are hex strings (e.g. '0x3') holding shot counts.

    Args:
        results: sequence of experiment result objects.
        num_bits (int): bit width of the binary keys (default 5, matching the
            previous hard-coded behavior).
    Returns:
        list[dict]: one {binary_string: count} dict per result.
    """
    # Renamed locals: the originals shadowed the builtins `list` and `dict`.
    converted = []
    for res in results:
        counts_obj = res.data.counts
        bin_counts = {}
        for i in range(2 ** num_bits):
            key = hex(i)
            if hasattr(counts_obj, key):
                bin_counts["{0:b}".format(i).zfill(num_bits)] = getattr(counts_obj, key)
        converted.append(bin_counts)
    return converted
|
https://github.com/qiskit-community/prototype-zne
|
qiskit-community
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022-2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from collections.abc import Sequence
from itertools import count, product
from unittest.mock import Mock
from numpy import array
from pytest import fixture, mark, raises, warns
from qiskit import QuantumCircuit
from qiskit.circuit.random import random_circuit
from qiskit.primitives import EstimatorResult
from zne.extrapolation import Extrapolator, LinearExtrapolator
from zne.noise_amplification import NoiseAmplifier
from zne.noise_amplification.folding_amplifier import MultiQubitAmplifier
from zne.zne_strategy import ZNEStrategy
from . import NO_ITERS_NONE, NO_NONE
################################################################################
## FIXTURES
################################################################################
@fixture(scope="function")
def amplifier_mock():
    """NoiseAmplifier mock whose amplify_circuit_noise yields 0, 1, 2, ... on
    successive calls (via itertools.count)."""
    amplifier = Mock(NoiseAmplifier)
    amplifier.amplify_circuit_noise.side_effect = count()
    return amplifier
@fixture(scope="function")
def extrapolator_mock():
    """Extrapolator mock whose infer always returns (1, 0, {})."""
    def infer(target, data):
        # Unzip to mirror the real data shape; the values are discarded.
        _, y, _ = zip(*data)
        return 1, 0, {}
    extrapolator = Mock(Extrapolator)
    extrapolator.infer.side_effect = infer
    return extrapolator
################################################################################
## TESTS
################################################################################
def test_definition():
    """ZNEStrategy must be defined by exactly these three attributes, in this order."""
    expected = ("noise_factors", "noise_amplifier", "extrapolator")
    assert ZNEStrategy._DEFINING_ATTRS == expected
class TestInit:
    """Test ZNEStrategy initialization logic."""

    def test_defaults(self):
        """A bare ZNEStrategy gets the documented default components."""
        strategy = ZNEStrategy()
        assert strategy.noise_amplifier == MultiQubitAmplifier()
        assert strategy.noise_factors == (1,)
        assert strategy.extrapolator == LinearExtrapolator()

    @mark.parametrize("noise_factors", [(1,), (1, 3), (1, 3, 5)])
    def test_custom(self, noise_factors):
        """Test custom configuration.

        Proper inputs can be assumed since validation is tested separately.
        """
        amplifier = Mock(NoiseAmplifier)
        extrapolator = Mock(Extrapolator)
        strategy = ZNEStrategy(
            noise_factors=noise_factors,
            noise_amplifier=amplifier,
            extrapolator=extrapolator,
        )
        assert strategy.noise_factors == noise_factors
        assert strategy.noise_amplifier is amplifier
        assert strategy.extrapolator is extrapolator
@mark.parametrize(
    "noise_factors",
    [(1,), (1, 3), (1, 3, 5)],
)
class TestMagic:
    """Test generic ZNEStrategy magic methods.

    The class-level parametrization above supplies ``noise_factors`` to every
    test method in this class.
    """
    def test_repr(self, noise_factors):
        """Test ZNEStrategy.__repr__() magic method."""
        noise_amplifier = Mock(NoiseAmplifier)
        extrapolator = Mock(Extrapolator)
        zne_strategy = ZNEStrategy(
            noise_factors=noise_factors,
            noise_amplifier=noise_amplifier,
            extrapolator=extrapolator,
        )
        # repr must list the three defining attributes in declaration order
        expected = "ZNEStrategy("
        expected += f"noise_factors={repr(noise_factors)}, "
        expected += f"noise_amplifier={repr(noise_amplifier)}, "
        expected += f"extrapolator={repr(extrapolator)})"
        assert repr(zne_strategy) == expected
    def test_eq(self, noise_factors):
        """Test ZNEStrategy.__eq__() magic method."""
        noise_amplifier = Mock(NoiseAmplifier)
        extrapolator = Mock(Extrapolator)
        zne_strategy = ZNEStrategy(
            noise_factors=noise_factors,
            noise_amplifier=noise_amplifier,
            extrapolator=extrapolator,
        )
        assert zne_strategy == ZNEStrategy(
            noise_factors=noise_factors,
            noise_amplifier=noise_amplifier,
            extrapolator=extrapolator,
        )
        # differing in any single defining attribute breaks equality
        assert zne_strategy != ZNEStrategy(
            noise_factors=(*noise_factors, 707),
            noise_amplifier=noise_amplifier,
            extrapolator=extrapolator,
        )
        assert zne_strategy != ZNEStrategy(
            noise_factors=noise_factors,
            noise_amplifier=Mock(NoiseAmplifier),
            extrapolator=extrapolator,
        )
        assert zne_strategy != ZNEStrategy(
            noise_factors=noise_factors,
            noise_amplifier=noise_amplifier,
            extrapolator=Mock(Extrapolator),
        )
        # comparison against a non-ZNEStrategy object is never equal
        assert zne_strategy != "zne_strategy"
    def test_bool(self, noise_factors):
        """Test ZNEStrategy.__bool__() magic method."""
        noise_amplifier = Mock(NoiseAmplifier)
        extrapolator = Mock(Extrapolator)
        zne_strategy = ZNEStrategy(
            noise_factors=noise_factors,
            noise_amplifier=noise_amplifier,
            extrapolator=extrapolator,
        )
        # a strategy is truthy exactly when it is not a no-op
        truth_value = not zne_strategy.is_noop
        assert bool(zne_strategy) is truth_value
class TestConstructors:
    """Test ZNEStrategy constructors."""

    def test_noop(self):
        """The noop() constructor must produce a no-op strategy."""
        assert ZNEStrategy.noop().is_noop
class TestNoiseFactors:
    """Test ZNEStrategy `noise_factors` property."""
    def test_default(self):
        """Assigning None restores the default (1,) noise factors."""
        zne_strategy = ZNEStrategy()
        zne_strategy.noise_factors = None
        assert zne_strategy.noise_factors == (1,)
    @mark.parametrize(
        "noise_factors",
        cases := [(1,), (3,), (1, 3), (1, 3, 5), [1, 3, 5], [1.2, 3, 5.4]],
        ids=[f"{nf}" for nf in cases],
    )
    def test_dispatch(self, noise_factors):
        """Test proper noise factors of different types."""
        zne_strategy = ZNEStrategy()
        zne_strategy.noise_factors = noise_factors
        # the setter normalizes any Sequence to a tuple
        assert zne_strategy.noise_factors == tuple(noise_factors)
    @mark.parametrize(
        "noise_factors, expected",
        cases := list(
            zip(
                [(1, 5, 3), (3.3, 1.2, 5.4)],
                [(1, 3, 5), (1.2, 3.3, 5.4)],
            )
        ),
        ids=[f"{nf}" for nf, _ in cases],
    )
    def test_sort(self, noise_factors, expected):
        """Test unsorted noise factors."""
        zne_strategy = ZNEStrategy()
        # the setter sorts the factors and warns the user about it
        with warns(UserWarning):
            zne_strategy.noise_factors = noise_factors
        assert zne_strategy.noise_factors == expected
    @mark.parametrize(
        "noise_factors, expected",
        cases := list(
            zip(
                [(1, 1), (1, 3, 1, 5), (5, 5, 3), (2.4, 2.4)],
                [(1,), (1, 3, 5), (3, 5), (2.4,)],
            )
        ),
        ids=[f"{nf}" for nf, _ in cases],
    )
    def test_duplicates(self, noise_factors, expected):
        """Test duplicate noise factors."""
        zne_strategy = ZNEStrategy()
        # the setter deduplicates the factors and warns the user about it
        with warns(UserWarning):
            zne_strategy.noise_factors = noise_factors
        assert zne_strategy.noise_factors == expected
    @mark.parametrize(
        "noise_factors",
        cases := NO_ITERS_NONE,
        ids=[f"{type(c)}" for c in cases],
    )
    def test_sequence(self, noise_factors):
        """Test type error is raised if noise factors are not Sequence."""
        zne_strategy = ZNEStrategy()
        with raises(TypeError):
            zne_strategy.noise_factors = noise_factors
    @mark.parametrize(
        "noise_factors",
        cases := [(), []],
        ids=[f"{type(c)}" for c in cases],
    )
    def test_empty(self, noise_factors):
        """Test value error is raised for empty lists of noise factors."""
        zne_strategy = ZNEStrategy()
        with raises(ValueError):
            zne_strategy.noise_factors = noise_factors
    @mark.parametrize(
        "noise_factors",
        cases := ["1", True, False, float("NaN"), [1, 3, "5"]],
        ids=[f"{type(c)}" for c in cases],
    )
    def test_real(self, noise_factors):
        """Test type error is raised if noise factors are not real numbers."""
        # scalar cases are wrapped so the setter always sees a Sequence
        if not isinstance(noise_factors, Sequence):
            noise_factors = [noise_factors]
        zne_strategy = ZNEStrategy()
        with raises(TypeError):
            zne_strategy.noise_factors = noise_factors
    @mark.parametrize(
        "noise_factors",
        cases := [0, 0.9999, -1, -0.5, (1, 0), (0.9, 1.2)],
        ids=[f"{c}" for c in cases],
    )
    def test_geq_one(self, noise_factors):
        """Test value error is raised if any noise factor is less than one."""
        # scalar cases are wrapped so the setter always sees a Sequence
        if not isinstance(noise_factors, Sequence):
            noise_factors = [noise_factors]
        zne_strategy = ZNEStrategy()
        with raises(ValueError):
            zne_strategy.noise_factors = noise_factors
class TestNoiseAmplifier:
    """Test ZNEStrategy `noise_amplifier` property."""

    def test_default(self):
        """Assigning None restores the default MultiQubitAmplifier."""
        strategy = ZNEStrategy()
        strategy.noise_amplifier = None
        assert strategy.noise_amplifier == MultiQubitAmplifier()

    @mark.parametrize(
        "noise_amplifier",
        cases := NO_NONE,
        ids=[f"{type(c)}" for c in cases],
    )
    def test_type_error(self, noise_amplifier):
        """Test type error is raised if not `NoiseAmplifier`."""
        strategy = ZNEStrategy()
        with raises(TypeError):
            strategy.noise_amplifier = noise_amplifier
class TestExtrapolator:
    """Test ZNEStrategy `extrapolator` property."""
    def test_default(self):
        """Assigning None restores the default LinearExtrapolator."""
        zne_strategy = ZNEStrategy()
        zne_strategy.extrapolator = None
        assert zne_strategy.extrapolator == LinearExtrapolator()
    @mark.parametrize(
        "extrapolator",
        cases := NO_NONE,
        ids=[f"{type(c)}" for c in cases],
    )
    def test_type_error(self, extrapolator):
        # Fixed copy-paste error: this docstring previously said `NoiseAmplifier`.
        """Test type error is raised if not `Extrapolator`."""
        zne_strategy = ZNEStrategy()
        with raises(TypeError):
            zne_strategy.extrapolator = extrapolator
class TestProperties:
    """Test generic ZNEStrategy properties."""

    @mark.parametrize("noise_factors", [(1,), (1, 3), (1.2,), (2.1, 4.5)])
    def test_performs_noise_amplification(self, noise_factors):
        """Test if ZNEStrategy performs noise amplification."""
        strategy = ZNEStrategy(noise_factors=noise_factors)
        # amplification happens whenever some factor exceeds one
        expected = any(factor > 1 for factor in noise_factors)
        assert bool(strategy.performs_noise_amplification) == expected

    @mark.parametrize("noise_factors", [(1,), (1, 3), (1.2,), (2.1, 4.5)])
    def test_performs_zne(self, noise_factors):
        """Test if ZNEStrategy performs zero noise extrapolation."""
        strategy = ZNEStrategy(noise_factors=noise_factors)
        # ZNE additionally needs at least two distinct factors to extrapolate
        expected = any(factor > 1 for factor in noise_factors) and len(noise_factors) > 1
        assert bool(strategy.performs_zne) == expected

    @mark.parametrize("noise_factors", [(1,), (1, 3), (1.2,), (2.1, 4.5)])
    def test_is_noop(self, noise_factors):
        """Test if ZNEStrategy is no-op."""
        strategy = ZNEStrategy(noise_factors=noise_factors)
        # only the bare (1,) configuration is a no-op
        expected = tuple(noise_factors) == (1,)
        assert bool(strategy.is_noop) == expected
class TestNoiseAmplification:
    """Test ZNEStrategy noise amplification logic."""
    def test_amplify_circuit_noise(self, amplifier_mock):
        """amplify_circuit_noise delegates each call to the configured amplifier."""
        noise_factors = (1, 2, 3)
        zne_strategy = ZNEStrategy(noise_factors=noise_factors, noise_amplifier=amplifier_mock)
        circuit = QuantumCircuit(2)
        # the mock's side effect counts up, so returns identify successive calls
        assert zne_strategy.amplify_circuit_noise(circuit, 1) == 0
        amplifier_mock.amplify_circuit_noise.assert_called_once_with(circuit, 1)
        amplifier_mock.amplify_circuit_noise.reset_mock()
        assert zne_strategy.amplify_circuit_noise(circuit, 1.2) == 1
        amplifier_mock.amplify_circuit_noise.assert_called_once_with(circuit, 1.2)
        circuit.h(0)
        amplifier_mock.amplify_circuit_noise.reset_mock()
        assert zne_strategy.amplify_circuit_noise(circuit, 2.4) == 2
        amplifier_mock.amplify_circuit_noise.assert_called_once_with(circuit, 2.4)
    @mark.parametrize(
        "circuits, noise_factors",
        cases := tuple(
            product(
                [
                    random_circuit(1, 1, seed=0),
                    [],
                    [random_circuit(2, 2, seed=5)],
                    [random_circuit(2, 2, seed=66), random_circuit(2, 2, seed=1081)],
                ],
                [[1], [1, 3], [1, 3, 5]],
            )
        ),
        ids=[f"{type(c).__name__}<{len(c)}>-{nf}" for c, nf in cases],
    )
    def test_build_noisy_circuits(self, amplifier_mock, circuits, noise_factors):
        """build_noisy_circuits amplifies every circuit once per noise factor."""
        zne_strategy = ZNEStrategy(noise_factors=noise_factors, noise_amplifier=amplifier_mock)
        _ = zne_strategy.build_noisy_circuits(circuits)
        # a single circuit input must be treated like a one-element list
        if isinstance(circuits, QuantumCircuit):
            circuits = [circuits]
        assert amplifier_mock.amplify_circuit_noise.call_count == len(circuits) * len(noise_factors)
        for circuit in circuits:
            for noise_factor in noise_factors:
                amplifier_mock.amplify_circuit_noise.assert_any_call(circuit, noise_factor)
    @mark.parametrize(
        "num_noise_factors, arg, expected",
        cases := [
            (1, None, None),
            (1, 0, (0,)),
            (1, [0], (0,)),
            (1, [1], (1,)),
            (1, [2], (2,)),
            (1, [0, 1], (0, 1)),
            (1, [0, 2], (0, 2)),
            (1, [1, 2], (1, 2)),
            (1, [0, 1, 2], (0, 1, 2)),
            (2, None, None),
            (2, 0, (0, 0)),
            (2, [0], (0, 0)),
            (2, [1], (1, 1)),
            (2, [2], (2, 2)),
            (2, [0, 1], (0, 0, 1, 1)),
            (2, [0, 2], (0, 0, 2, 2)),
            (2, [1, 2], (1, 1, 2, 2)),
            (2, [0, 1, 2], (0, 0, 1, 1, 2, 2)),
            (3, None, None),
            (3, 0, (0, 0, 0)),
            (3, [0], (0, 0, 0)),
            (3, [1], (1, 1, 1)),
            (3, [2], (2, 2, 2)),
            (3, [0, 1], (0, 0, 0, 1, 1, 1)),
            (3, [0, 2], (0, 0, 0, 2, 2, 2)),
            (3, [1, 2], (1, 1, 1, 2, 2, 2)),
            (3, [0, 1, 2], (0, 0, 0, 1, 1, 1, 2, 2, 2)),
        ],
        ids=[f"noise<{nnf}>-{id}" for nnf, id, _ in cases],
    )
    def test_map_to_noisy_circuits(self, num_noise_factors, arg, expected):
        """Each input index expands to one index per configured noise factor."""
        zne_strategy = ZNEStrategy(noise_factors=[n for n in range(1, num_noise_factors + 1)])
        assert zne_strategy.map_to_noisy_circuits(arg) == expected
class TestExtrapolation:
    """Test ZNEStrategy extrapolation logic."""
    @mark.parametrize(
        "noise_factors, values, variances, num_results, extrapolate_return",
        [
            ([1, 2, 3], [1, 2, 3], [0, 0, 0], 1, [0, 1, {}]),
            ([1, 2, 3], [1, 2, 3], [0, 0, 0], 1, [0, 1, {"R2": 0.1}]),
            ([1, 2, 3], [1, 2, 3], [0, 0, 0], 2, [0, 1, {}]),
            ([1, 2, 3], [1, 2, 3], [0, 0, 0], 3, [0, 1, {"R2": 0.1, "P": 6.5}]),
        ],
    )
    def test_mitigate_noisy_result(
        self, noise_factors, values, variances, num_results, extrapolate_return
    ):
        """mitigate_noisy_result extrapolates each group and attaches zne metadata."""
        val, err, meta = extrapolate_return
        extrapolator = Mock(Extrapolator)
        extrapolator.extrapolate_zero = Mock(return_value=tuple(extrapolate_return))
        zne_strategy = ZNEStrategy(noise_factors=noise_factors)
        zne_strategy.extrapolator = extrapolator
        metadata = [{"variance": var, "shots": 1024} for var in variances]
        # replicate one experiment's values/metadata `num_results` times
        noisy_result = EstimatorResult(
            values=array(values * num_results), metadata=list(metadata * num_results)
        )
        result = zne_strategy.mitigate_noisy_result(noisy_result)
        assert result.values.tolist() == [val] * num_results
        metadatum = {
            "noise_amplification": {
                "noise_amplifier": zne_strategy.noise_amplifier,
                "noise_factors": tuple(noise_factors),
                "values": tuple(values),
                "variance": tuple(variances),
                "shots": tuple([md["shots"] for md in metadata]),
            },
            "extrapolation": {
                "extrapolator": zne_strategy.extrapolator,
                **meta,
            },
        }
        assert result.metadata == [{"std_error": err, "zne": metadatum} for _ in range(num_results)]
    @mark.parametrize(
        "num_noise_factors, values, variances",
        cases := [
            (1, [0], [0]),
            (1, [0, 1], [0, 1]),
            (2, [0, 0], [0, 0]),
            (2, [0, 0, 1, 1], [0, 0, 1, 1]),
            (3, [0, 0, 0], [0, 0, 0]),
            (3, [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]),
        ],
        ids=[f"nf<{nnf}>-val<{len(val)}>" for nnf, val, _ in cases],
    )
    def test_generate_noisy_result_groups(self, num_noise_factors, values, variances):
        """Results split into consecutive groups of size num_noise_factors."""
        zne_strategy = ZNEStrategy(noise_factors=range(1, num_noise_factors + 1))
        metadata = [{"variance": var} for var in variances]
        result = EstimatorResult(values=array(values), metadata=metadata)
        assert (
            len(tuple(zne_strategy._generate_noisy_result_groups(result)))
            == len(values) / num_noise_factors
        )
        for i, group in enumerate(zne_strategy._generate_noisy_result_groups(result)):
            lower = num_noise_factors * i
            upper = num_noise_factors * (i + 1)
            assert group.values.tolist() == values[lower:upper]
            assert group.metadata == metadata[lower:upper]
    @mark.parametrize(
        "num_noise_factors, num_experiments",
        cases := [(2, 1), (2, 3), (3, 2), (3, 4)],
        ids=[f"nf{nnf}-val{ne}" for nnf, ne in cases],
    )
    def test_generate_noisy_result_groups_value_error(self, num_noise_factors, num_experiments):
        """Grouping fails when experiments are not a multiple of the noise factors."""
        zne_strategy = ZNEStrategy(noise_factors=range(1, num_noise_factors + 1))
        values = array([0] * num_experiments)
        metadata = [{"variance": 0.0}] * num_experiments
        result = EstimatorResult(values=values, metadata=metadata)
        with raises(ValueError):
            # the generator is lazy, so it must be advanced to trigger validation
            generator = zne_strategy._generate_noisy_result_groups(result)
            assert next(generator)
    @mark.parametrize(
        "noise_factors, values, std_errors",
        [
            ([1, 2], [0, 1], [0, 0]),
            ([1, 2.0], [0.4, 1.2], [0, None]),
        ],
    )
    def test_regression_data_from_result_group(self, noise_factors, values, std_errors):
        """Regression data is (factors, values, weights, std-errors); missing variance defaults."""
        zne_strategy = ZNEStrategy(noise_factors=noise_factors)
        metadatum = {"shots": 1024}
        metadata = [
            {"variance": err**2, **metadatum} if err is not None else metadatum
            for err in std_errors
        ]
        result_group = EstimatorResult(values=array(values), metadata=list(metadata))
        data = zne_strategy._regression_data_from_result_group(result_group)
        expected = (
            noise_factors,
            values,
            [1 for _ in noise_factors],
            [1 if err is None else err for err in std_errors],
        )
        for dat, exp in zip(data, expected):
            assert dat == exp
    @mark.parametrize(
        "num_noise_factors, num_experiments",
        cases := [(1, 2), (2, 1), (3, 1), (2, 3)],
        ids=[f"nf<{nnf}>-experiments<{ne}>" for nnf, ne in cases],
    )
    def test_regression_data_from_result_group_value_error(
        self, num_noise_factors, num_experiments
    ):
        """A group whose size differs from the number of noise factors is rejected."""
        zne_strategy = ZNEStrategy(noise_factors=range(1, num_noise_factors + 1))
        values = [1] * num_experiments
        metadata = [{"variance": 0}] * num_experiments
        result_group = EstimatorResult(values=array(values), metadata=list(metadata))
        with raises(ValueError):
            zne_strategy._regression_data_from_result_group(result_group)
    @mark.parametrize(
        "noise_factors, values, metadata, extrapolation, expected_na",
        [
            (
                [1, 2],
                [1, 1],
                [{"variance": 0}, {"variance": 0}],
                {},
                {"noise_factors": (1, 2), "values": (1, 1), "variance": (0, 0)},
            ),
            (
                [1, 2],
                [1, 0],
                [{"variance": 0.1}, {"variance": 0.4}],
                {"R2": 0.98},
                {"noise_factors": (1, 2), "values": (1, 0), "variance": (0.1, 0.4)},
            ),
            (
                [1, 2, 3],
                [0, 1.5, 2.4],
                [
                    {"variance": 0.11, "shots": 2048},
                    {"variance": 0.1, "shots": 1024},
                    {"variance": 0.12, "shots": 4096},
                ],
                {"R2": 0.44, "P": 6.5},
                {
                    "noise_factors": (1, 2, 3),
                    "values": (0, 1.5, 2.4),
                    "variance": (0.11, 0.1, 0.12),
                    "shots": (2048, 1024, 4096),
                },
            ),
            (
                [1, 2, 3],
                [0, 1.5, 2.4],
                [
                    {"variance": 0.11, "shots": 2048},
                    {"variance": 0.1, "shots": 1024, "backend": "ibmq-nugget"},
                    {"variance": 0.12, "shots": 4096, "seconds": 3600},
                ],
                {"R2": 0.44, "P": 6.5},
                {
                    "noise_factors": (1, 2, 3),
                    "values": (0, 1.5, 2.4),
                    "variance": (0.11, 0.1, 0.12),
                    "shots": (2048, 1024, 4096),
                    "backend": (None, "ibmq-nugget", None),
                    "seconds": (None, None, 3600),
                },
            ),
        ],
    )
    def test_build_zne_metadata(self, noise_factors, values, metadata, extrapolation, expected_na):
        """Metadata merges per-factor noise-amplification info with extrapolation info."""
        zne_strategy = ZNEStrategy(noise_factors=noise_factors)
        result_group = EstimatorResult(values=array(values), metadata=list(metadata))
        computed = zne_strategy.build_zne_metadata(result_group, extrapolation)
        expected_na = {
            "noise_amplifier": zne_strategy.noise_amplifier,
            **expected_na,
        }
        expected_ex = {
            "extrapolator": zne_strategy.extrapolator,
            **extrapolation,
        }
        assert computed.get("noise_amplification") == expected_na
        assert computed.get("extrapolation") == expected_ex
    @mark.parametrize(
        "num_noise_factors, num_experiments",
        cases := [(1, 2), (2, 1), (3, 1), (2, 3)],
        ids=[f"nf<{nnf}>-experiments<{ne}>" for nnf, ne in cases],
    )
    def test_build_zne_metadata_value_error(self, num_noise_factors, num_experiments):
        """Metadata building rejects groups whose size mismatches the noise factors."""
        zne_strategy = ZNEStrategy(noise_factors=range(1, num_noise_factors + 1))
        values = array([1] * num_experiments)
        metadata = [{"variance": 0}] * num_experiments
        result_group = EstimatorResult(values=values, metadata=metadata)
        with raises(ValueError):
            zne_strategy.build_zne_metadata(result_group)
|
https://github.com/GIRISHBELANI/QC_Benchmarks_using_dm-simulator
|
GIRISHBELANI
|
"""
Hamiltonian-Simulation (Transverse Field Ising Model) Benchmark Program - Qiskit
"""
import sys
sys.path[1:1] = ["_common", "_common/qiskit"]
sys.path[1:1] = ["../../_common", "../../_common/qiskit"]
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
import time
import math
import numpy as np
np.random.seed(0)
import execute as ex
import metrics as metrics
from collections import defaultdict
# when True, print the measured counts of every analyzed result
verbose = False
# saved circuits and subcircuits for display
QC_ = None  # most recently saved (small) main circuit
ZZ_ = None  # most recently built exp(ZZ) subcircuit
############### Circuit Definition
def HamiltonianSimulation(n_spins, K, t, method):
    '''
    Construct a Qiskit circuit for Hamiltonian Simulation of the TFIM.

    :param n_spins: The number of spins to simulate
    :param K: The Trotterization order (number of Trotter steps)
    :param t: duration of simulation
    :param method: whether the circuit simulates the TFIM in paramagnetic (1)
        or ferromagnetic (otherwise) phase
    :return: return a Qiskit circuit for this Hamiltonian
    '''
    # strength of transverse field
    if method == 1:
        g = 20.0  # g >> 1 -> paramagnetic phase
    else:
        g = 0.1  # g << 1 -> ferromagnetic phase
    # allocate qubits
    qr = QuantumRegister(n_spins)
    cr = ClassicalRegister(n_spins)
    qc = QuantumCircuit(qr, cr, name="main")
    # define timestep based on total runtime and number of Trotter steps
    tau = t / K
    # initialize state to approximate eigenstate when deep into phases of TFIM
    if abs(g) > 1:  # paramagnetic phase
        # start with initial state of |++...> (eigenstate in x-basis)
        for k in range(n_spins):
            qc.h(qr[k])
    if abs(g) < 1:  # ferromagnetic phase
        # start with initial state of GHZ state: 1/sqrt(2) ( |00...> + |11...> )
        qc.h(qr[0])
        for k in range(1, n_spins):
            # use cx: the cnot alias is deprecated and removed in recent Qiskit
            qc.cx(qr[k - 1], qr[k])
    qc.barrier()
    # loop over each trotter step, adding gates to the circuit defining the Hamiltonian
    for k in range(K):
        # the Pauli spin vector product
        for i in range(n_spins):
            qc.rx(2 * tau * g, qr[i])
        qc.barrier()
        # ZZ operation on each pair of qubits in linear chain
        for j in range(2):
            for i in range(j % 2, n_spins, 2):
                qc.append(zz_gate(tau).to_instruction(), [qr[i], qr[(i + 1) % n_spins]])
        qc.barrier()
    # transform state back to computational basis |00000>
    if abs(g) > 1:  # paramagnetic phase
        # reverse transformation from |++...> (eigenstate in x-basis)
        for k in range(n_spins):
            qc.h(qr[k])
    if abs(g) < 1:  # ferromagnetic phase
        # reversed transformation from GHZ state
        for k in reversed(range(1, n_spins)):
            qc.cx(qr[k - 1], qr[k])
        qc.h(qr[0])
    qc.barrier()
    # measure all the qubits used in the circuit
    for i_qubit in range(n_spins):
        qc.measure(qr[i_qubit], cr[i_qubit])
    # save smaller circuit example for display
    global QC_
    if QC_ is None or n_spins <= 4:  # fixed: identity check instead of `== None`
        if n_spins < 9:
            QC_ = qc
    return qc
############### exp(ZZ) Gate Implementations
# Simple exp(ZZ) gate on q0 and q1 with angle 'tau'
def zz_gate(tau):
    """Return a two-qubit exp(ZZ) circuit with angle pi*tau (CX-RZ-CX decomposition)."""
    register = QuantumRegister(2)
    circuit = QuantumCircuit(register, name="zz_gate")
    circuit.cx(register[0], register[1])
    circuit.rz(np.pi * tau, register[1])
    circuit.cx(register[0], register[1])
    # save circuit example for display
    global ZZ_
    ZZ_ = circuit
    return circuit
############### Result Data Analysis
# Analyze and print measured results
# Compute the quality of the result based on operator expectation for each state
def analyze_and_print_result(qc, result, num_qubits, type, num_shots):
    """Compute measured counts and fidelity (fraction of shots in the all-zeros state).

    :param qc: the executed circuit, used to look up its counts in `result`
    :param result: a backend result object providing `get_counts(qc)`
    :param num_qubits: number of measured qubits (width of the all-zeros key)
    :param type: circuit identifier, only used in the verbose report
    :param num_shots: total shot count, the fidelity denominator
    :return: (counts, fidelity) tuple
    """
    counts = result.get_counts(qc)
    if verbose: print(f"For type {type} measured: {counts}")
    # the ideal benchmark circuit returns to |00...0>; score by its observed frequency
    correct_state = '0'*num_qubits
    # dict.get replaces the redundant `in counts.keys()` membership test + lookup
    fidelity = counts.get(correct_state, 0) / num_shots
    return counts, fidelity
################ Benchmark Loop
# Execute program with default parameters
def run(min_qubits=2, max_qubits=8, max_circuits=3, num_shots=100, method=1,
        backend_id='qasm_simulator', provider_backend=None,
        hub="ibm-q", group="open", project="main"):
    """Execute the TFIM Hamiltonian-simulation benchmark over a range of qubit counts.

    :param min_qubits: smallest circuit width (forced even, >= 2)
    :param max_qubits: largest circuit width (>= 2)
    :param max_circuits: number of circuits to execute per qubit count
    :param num_shots: shots per circuit execution
    :param method: 1 for paramagnetic phase, otherwise ferromagnetic
    :param backend_id: execution target identifier
    :param provider_backend: optional pre-constructed provider backend
    :param hub: IBMQ provider hub
    :param group: IBMQ provider group
    :param project: IBMQ provider project
    """
    print("Hamiltonian-Simulation (Transverse Field Ising Model) Benchmark Program - Qiskit")
    print(f"... using circuit method {method}")
    # validate parameters (smallest circuit is 2 qubits)
    max_qubits = max(2, max_qubits)
    min_qubits = min(max(2, min_qubits), max_qubits)
    if min_qubits % 2 == 1: min_qubits += 1  # min_qubits must be even
    #print(f"min, max qubits = {min_qubits} {max_qubits}")
    # Initialize metrics module
    metrics.init_metrics()
    # Define custom result handler
    def execution_handler(qc, result, num_qubits, type, num_shots):
        # determine fidelity of result set
        num_qubits = int(num_qubits)
        counts, expectation_a = analyze_and_print_result(qc, result, num_qubits, type, num_shots)
        metrics.store_metric(num_qubits, type, 'fidelity', expectation_a)
    # Initialize execution module using the execution result handler above and specified backend_id
    ex.init_execution(execution_handler)
    ex.set_execution_target(backend_id, provider_backend=provider_backend,
            hub=hub, group=group, project=project)
    # Execute Benchmark Program N times for multiple circuit sizes
    # Accumulate metrics asynchronously as circuits complete
    for input_size in range(min_qubits, max_qubits + 1, 2):
        # determine number of circuits to execute for this group
        num_circuits = max_circuits
        num_qubits = input_size
        print(f"************\nExecuting [{num_circuits}] circuits with num_qubits = {num_qubits}")
        # parameters of simulation
        t = 1  # time of simulation, 1 is chosen so that the dynamics are not completely trivial
        k = int(5*num_qubits*t)  # Trotter error.
        # A large Trotter order approximates the Hamiltonian evolution better.
        # But a large Trotter order also means the circuit is deeper.
        # For ideal or noise-less quantum circuits, k >> 1 gives perfect hamiltonian simulation.
        for circuit_id in range(num_circuits):
            # create the circuit for given qubit size and simulation parameters, store time metric
            ts = time.time()
            # NOTE(review): h_x/h_z are computed but never passed to HamiltonianSimulation;
            # retained so the seeded RNG stream (np.random.seed(0)) is unchanged.
            h_x = 2 * np.random.random(num_qubits) - 1  # random numbers between [-1, 1]
            h_z = 2 * np.random.random(num_qubits) - 1
            qc = HamiltonianSimulation(num_qubits, K=k, t=t, method=method)
            metrics.store_metric(num_qubits, circuit_id, 'create_time', time.time() - ts)
            # collapse the sub-circuits used in this benchmark (for qiskit)
            qc2 = qc.decompose()
            # submit circuit for execution on target (simulator, cloud simulator, or hardware)
            ex.submit_circuit(qc2, num_qubits, circuit_id, num_shots)
        # Wait for some active circuits to complete; report metrics when groups complete
        ex.throttle_execution(metrics.finalize_group)
    # Wait for all active circuits to complete; report metrics when groups complete
    ex.finalize_execution(metrics.finalize_group)
    # print a sample circuit (fixed: identity check instead of `!= None`)
    print("Sample Circuit:"); print(QC_ if QC_ is not None else " ... too large!")
    print("\n********\nZZ ="); print(ZZ_)
    # Plot metrics for all circuit sizes
    metrics.plot_metrics(f"Benchmark Results - Hamiltonian Simulation ({method}) - Qiskit")
# if main, execute method
if __name__ == '__main__': run()
|
https://github.com/2lambda123/Qiskit-qiskit
|
2lambda123
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the TemplateOptimization pass."""
import unittest
from test.python.quantum_info.operators.symplectic.test_clifford import random_clifford_circuit
import numpy as np
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.circuit import Parameter
from qiskit.quantum_info import Operator
from qiskit.circuit.library.templates.nct import template_nct_2a_2, template_nct_5a_3
from qiskit.circuit.library.templates.clifford import (
clifford_2_1,
clifford_2_2,
clifford_2_3,
clifford_2_4,
clifford_3_1,
clifford_4_1,
clifford_4_2,
)
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.converters.circuit_to_dagdependency import circuit_to_dagdependency
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import TemplateOptimization
from qiskit.transpiler.passes.calibration.rzx_templates import rzx_templates
from qiskit.test import QiskitTestCase
from qiskit.transpiler.exceptions import TranspilerError
def _ry_to_rz_template_pass(parameter: Parameter = None, extra_costs=None):
    """Create a simple pass manager that runs a template optimisation with a single transformation.

    It turns ``RX(pi/2).RY(parameter).RX(-pi/2)`` into the equivalent virtual ``RZ`` rotation,
    where if ``parameter`` is given, it will be the instance used in the template.
    """
    if parameter is None:
        parameter = Parameter("_ry_rz_template_inner")
    # the template circuit must compose to the identity
    template = QuantumCircuit(1)
    template.rx(-np.pi / 2, 0)
    template.ry(parameter, 0)
    template.rx(np.pi / 2, 0)
    template.rz(-parameter, 0)  # pylint: disable=invalid-unary-operand-type
    cost_dict = {"rx": 16, "ry": 16, "rz": 0}
    if extra_costs is not None:
        cost_dict = {**cost_dict, **extra_costs}
    return PassManager(TemplateOptimization([template], user_cost_dict=cost_dict))
class TestTemplateMatching(QiskitTestCase):
"""Test the TemplateOptimization pass."""
def test_pass_cx_cancellation_no_template_given(self):
"""
Check the cancellation of CX gates for the apply of the three basic
template x-x, cx-cx. ccx-ccx.
"""
qr = QuantumRegister(3)
circuit_in = QuantumCircuit(qr)
circuit_in.h(qr[0])
circuit_in.h(qr[0])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[1], qr[0])
circuit_in.cx(qr[1], qr[0])
pass_manager = PassManager()
pass_manager.append(TemplateOptimization())
circuit_in_opt = pass_manager.run(circuit_in)
circuit_out = QuantumCircuit(qr)
circuit_out.h(qr[0])
circuit_out.h(qr[0])
self.assertEqual(circuit_in_opt, circuit_out)
    def test_pass_cx_cancellation_own_template(self):
        """
        Check the cancellation of CX gates for the apply of a self made template cx-cx.
        """
        qr = QuantumRegister(2, "qr")
        circuit_in = QuantumCircuit(qr)
        circuit_in.h(qr[0])
        circuit_in.h(qr[0])
        circuit_in.cx(qr[0], qr[1])
        circuit_in.cx(qr[0], qr[1])
        circuit_in.cx(qr[0], qr[1])
        circuit_in.cx(qr[0], qr[1])
        circuit_in.cx(qr[1], qr[0])
        circuit_in.cx(qr[1], qr[0])
        dag_in = circuit_to_dag(circuit_in)
        # hand-built identity template: two back-to-back CX gates
        qrt = QuantumRegister(2, "qrc")
        qct = QuantumCircuit(qrt)
        qct.cx(0, 1)
        qct.cx(0, 1)
        template_list = [qct]
        pass_ = TemplateOptimization(template_list)
        dag_opt = pass_.run(dag_in)
        # all CX pairs cancel; only the two Hadamards survive
        circuit_expected = QuantumCircuit(qr)
        circuit_expected.h(qr[0])
        circuit_expected.h(qr[0])
        dag_expected = circuit_to_dag(circuit_expected)
        self.assertEqual(dag_opt, dag_expected)
    def test_pass_cx_cancellation_template_from_library(self):
        """
        Check the cancellation of CX gates for the apply of the library template cx-cx (2a_2).
        """
        qr = QuantumRegister(2, "qr")
        circuit_in = QuantumCircuit(qr)
        circuit_in.h(qr[0])
        circuit_in.h(qr[0])
        circuit_in.cx(qr[0], qr[1])
        circuit_in.cx(qr[0], qr[1])
        circuit_in.cx(qr[0], qr[1])
        circuit_in.cx(qr[0], qr[1])
        circuit_in.cx(qr[1], qr[0])
        circuit_in.cx(qr[1], qr[0])
        dag_in = circuit_to_dag(circuit_in)
        # library-provided cx-cx identity template
        template_list = [template_nct_2a_2()]
        pass_ = TemplateOptimization(template_list)
        dag_opt = pass_.run(dag_in)
        # all CX pairs cancel; only the two Hadamards survive
        circuit_expected = QuantumCircuit(qr)
        circuit_expected.h(qr[0])
        circuit_expected.h(qr[0])
        dag_expected = circuit_to_dag(circuit_expected)
        self.assertEqual(dag_opt, dag_expected)
    def test_pass_template_nct_5a(self):
        """
        Verify the result of template matching and substitution with the library
        template 5a_3 (a three-qubit identity built from Toffoli and CX gates).

        The input circuit applies, in order: ccx(3,4,0), cx(1,4), cx(2,1), h(3),
        z(1), cx(2,3), ccx(2,3,0), cx(1,4).  The match [0,1][1,2][2,7] lets the
        optimizer substitute part of the circuit, producing the expected circuit
        constructed below (a cx(2,4) replaces the matched cx(1,4) pair).

        NOTE(review): the upstream docstring contained circuit diagrams that were
        mojibake-encoded in this copy; they have been replaced by this textual
        description.
        """
        qr = QuantumRegister(5, "qr")
        circuit_in = QuantumCircuit(qr)
        circuit_in.ccx(qr[3], qr[4], qr[0])
        circuit_in.cx(qr[1], qr[4])
        circuit_in.cx(qr[2], qr[1])
        circuit_in.h(qr[3])
        circuit_in.z(qr[1])
        circuit_in.cx(qr[2], qr[3])
        circuit_in.ccx(qr[2], qr[3], qr[0])
        circuit_in.cx(qr[1], qr[4])
        dag_in = circuit_to_dag(circuit_in)
        template_list = [template_nct_5a_3()]
        pass_ = TemplateOptimization(template_list)
        dag_opt = pass_.run(dag_in)
        # note: cx(2, 1) commutes both with ccx(3, 4, 0) and with cx(2, 4),
        # so there is no real difference with the circuit drawn on the picture above.
        circuit_expected = QuantumCircuit(qr)
        circuit_expected.cx(qr[2], qr[1])
        circuit_expected.ccx(qr[3], qr[4], qr[0])
        circuit_expected.cx(qr[2], qr[4])
        circuit_expected.z(qr[1])
        circuit_expected.h(qr[3])
        circuit_expected.cx(qr[2], qr[3])
        circuit_expected.ccx(qr[2], qr[3], qr[0])
        dag_expected = circuit_to_dag(circuit_expected)
        self.assertEqual(dag_opt, dag_expected)
def test_pass_template_wrong_type(self):
"""
If a template is not equivalent to the identity, it raises an error.
"""
qr = QuantumRegister(2, "qr")
circuit_in = QuantumCircuit(qr)
circuit_in.h(qr[0])
circuit_in.h(qr[0])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[1], qr[0])
circuit_in.cx(qr[1], qr[0])
dag_in = circuit_to_dag(circuit_in)
qrt = QuantumRegister(2, "qrc")
qct = QuantumCircuit(qrt)
qct.cx(0, 1)
qct.x(0)
qct.h(1)
template_list = [qct]
pass_ = TemplateOptimization(template_list)
self.assertRaises(TranspilerError, pass_.run, dag_in)
def test_accept_dagdependency(self):
"""
Check that users can supply DAGDependency in the template list.
"""
circuit_in = QuantumCircuit(2)
circuit_in.cnot(0, 1)
circuit_in.cnot(0, 1)
templates = [circuit_to_dagdependency(circuit_in)]
pass_ = TemplateOptimization(template_list=templates)
circuit_out = PassManager(pass_).run(circuit_in)
# these are NOT equal if template optimization works
self.assertNotEqual(circuit_in, circuit_out)
# however these are equivalent if the operators are the same
self.assertTrue(Operator(circuit_in).equiv(circuit_out))
    def test_parametric_template(self):
        """
        Check matching where the template has a free parameter beta.

        The template applies P(-beta) on both qubits, CX, P(beta), CX, then
        CU(0, 2*beta, 0, 0) — an identity for any beta.  Two input circuits are
        optimized against it: the first matches twice (with beta bound to 2 and
        3 respectively) so every CX is removed; the second has sign-flipped
        phases on the second half, and is only checked for operator equivalence.

        NOTE(review): the upstream docstring contained circuit diagrams that
        were mojibake-encoded in this copy; they have been replaced by this
        textual description.
        """
        beta = Parameter("β")
        template = QuantumCircuit(2)
        template.p(-beta, 0)
        template.p(-beta, 1)
        template.cx(0, 1)
        template.p(beta, 1)
        template.cx(0, 1)
        template.cu(0, 2.0 * beta, 0, 0, 0, 1)
        def count_cx(qc):
            """Counts the number of CX gates for testing."""
            return qc.count_ops().get("cx", 0)
        circuit_in = QuantumCircuit(3)
        circuit_in.p(-2, 0)
        circuit_in.p(-2, 1)
        circuit_in.cx(0, 1)
        circuit_in.p(2, 1)
        circuit_in.cx(0, 1)
        circuit_in.p(-3, 1)
        circuit_in.p(-3, 2)
        circuit_in.cx(1, 2)
        circuit_in.p(3, 2)
        circuit_in.cx(1, 2)
        pass_ = TemplateOptimization(
            template_list=[template],
            user_cost_dict={"cx": 6, "p": 0, "cu": 8},
        )
        circuit_out = PassManager(pass_).run(circuit_in)
        # check the accumulated phases on the relevant basis states
        np.testing.assert_almost_equal(Operator(circuit_out).data[3, 3], np.exp(-4.0j))
        np.testing.assert_almost_equal(Operator(circuit_out).data[7, 7], np.exp(-10.0j))
        self.assertEqual(count_cx(circuit_out), 0)  # Two matches => no CX gates.
        np.testing.assert_almost_equal(Operator(circuit_in).data, Operator(circuit_out).data)
        circuit_in = QuantumCircuit(3)
        circuit_in.p(-2, 0)
        circuit_in.p(-2, 1)
        circuit_in.cx(0, 1)
        circuit_in.p(2, 1)
        circuit_in.cx(0, 1)
        circuit_in.p(3, 1)
        circuit_in.p(3, 2)
        circuit_in.cx(1, 2)
        circuit_in.p(3, 2)
        circuit_in.cx(1, 2)
        pass_ = TemplateOptimization(
            template_list=[template],
            user_cost_dict={"cx": 6, "p": 0, "cu": 8},
        )
        circuit_out = PassManager(pass_).run(circuit_in)
        # these are NOT equal if template optimization works
        self.assertNotEqual(circuit_in, circuit_out)
        # however these are equivalent if the operators are the same
        self.assertTrue(Operator(circuit_in).equiv(circuit_out))
def test_optimizer_does_not_replace_unbound_partial_match(self):
    """
    Test that partial matches with parameters will not raise errors.
    This tests that if parameters are still in the temporary template after
    _attempt_bind then they will not be used.
    """
    angle = Parameter("β")
    # Template whose parameterized tail cannot be bound by the candidate.
    template = QuantumCircuit(2)
    template.cx(1, 0)
    template.cx(1, 0)
    template.p(angle, 1)
    template.cu(0, 0, 0, -angle, 0, 1)
    # Candidate circuit only matches the two leading CX gates.
    candidate = QuantumCircuit(2)
    candidate.cx(1, 0)
    candidate.cx(1, 0)
    optimization = TemplateOptimization(
        template_list=[template],
        user_cost_dict={"cx": 6, "p": 0, "cu": 8},
    )
    result = PassManager(optimization).run(candidate)
    # The template optimisation should not have replaced anything, because
    # that would require it to leave dummy parameters in place without
    # binding them.
    self.assertEqual(candidate, result)
def test_unbound_parameters_in_rzx_template(self):
    """
    Test that rzx template ('zz2') functions correctly for a simple
    circuit with an unbound ParameterExpression. This uses the same
    Parameter (theta) as the template, so this also checks that template
    substitution handle this correctly.
    """
    angle = Parameter("ϴ")
    source = QuantumCircuit(2)
    source.cx(0, 1)
    source.p(2 * angle, 1)
    source.cx(0, 1)
    optimization = TemplateOptimization(**rzx_templates(["zz2"]))
    rewritten = PassManager(optimization).run(source)
    # these are NOT equal if template optimization works
    self.assertNotEqual(source, rewritten)
    # however these are equivalent if the operators are the same
    fixed_value = 0.42
    bound_source = source.bind_parameters({angle: fixed_value})
    bound_rewritten = rewritten.bind_parameters({angle: fixed_value})
    self.assertTrue(Operator(bound_source).equiv(bound_rewritten))
def test_two_parameter_template(self):
    """
    Test a two-Parameter template based on rzx_templates(["zz3"]),
    ┌───┐┌───────┐┌───┐┌────────────┐»
    q_0: ──■─────────────■──┤ X ├┤ Rz(φ) ├┤ X ├┤ Rz(-1.0*φ) ├»
    ┌─┴─┐┌───────┐┌─┴─┐└─┬─┘└───────┘└─┬─┘└────────────┘»
    q_1: ┤ X ├┤ Rz(θ) ├┤ X ├──■─────────────■────────────────»
    └───┘└───────┘└───┘
    «     ┌──────────┐┌──────────┐┌──────────┐┌───────────┐┌─────────────────┐»
    «q_0: ┤ Rz(π/2) ├┤ Rx(π/2) ├┤ Rz(π/2) ├┤ Rx(1.0*φ) ├┤1                ├»
    «     └──────────┘└──────────┘└──────────┘└───────────┘│  Rzx(-1.0*φ) │»
    «q_1: ─────────────────────────────────────────────────┤0                ├»
    «                                                      └─────────────────┘»
    «      ┌──────────┐ ┌──────────┐┌──────────┐                          »
    «q_0: ─┤ Rz(π/2) ├─┤ Rx(π/2) ├┤ Rz(π/2) ├─────────────────────────»
    «     ┌┴──────────┴┐├──────────┤├──────────┤┌──────────┐┌───────────┐»
    «q_1: ┤ Rz(-1.0*θ) ├┤ Rz(π/2) ├┤ Rx(π/2) ├┤ Rz(π/2) ├┤ Rx(1.0*θ) ├»
    «     └────────────┘└──────────┘└──────────┘└──────────┘└───────────┘»
    «     ┌─────────────────┐
    «q_0: ┤0                ├─────────────────────────────────
    «     │  Rzx(-1.0*θ)  │┌──────────┐┌──────────┐┌──────────┐
    «q_1: ┤1                ├┤ Rz(π/2) ├┤ Rx(π/2) ├┤ Rz(π/2) ├
    «     └─────────────────┘└──────────┘└──────────┘└──────────┘
    correctly template matches into a unique circuit, but that it is
    equivalent to the input circuit when the Parameters are bound to floats
    and checked with Operator equivalence.
    """
    theta = Parameter("θ")
    phi = Parameter("φ")
    # Identity template: a (CX, Rz, CX) block on each orientation followed
    # by its echoed-cross-resonance (Rzx) decomposition; the full sequence
    # composes to the identity, which is what makes it a valid template.
    template = QuantumCircuit(2)
    template.cx(0, 1)
    template.rz(theta, 1)
    template.cx(0, 1)
    template.cx(1, 0)
    template.rz(phi, 0)
    template.cx(1, 0)
    template.rz(-phi, 0)
    template.rz(np.pi / 2, 0)
    template.rx(np.pi / 2, 0)
    template.rz(np.pi / 2, 0)
    template.rx(phi, 0)
    template.rzx(-phi, 1, 0)
    template.rz(np.pi / 2, 0)
    template.rz(-theta, 1)
    template.rx(np.pi / 2, 0)
    template.rz(np.pi / 2, 1)
    template.rz(np.pi / 2, 0)
    template.rx(np.pi / 2, 1)
    template.rz(np.pi / 2, 1)
    template.rx(theta, 1)
    template.rzx(-theta, 0, 1)
    template.rz(np.pi / 2, 1)
    template.rx(np.pi / 2, 1)
    template.rz(np.pi / 2, 1)
    # Input circuit binds the template's θ and φ to *expressions* in two
    # fresh Parameters, exercising expression substitution.
    alpha = Parameter("$\\alpha$")
    beta = Parameter("$\\beta$")
    circuit_in = QuantumCircuit(2)
    circuit_in.cx(0, 1)
    circuit_in.rz(2 * alpha, 1)
    circuit_in.cx(0, 1)
    circuit_in.cx(1, 0)
    circuit_in.rz(3 * beta, 0)
    circuit_in.cx(1, 0)
    pass_ = TemplateOptimization(
        [template],
        # Costs make the CX-based form expensive so substitution triggers.
        user_cost_dict={"cx": 6, "rz": 0, "rx": 1, "rzx": 0},
    )
    circuit_out = PassManager(pass_).run(circuit_in)
    # these are NOT equal if template optimization works
    self.assertNotEqual(circuit_in, circuit_out)
    # however these are equivalent if the operators are the same
    alpha_set = 0.37
    beta_set = 0.42
    self.assertTrue(
        Operator(circuit_in.bind_parameters({alpha: alpha_set, beta: beta_set})).equiv(
            circuit_out.bind_parameters({alpha: alpha_set, beta: beta_set})
        )
    )
def test_exact_substitution_numeric_parameter(self):
    """Test that a template match produces the expected value for numeric parameters."""
    # Rx(-π/2)·Ry(θ)·Rx(π/2) is an Rz(θ) conjugation identity.
    source = QuantumCircuit(1)
    source.rx(-np.pi / 2, 0)
    source.ry(1.45, 0)
    source.rx(np.pi / 2, 0)
    rewritten = _ry_to_rz_template_pass().run(source)
    target = QuantumCircuit(1)
    target.rz(1.45, 0)
    self.assertEqual(rewritten, target)
def test_exact_substitution_symbolic_parameter(self):
    """Test that a template match produces the expected value for numeric parameters."""
    symbol = Parameter("a")
    # Same Rx/Ry/Rx sandwich as the numeric case, but with an unbound
    # Parameter as the rotation angle, plus an uninvolved leading H.
    source = QuantumCircuit(1)
    source.h(0)
    source.rx(-np.pi / 2, 0)
    source.ry(symbol, 0)
    source.rx(np.pi / 2, 0)
    rewritten = _ry_to_rz_template_pass(extra_costs={"h": 1}).run(source)
    target = QuantumCircuit(1)
    target.h(0)
    target.rz(symbol, 0)
    self.assertEqual(rewritten, target)
def test_naming_clash(self):
    """Test that the template matching works and correctly replaces a template if there is a
    naming clash between it and the circuit. This should include binding a partial match with a
    parameter."""
    # Two instances of parameters with the same name---this is how naming clashes might occur.
    template_param = Parameter("a")
    circuit_param = Parameter("a")
    source = QuantumCircuit(1)
    source.h(0)
    source.rx(-np.pi / 2, 0)
    source.ry(circuit_param, 0)
    source.rx(np.pi / 2, 0)
    rewritten = _ry_to_rz_template_pass(template_param, extra_costs={"h": 1}).run(source)
    target = QuantumCircuit(1)
    target.h(0)
    target.rz(circuit_param, 0)
    self.assertEqual(rewritten, target)
    # Ensure that the bound parameter in the output is referentially the same as the one we put
    # in the input circuit..
    self.assertEqual(len(rewritten.parameters), 1)
    self.assertIs(source.parameters[0], circuit_param)
    self.assertIs(rewritten.parameters[0], circuit_param)
def test_naming_clash_in_expression(self):
    """Test that the template matching works and correctly replaces a template if there is a
    naming clash between it and the circuit. This should include binding a partial match with a
    parameter."""
    template_param = Parameter("a")
    circuit_param = Parameter("a")
    # Like test_naming_clash, but the rotation angle is a ParameterExpression.
    source = QuantumCircuit(1)
    source.h(0)
    source.rx(-np.pi / 2, 0)
    source.ry(2 * circuit_param, 0)
    source.rx(np.pi / 2, 0)
    rewritten = _ry_to_rz_template_pass(template_param, extra_costs={"h": 1}).run(source)
    target = QuantumCircuit(1)
    target.h(0)
    target.rz(2 * circuit_param, 0)
    self.assertEqual(rewritten, target)
    # Ensure that the bound parameter in the output is referentially the same as the one we put
    # in the input circuit..
    self.assertEqual(len(rewritten.parameters), 1)
    self.assertIs(source.parameters[0], circuit_param)
    self.assertIs(rewritten.parameters[0], circuit_param)
def test_template_match_with_uninvolved_parameter(self):
    """Test that the template matching algorithm succeeds at matching a circuit that contains an
    unbound parameter that is not involved in the subcircuit that matches."""
    bystander = Parameter("b")
    source = QuantumCircuit(2)
    # Qubit 0 carries an unbound rotation that the template never touches.
    source.rz(bystander, 0)
    # Qubit 1 carries the Rx/Ry/Rx sandwich that should collapse to Rz.
    source.rx(-np.pi / 2, 1)
    source.ry(1.45, 1)
    source.rx(np.pi / 2, 1)
    rewritten = _ry_to_rz_template_pass().run(source)
    target = QuantumCircuit(2)
    target.rz(bystander, 0)
    target.rz(1.45, 1)
    self.assertEqual(rewritten, target)
def test_multiple_numeric_matches_same_template(self):
    """Test that the template matching will change both instances of a partial match within a
    longer circuit."""
    source = QuantumCircuit(2)
    # One Rx/Ry/Rx sandwich per qubit, each with a different numeric angle.
    for qubit, angle in ((0, 1.32), (1, 2.54)):
        source.rx(-np.pi / 2, qubit)
        source.ry(angle, qubit)
        source.rx(np.pi / 2, qubit)
    rewritten = _ry_to_rz_template_pass().run(source)
    target = QuantumCircuit(2)
    target.rz(1.32, 0)
    target.rz(2.54, 1)
    self.assertEqual(rewritten, target)
def test_multiple_symbolic_matches_same_template(self):
    """Test that the template matching will change both instances of a partial match within a
    longer circuit."""
    a, b = Parameter("a"), Parameter("b")
    source = QuantumCircuit(2)
    # One Rx/Ry/Rx sandwich per qubit, each with its own unbound Parameter.
    for qubit, symbol in ((0, a), (1, b)):
        source.rx(-np.pi / 2, qubit)
        source.ry(symbol, qubit)
        source.rx(np.pi / 2, qubit)
    rewritten = _ry_to_rz_template_pass().run(source)
    target = QuantumCircuit(2)
    target.rz(a, 0)
    target.rz(b, 1)
    self.assertEqual(rewritten, target)
def test_template_match_multiparameter(self):
    """Test that the template matching works on instructions that take more than one
    parameter."""
    first = Parameter("a")
    second = Parameter("b")
    # U(0, a, b) followed by Rz(-a-b) is the identity (up to phase),
    # making this a two-parameter template.
    template = QuantumCircuit(1)
    template.u(0, first, second, 0)
    template.rz(-first - second, 0)
    source = QuantumCircuit(1)
    source.u(0, 1.23, 2.45, 0)
    manager = PassManager(TemplateOptimization([template], user_cost_dict={"u": 16, "rz": 0}))
    rewritten = manager.run(source)
    target = QuantumCircuit(1)
    target.rz(1.23 + 2.45, 0)
    self.assertEqual(rewritten, target)
def test_naming_clash_multiparameter(self):
    """Test that the naming clash prevention mechanism works with instructions that take
    multiple parameters."""
    # Template parameters deliberately share names with circuit parameters.
    first_t = Parameter("a")
    second_t = Parameter("b")
    template = QuantumCircuit(1)
    template.u(0, first_t, second_t, 0)
    template.rz(-first_t - second_t, 0)
    first_c = Parameter("a")
    second_c = Parameter("b")
    source = QuantumCircuit(1)
    source.u(0, first_c, second_c, 0)
    manager = PassManager(TemplateOptimization([template], user_cost_dict={"u": 16, "rz": 0}))
    rewritten = manager.run(source)
    target = QuantumCircuit(1)
    # The output must be expressed in the *circuit's* parameters.
    target.rz(first_c + second_c, 0)
    self.assertEqual(rewritten, target)
def test_consecutive_templates_apply(self):
    """Test the scenario where one template optimization creates an opportunity for
    another template optimization.
    This is the original circuit:
    ┌───┐
    q_0: ┤ X ├──■───X──────── ■
    └─┬─┘┌─┴─┐ │ ┌───┐ │
    q_1: ──■──┤ X ├─X─┤ H ├─ ■
    └───┘     └───┘
    The clifford_4_1 template allows to replace the two CNOTs followed by the SWAP by a
    single CNOT:
    q_0: ──■──────── ■
    ┌─┴─┐┌───┐ │
    q_1: ┤ X ├┤ H ├─ ■
    └───┘└───┘
    At these point, the clifford_4_2 template allows to replace the circuit by a single
    Hadamard gate:
    q_0: ─────
    ┌───┐
    q_1: ┤ H ├
    └───┘
    The second optimization would not have been possible without the applying the first
    optimization.
    """
    qc = QuantumCircuit(2)
    qc.cx(1, 0)
    qc.cx(0, 1)
    qc.swap(0, 1)
    qc.h(1)
    qc.cz(0, 1)
    # After both templates fire, only the Hadamard on q_1 survives.
    qc_expected = QuantumCircuit(2)
    qc_expected.h(1)
    costs = {"h": 1, "cx": 2, "cz": 2, "swap": 3}
    # Check that consecutively applying both templates leads to the expected circuit.
    qc_opt = TemplateOptimization(
        template_list=[clifford_4_1(), clifford_4_2()], user_cost_dict=costs
    )(qc)
    self.assertEqual(qc_opt, qc_expected)
    # Also check that applying the second template by itself does not do anything.
    qc_non_opt = TemplateOptimization(template_list=[clifford_4_2()], user_cost_dict=costs)(qc)
    self.assertEqual(qc, qc_non_opt)
def test_consecutive_templates_do_not_apply(self):
    """Test that applying one template optimization does not allow incorrectly
    applying other templates (which could happen if the DagDependency graph is
    not constructed correctly after the optimization).
    """
    manager = PassManager(
        TemplateOptimization(template_list=[clifford_2_2(), clifford_2_3()])
    )
    source = QuantumCircuit(2)
    source.cx(0, 1)
    source.cx(0, 1)
    source.h(0)
    source.swap(0, 1)
    source.h(0)
    optimized = manager.run(source)
    # Whatever rewriting happened, the unitary must be unchanged.
    self.assertTrue(Operator(source) == Operator(optimized))
def test_clifford_templates(self):
    """Tests TemplateOptimization pass on several larger examples."""
    templates = [
        clifford_2_1(),
        clifford_2_2(),
        clifford_2_3(),
        clifford_2_4(),
        clifford_3_1(),
    ]
    manager = PassManager(TemplateOptimization(template_list=templates))
    # Fuzz over several seeded random Clifford circuits: optimization must
    # always preserve the unitary.
    for seed in range(10):
        random_circ = random_clifford_circuit(
            num_qubits=5,
            num_gates=100,
            gates=["x", "y", "z", "h", "s", "sdg", "cx", "cz", "swap"],
            seed=seed,
        )
        optimized = manager.run(random_circ)
        self.assertTrue(Operator(random_circ) == Operator(optimized))
# Allow running this test module directly (e.g. `python this_file.py`).
if __name__ == "__main__":
    unittest.main()
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import pulse
d0 = pulse.DriveChannel(0)
x90 = pulse.Gaussian(10, 0.1, 3)
x180 = pulse.Gaussian(10, 0.2, 3)
with pulse.build() as hahn_echo:
with pulse.align_equispaced(duration=100):
pulse.play(x90, d0)
pulse.play(x180, d0)
pulse.play(x90, d0)
hahn_echo.draw()
|
https://github.com/swe-train/qiskit__qiskit
|
swe-train
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for Stinespring quantum channel representation class."""
import copy
import unittest
import numpy as np
from numpy.testing import assert_allclose
from qiskit import QiskitError
from qiskit.quantum_info.states import DensityMatrix
from qiskit.quantum_info import Stinespring
from .channel_test_case import ChannelTestCase
class TestStinespring(ChannelTestCase):
    """Tests for Stinespring channel representation.

    NOTE: many tests draw fixtures from ``self.rand_matrix``/``self.rand_rho``
    in sequence; statement order therefore affects the RNG stream and must
    be preserved.
    """

    def test_init(self):
        """Test initialization"""
        # Initialize from unitary
        chan = Stinespring(self.UI)
        assert_allclose(chan.data, self.UI)
        self.assertEqual(chan.dim, (2, 2))
        self.assertEqual(chan.num_qubits, 1)
        # Initialize from Stinespring
        chan = Stinespring(self.depol_stine(0.5))
        assert_allclose(chan.data, self.depol_stine(0.5))
        self.assertEqual(chan.dim, (2, 2))
        self.assertEqual(chan.num_qubits, 1)
        # Initialize from Non-CPTP
        stine_l, stine_r = self.rand_matrix(4, 2), self.rand_matrix(4, 2)
        chan = Stinespring((stine_l, stine_r))
        assert_allclose(chan.data, (stine_l, stine_r))
        self.assertEqual(chan.dim, (2, 2))
        self.assertEqual(chan.num_qubits, 1)
        # Initialize with redundant second op
        # (a pair of identical operators should collapse to a single one)
        chan = Stinespring((stine_l, stine_l))
        assert_allclose(chan.data, stine_l)
        self.assertEqual(chan.dim, (2, 2))
        self.assertEqual(chan.num_qubits, 1)
        # Wrong input or output dims should raise exception
        self.assertRaises(QiskitError, Stinespring, stine_l, input_dims=4, output_dims=4)

    def test_circuit_init(self):
        """Test initialization from a circuit."""
        circuit, target = self.simple_circuit_no_measure()
        op = Stinespring(circuit)
        target = Stinespring(target)
        self.assertEqual(op, target)

    def test_circuit_init_except(self):
        """Test initialization from circuit with measure raises exception."""
        circuit = self.simple_circuit_with_measure()
        self.assertRaises(QiskitError, Stinespring, circuit)

    def test_equal(self):
        """Test __eq__ method"""
        stine = tuple(self.rand_matrix(4, 2) for _ in range(2))
        self.assertEqual(Stinespring(stine), Stinespring(stine))

    def test_copy(self):
        """Test copy method"""
        mat = np.eye(4)
        with self.subTest("Deep copy"):
            orig = Stinespring(mat)
            cpy = orig.copy()
            # Mutating the deep copy must not affect the original.
            cpy._data[0][0, 0] = 0.0
            self.assertFalse(cpy == orig)
        with self.subTest("Shallow copy"):
            orig = Stinespring(mat)
            clone = copy.copy(orig)
            # Shallow copies share the underlying data, so both change.
            clone._data[0][0, 0] = 0.0
            self.assertTrue(clone == orig)

    def test_clone(self):
        """Test clone method"""
        mat = np.eye(4)
        orig = Stinespring(mat)
        clone = copy.copy(orig)
        clone._data[0][0, 0] = 0.0
        self.assertTrue(clone == orig)

    def test_is_cptp(self):
        """Test is_cptp method."""
        self.assertTrue(Stinespring(self.depol_stine(0.5)).is_cptp())
        self.assertTrue(Stinespring(self.UX).is_cptp())
        # Non-CP
        stine_l, stine_r = self.rand_matrix(4, 2), self.rand_matrix(4, 2)
        self.assertFalse(Stinespring((stine_l, stine_r)).is_cptp())
        self.assertFalse(Stinespring(self.UI + self.UX).is_cptp())

    def test_conjugate(self):
        """Test conjugate method."""
        stine_l, stine_r = self.rand_matrix(16, 2), self.rand_matrix(16, 2)
        # Single Stinespring list
        targ = Stinespring(stine_l.conj(), output_dims=4)
        chan1 = Stinespring(stine_l, output_dims=4)
        chan = chan1.conjugate()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (2, 4))
        # Double Stinespring list
        targ = Stinespring((stine_l.conj(), stine_r.conj()), output_dims=4)
        chan1 = Stinespring((stine_l, stine_r), output_dims=4)
        chan = chan1.conjugate()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (2, 4))

    def test_transpose(self):
        """Test transpose method."""
        stine_l, stine_r = self.rand_matrix(4, 2), self.rand_matrix(4, 2)
        # Single square Stinespring list
        # (transpose swaps input and output dimensions)
        targ = Stinespring(stine_l.T, 4, 2)
        chan1 = Stinespring(stine_l, 2, 4)
        chan = chan1.transpose()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (4, 2))
        # Double square Stinespring list
        targ = Stinespring((stine_l.T, stine_r.T), 4, 2)
        chan1 = Stinespring((stine_l, stine_r), 2, 4)
        chan = chan1.transpose()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (4, 2))

    def test_adjoint(self):
        """Test adjoint method."""
        stine_l, stine_r = self.rand_matrix(4, 2), self.rand_matrix(4, 2)
        # Single square Stinespring list
        targ = Stinespring(stine_l.T.conj(), 4, 2)
        chan1 = Stinespring(stine_l, 2, 4)
        chan = chan1.adjoint()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (4, 2))
        # Double square Stinespring list
        targ = Stinespring((stine_l.T.conj(), stine_r.T.conj()), 4, 2)
        chan1 = Stinespring((stine_l, stine_r), 2, 4)
        chan = chan1.adjoint()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (4, 2))

    def test_compose_except(self):
        """Test compose different dimension exception"""
        self.assertRaises(QiskitError, Stinespring(np.eye(2)).compose, Stinespring(np.eye(4)))
        self.assertRaises(QiskitError, Stinespring(np.eye(2)).compose, 2)

    def test_compose(self):
        """Test compose method."""
        # Random input test state
        rho_init = DensityMatrix(self.rand_rho(2))
        # UnitaryChannel evolution: X then Y equals Z (up to phase).
        chan1 = Stinespring(self.UX)
        chan2 = Stinespring(self.UY)
        chan = chan1.compose(chan2)
        rho_targ = rho_init & Stinespring(self.UZ)
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # 50% depolarizing channel: composing with itself gives 75%.
        chan1 = Stinespring(self.depol_stine(0.5))
        chan = chan1.compose(chan1)
        rho_targ = rho_init & Stinespring(self.depol_stine(0.75))
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # Compose different dimensions
        stine1, stine2 = self.rand_matrix(16, 2), self.rand_matrix(8, 4)
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        chan2 = Stinespring(stine2, input_dims=4, output_dims=2)
        rho_targ = rho_init & chan1 & chan2
        chan = chan1.compose(chan2)
        self.assertEqual(chan.dim, (2, 2))
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # `&` is an operator alias for compose.
        chan = chan1 & chan2
        self.assertEqual(chan.dim, (2, 2))
        self.assertEqual(rho_init.evolve(chan), rho_targ)

    def test_dot(self):
        """Test deprecated front compose method."""
        # Random input test state
        rho_init = DensityMatrix(self.rand_rho(2))
        # UnitaryChannel evolution
        chan1 = Stinespring(self.UX)
        chan2 = Stinespring(self.UY)
        rho_targ = rho_init.evolve(Stinespring(self.UZ))
        self.assertEqual(rho_init.evolve(chan1.dot(chan2)), rho_targ)
        # `@` is the operator alias for dot (right-to-left composition).
        self.assertEqual(rho_init.evolve(chan1 @ chan2), rho_targ)
        # 50% depolarizing channel
        chan1 = Stinespring(self.depol_stine(0.5))
        rho_targ = rho_init & Stinespring(self.depol_stine(0.75))
        self.assertEqual(rho_init.evolve(chan1.dot(chan1)), rho_targ)
        self.assertEqual(rho_init.evolve(chan1 @ chan1), rho_targ)
        # Compose different dimensions
        stine1, stine2 = self.rand_matrix(16, 2), self.rand_matrix(8, 4)
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        chan2 = Stinespring(stine2, input_dims=4, output_dims=2)
        rho_targ = rho_init & chan1 & chan2
        self.assertEqual(rho_init.evolve(chan2.dot(chan1)), rho_targ)
        self.assertEqual(rho_init.evolve(chan2 @ chan1), rho_targ)

    def test_compose_front(self):
        """Test deprecated front compose method."""
        # Random input test state
        rho_init = DensityMatrix(self.rand_rho(2))
        # UnitaryChannel evolution
        chan1 = Stinespring(self.UX)
        chan2 = Stinespring(self.UY)
        chan = chan1.compose(chan2, front=True)
        rho_targ = rho_init & Stinespring(self.UZ)
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # 50% depolarizing channel
        chan1 = Stinespring(self.depol_stine(0.5))
        chan = chan1.compose(chan1, front=True)
        rho_targ = rho_init & Stinespring(self.depol_stine(0.75))
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # Compose different dimensions
        stine1, stine2 = self.rand_matrix(16, 2), self.rand_matrix(8, 4)
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        chan2 = Stinespring(stine2, input_dims=4, output_dims=2)
        rho_targ = rho_init & chan1 & chan2
        chan = chan2.compose(chan1, front=True)
        self.assertEqual(chan.dim, (2, 2))
        self.assertEqual(rho_init.evolve(chan), rho_targ)

    def test_expand(self):
        """Test expand method."""
        rho0, rho1 = np.diag([1, 0]), np.diag([0, 1])
        rho_init = DensityMatrix(np.kron(rho0, rho0))
        chan1 = Stinespring(self.UI)
        chan2 = Stinespring(self.UX)
        # X \otimes I
        chan = chan1.expand(chan2)
        rho_targ = DensityMatrix(np.kron(rho1, rho0))
        self.assertEqual(chan.dim, (4, 4))
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # I \otimes X
        chan = chan2.expand(chan1)
        rho_targ = DensityMatrix(np.kron(rho0, rho1))
        self.assertEqual(chan.dim, (4, 4))
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # Completely depolarizing
        chan_dep = Stinespring(self.depol_stine(1))
        chan = chan_dep.expand(chan_dep)
        rho_targ = DensityMatrix(np.diag([1, 1, 1, 1]) / 4)
        self.assertEqual(chan.dim, (4, 4))
        self.assertEqual(rho_init.evolve(chan), rho_targ)

    def test_tensor(self):
        """Test tensor method."""
        rho0, rho1 = np.diag([1, 0]), np.diag([0, 1])
        rho_init = DensityMatrix(np.kron(rho0, rho0))
        chan1 = Stinespring(self.UI)
        chan2 = Stinespring(self.UX)
        # X \otimes I
        # (tensor has the opposite argument order to expand)
        chan = chan2.tensor(chan1)
        rho_targ = DensityMatrix(np.kron(rho1, rho0))
        self.assertEqual(chan.dim, (4, 4))
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # I \otimes X
        chan = chan1.tensor(chan2)
        rho_targ = DensityMatrix(np.kron(rho0, rho1))
        self.assertEqual(chan.dim, (4, 4))
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # Completely depolarizing
        chan_dep = Stinespring(self.depol_stine(1))
        chan = chan_dep.tensor(chan_dep)
        rho_targ = DensityMatrix(np.diag([1, 1, 1, 1]) / 4)
        self.assertEqual(chan.dim, (4, 4))
        self.assertEqual(rho_init.evolve(chan), rho_targ)

    def test_power(self):
        """Test power method."""
        # 10% depolarizing channel
        rho_init = DensityMatrix(np.diag([1, 0]))
        p_id = 0.9
        chan1 = Stinespring(self.depol_stine(1 - p_id))
        # Compose 3 times: identity-probability compounds multiplicatively.
        p_id3 = p_id**3
        chan = chan1.power(3)
        rho_targ = rho_init & chan1 & chan1 & chan1
        self.assertEqual(rho_init & chan, rho_targ)
        rho_targ = rho_init & Stinespring(self.depol_stine(1 - p_id3))
        self.assertEqual(rho_init & chan, rho_targ)

    def test_add(self):
        """Test add method."""
        # Random input test state
        rho_init = DensityMatrix(self.rand_rho(2))
        stine1, stine2 = self.rand_matrix(16, 2), self.rand_matrix(16, 2)
        # Random Single-Stinespring maps
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        chan2 = Stinespring(stine2, input_dims=2, output_dims=4)
        rho_targ = (rho_init & chan1) + (rho_init & chan2)
        chan = chan1._add(chan2)
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        chan = chan1 + chan2
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # Random Single-Stinespring maps
        chan = Stinespring((stine1, stine2))
        rho_targ = 2 * (rho_init & chan)
        chan = chan._add(chan)
        self.assertEqual(rho_init.evolve(chan), rho_targ)

    def test_subtract(self):
        """Test subtract method."""
        # Random input test state
        rho_init = DensityMatrix(self.rand_rho(2))
        stine1, stine2 = self.rand_matrix(16, 2), self.rand_matrix(16, 2)
        # Random Single-Stinespring maps
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        chan2 = Stinespring(stine2, input_dims=2, output_dims=4)
        rho_targ = (rho_init & chan1) - (rho_init & chan2)
        chan = chan1 - chan2
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # Random Single-Stinespring maps
        chan = Stinespring((stine1, stine2))
        rho_targ = 0 * (rho_init & chan)
        chan = chan - chan
        self.assertEqual(rho_init.evolve(chan), rho_targ)

    def test_add_qargs(self):
        """Test add method with qargs."""
        rho = DensityMatrix(self.rand_rho(8))
        stine = self.rand_matrix(32, 8)
        stine0 = self.rand_matrix(8, 2)
        op = Stinespring(stine)
        op0 = Stinespring(stine0)
        eye = Stinespring(self.UI)
        # qargs-addition on qubit k equals tensoring op0 into position k.
        with self.subTest(msg="qargs=[0]"):
            value = op + op0([0])
            target = op + eye.tensor(eye).tensor(op0)
            self.assertEqual(rho & value, rho & target)
        with self.subTest(msg="qargs=[1]"):
            value = op + op0([1])
            target = op + eye.tensor(op0).tensor(eye)
            self.assertEqual(rho & value, rho & target)
        with self.subTest(msg="qargs=[2]"):
            value = op + op0([2])
            target = op + op0.tensor(eye).tensor(eye)
            self.assertEqual(rho & value, rho & target)

    def test_sub_qargs(self):
        """Test sub method with qargs."""
        rho = DensityMatrix(self.rand_rho(8))
        stine = self.rand_matrix(32, 8)
        stine0 = self.rand_matrix(8, 2)
        op = Stinespring(stine)
        op0 = Stinespring(stine0)
        eye = Stinespring(self.UI)
        with self.subTest(msg="qargs=[0]"):
            value = op - op0([0])
            target = op - eye.tensor(eye).tensor(op0)
            self.assertEqual(rho & value, rho & target)
        with self.subTest(msg="qargs=[1]"):
            value = op - op0([1])
            target = op - eye.tensor(op0).tensor(eye)
            self.assertEqual(rho & value, rho & target)
        with self.subTest(msg="qargs=[2]"):
            value = op - op0([2])
            target = op - op0.tensor(eye).tensor(eye)
            self.assertEqual(rho & value, rho & target)

    def test_multiply(self):
        """Test multiply method."""
        # Random initial state and Stinespring ops
        rho_init = DensityMatrix(self.rand_rho(2))
        val = 0.5
        stine1, stine2 = self.rand_matrix(16, 2), self.rand_matrix(16, 2)
        # Single Stinespring set
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        rho_targ = val * (rho_init & chan1)
        chan = chan1._multiply(val)
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        chan = val * chan1
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        rho_targ = (rho_init & chan1) * val
        chan = chan1 * val
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        # Double Stinespring set
        chan2 = Stinespring((stine1, stine2), input_dims=2, output_dims=4)
        rho_targ = val * (rho_init & chan2)
        chan = chan2._multiply(val)
        self.assertEqual(rho_init.evolve(chan), rho_targ)
        chan = val * chan2
        self.assertEqual(rho_init.evolve(chan), rho_targ)

    def test_multiply_except(self):
        """Test multiply method raises exceptions."""
        chan = Stinespring(self.depol_stine(1))
        # Non-scalar multipliers must be rejected.
        self.assertRaises(QiskitError, chan._multiply, "s")
        self.assertRaises(QiskitError, chan.__rmul__, "s")
        self.assertRaises(QiskitError, chan._multiply, chan)
        self.assertRaises(QiskitError, chan.__rmul__, chan)

    def test_negate(self):
        """Test negate method"""
        rho_init = DensityMatrix(np.diag([1, 0]))
        rho_targ = DensityMatrix(np.diag([-0.5, -0.5]))
        chan = -Stinespring(self.depol_stine(1))
        self.assertEqual(rho_init.evolve(chan), rho_targ)
# Allow running this test module directly (e.g. `python this_file.py`).
if __name__ == "__main__":
    unittest.main()
|
https://github.com/jhlee29/quantum-meets-hangul
|
jhlee29
|
# Script: load hand-drawn Hangul character images, reduce them with
# PCA, and classify with a quantum-kernel SVM (Qiskit ZZFeatureMap).
# NOTE: statement order matters throughout (scaler fit/transform pairs,
# train/test split RNG) — do not reorder.

# from PIL import Image
from PIL import Image, ImageOps
import os, glob
import numpy as np
from sklearn import model_selection #cross_validation
# from keras.utils import np_utils
# General imports
import os
import gzip
import numpy as np
import matplotlib.pyplot as plt
from pylab import cm
import warnings

# input
classes = ["a", "b"]
# image size of 28 x 28
image_size = 28
# image_size = 8
# Maximum number of sheets to read
max_read = 100
num_classes = len(classes)

# load the image
X = []  # raw pixel arrays, one per image
Y = []  # integer class labels aligned with X
for index, classlabel in enumerate(classes):
    images_dir = "./hangul_characters/" + classlabel
    files = glob.glob(images_dir + "/*.jpg")
    for i, file in enumerate(files):
        # Stop if you read more than max_read to make the number of sheets for each class
        if i >= max_read: break
        # open the file, read as data, add to X
        # Repeatedly add labels with the same index to Y
        # NOTE(review): inverts colors *before* converting to grayscale —
        # presumably the source images are dark-on-light; confirm.
        image = Image.open(file)
        image = ImageOps.invert(image)
        image = image.convert("L")
        image = image.resize((image_size, image_size))
        data = np.asarray(image)
        X.append(data)
        Y.append(index)
X = np.array(X)
Y = np.array(Y)

# Fixed random_state keeps the split reproducible.
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y, test_size=0.2, random_state=42)
# Flatten images to feature vectors for the scalers/PCA below.
X_train = X_train.reshape(len(X_train), -1).astype(np.float64)
X_test = X_test.reshape(len(X_test), -1).astype(np.float64)

# Show the first example of each label as a sanity check.
fig = plt.figure()
LABELS = [0, 1]
num_labels = len(LABELS)
for i in range(num_labels):
    ax = fig.add_subplot(1, num_labels, i+1)
    img = X[Y==LABELS[i]][0].reshape((28,28))
    ax.imshow(img, cmap="Greys")
print(Y)
print(X_train[0])
warnings.filterwarnings("ignore")

# scikit-learn imports
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Qiskit imports
from qiskit import Aer, execute
from qiskit.circuit import QuantumCircuit, Parameter, ParameterVector
from qiskit.circuit.library import PauliFeatureMap, ZFeatureMap, ZZFeatureMap
from qiskit.circuit.library import TwoLocal, NLocal, RealAmplitudes, EfficientSU2
from qiskit.circuit.library import HGate, RXGate, RYGate, RZGate, CXGate, CRXGate, CRZGate
from qiskit_machine_learning.kernels import QuantumKernel

# Standardize (fit on train only; apply the same transform to test)
ss = StandardScaler()
X_train = ss.fit_transform(X_train)
X_test = ss.transform(X_test)
#sample_test = ss.transform(sample_test)
# Reduce dimensions to match the feature map's qubit count below.
N_DIM = 5
pca = PCA(n_components=N_DIM)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
# Normalize into [-1, 1] for the angle-encoding feature map.
mms = MinMaxScaler((-1, 1))
X_train = mms.fit_transform(X_train)
X_test = mms.transform(X_test)
#sample_test = mms.transform(sample_test)

# 3 features, depth 1, linear entanglement
# NOTE(review): comment says 3 features but feature_dimension=5 — the 5 is
# what matches N_DIM; the comment appears stale.
map_zz = ZZFeatureMap(feature_dimension=5, reps=1, entanglement='linear')
map_zz.decompose().draw('mpl')
# Quantum kernel evaluated exactly on the statevector simulator.
zz_kernel = QuantumKernel(feature_map=map_zz, quantum_instance=Aer.get_backend('statevector_simulator'))
matrix_train = zz_kernel.evaluate(x_vec=X_train)
matrix_val = zz_kernel.evaluate(x_vec=X_test, y_vec=X_train)

# Visualize the train and validation kernel matrices side by side.
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(np.asmatrix(matrix_train),
interpolation='nearest', origin='upper', cmap='Blues')
axs[0].set_title("training kernel matrix")
axs[1].imshow(np.asmatrix(matrix_val),
interpolation='nearest', origin='upper', cmap='Reds')
axs[1].set_title("validation kernel matrix")
plt.show()

# Classical SVM on the precomputed quantum kernel.
zz_svc = SVC(kernel='precomputed')
zz_svc.fit(matrix_train, y_train)
zz_score = zz_svc.score(matrix_val, y_test)
print(f'Precomputed kernel classification test score: {zz_score}')
|
https://github.com/shesha-raghunathan/DATE2019-qiskit-tutorial
|
shesha-raghunathan
|
# Setup for the Deutsch-algorithm tutorial (2019-era qiskit API).
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute
# Choose the drawer you like best:
from qiskit.tools.visualization import matplotlib_circuit_drawer as draw
#from qiskit.tools.visualization import circuit_drawer as draw
from qiskit import IBMQ
IBMQ.load_accounts() # make sure you have setup your token locally to use this
# Jupyter magic: render matplotlib figures inline (notebook-only syntax).
%matplotlib inline
import matplotlib.pyplot as plt
def show_results(D):
    """Render a counts dictionary as a bar chart.

    D maps classical bitstrings to counts, e.g. {'000': 497, '001': 527};
    keys label the x-axis, values give the bar heights.
    """
    positions = range(len(D))
    plt.bar(positions, list(D.values()), align='center')
    plt.xticks(positions, list(D.keys()))
    plt.show()
from qiskit import Aer
# See a list of available local simulators
print("Aer backends: ", Aer.backends())
# see a list of available remote backends (these are freely given by IBM)
print("IBMQ Backends: ", IBMQ.backends())
# execute circuit and either display a histogram of the results
def execute_locally(qc, draw_circuit=False, show_results=False):
    """Run `qc` on the local QASM simulator and return its counts dict.

    Args:
        qc: the QuantumCircuit to execute.
        draw_circuit (bool): if True, draw the circuit.
        show_results (bool): if True (and not drawing), plot a histogram
            of the counts.

    Returns:
        dict: bitstring -> count from the simulation.
    """
    # Compile and run the Quantum circuit on a simulator backend
    backend_sim = Aer.get_backend('qasm_simulator')
    job_sim = execute(qc, backend_sim)
    result_sim = job_sim.result()
    result_counts = result_sim.get_counts(qc)
    if draw_circuit or show_results:  # Print the results
        print("simulation: ", result_sim, result_counts)
    if draw_circuit:  # draw the circuit
        draw(qc)
    elif show_results:
        # BUG FIX: the boolean parameter `show_results` shadows the
        # module-level plotting helper of the same name, so calling it
        # directly raised TypeError ('bool' object is not callable).
        # Look the helper up in the module globals instead.
        globals()['show_results'](result_counts)
    return result_counts
from qiskit.backends.ibmq import least_busy
import time
# Compile and run on a real device backend
def execute_remotely(qc, draw_circuit=False, show_results=False):
    """Run `qc` on the least-busy real IBMQ device and return its counts.

    Polls the job status every 10 seconds until it reports DONE.

    Args:
        qc: the QuantumCircuit to execute.
        draw_circuit (bool): if True, draw the circuit first.
        show_results (bool): if True, plot a histogram of the counts.

    Returns:
        dict: bitstring -> count, or {} if no device could be used.
    """
    if draw_circuit:  # draw the circuit
        draw(qc)
    try:
        # select least busy available device and execute.
        least_busy_device = least_busy(IBMQ.backends(simulator=False))
        print("Running on current least busy device: ", least_busy_device)
        # running the job
        job_exp = execute(qc, backend=least_busy_device, shots=1024, max_credits=10)
        lapse, interval = 0, 10
        while job_exp.status().name != 'DONE':
            print('Status @ {} seconds'.format(interval * lapse))
            print(job_exp.status())
            time.sleep(interval)
            lapse += 1
        print(job_exp.status())
        exp_result = job_exp.result()
        result_counts = exp_result.get_counts(qc)
        # Show the results
        print("experiment: ", exp_result, result_counts)
        if show_results:
            # BUG FIX: the boolean parameter `show_results` shadows the
            # module-level plotting helper; calling it directly raised
            # TypeError. Fetch the real function from the module globals.
            globals()['show_results'](result_counts)
        return result_counts
    except Exception as err:
        # Narrowed from a bare `except:` so Ctrl-C still interrupts,
        # and report the actual reason instead of swallowing it.
        print("All devices are currently unavailable.", err)
        return {}
def new_circuit(size):
    """Build a fresh circuit over `size` qubits and `size` classical bits.

    Returns:
        tuple: (QuantumRegister, ClassicalRegister, QuantumCircuit) where
        the circuit acts on both registers.
    """
    quantum_reg = QuantumRegister(size)
    classical_reg = ClassicalRegister(size)
    return quantum_reg, classical_reg, QuantumCircuit(quantum_reg, classical_reg)
# Assertion message shared by the Deutsch self-checks further below.
ERROR_MESSAGE = "Looks like your Deutsch has a bug"
def quantum_oracle_1(qr, cr, circuit):
    # Constant oracle: applies no gates at all (f(x) = 0).
    pass
def quantum_oracle_2(qr, cr, circuit):
    # Balanced oracle: a single CNOT implements f(x) = x.
    circuit.cx(qr[0], qr[1])
def quantum_oracle_3(qr, cr, circuit):
    # Balanced oracle: three alternating CNOTs form a SWAP of qr[0] and qr[1].
    circuit.cx(qr[0], qr[1])
    circuit.cx(qr[1], qr[0])
    circuit.cx(qr[0], qr[1])
def quantum_oracle_4(qr, cr, circuit):
    # Z on the target followed by CNOT; asserted CONSTANT below —
    # NOTE(review): presumably the Z contributes only a phase on the
    # |-> ancilla, leaving the query qubit's statistics unchanged; verify.
    circuit.z(qr[1])
    circuit.cx(qr[0], qr[1])
# Manual run of the Deutsch circuit against oracle 1 (constant).
qr, cr, circuit = new_circuit(2)
# X gate on qubit 1 (bit flip)
circuit.x(qr[1]);
circuit.h(qr);
quantum_oracle_1(qr, cr, circuit)
circuit.h(qr[0]);
# measure the specific qubit
circuit.measure(qr[0], cr[0]);
# Try both commands:
# results = execute_locally(circuit, draw_circuit=False, show_results=False) # silent mode
results = execute_locally(circuit, draw_circuit=True, show_results=False)
# results = execute_locally(circuit, draw_circuit=False, show_results=True)
# results = execute_locally(circuit, draw_circuit=True, show_results=True) # this will be the same as True, False
# Only cr[0] is ever written, and qiskit orders counts keys as 'c1c0',
# so the possible keys are '00' (qubit 0 read 0) and '01' (qubit 0 read 1).
if '00' in results:
    print("CONSTANT")
elif '01' in results:
    # BUG FIX: this branch tested '10', which can never occur because
    # classical bit 1 is never measured; get_deutsch_verdict below
    # already uses the correct '01' key.
    print("BALANCED")
def get_deutsch_verdict(res): # should be improved for error handling
if '00' in res:
return "CONSTANT"
elif '01' in res:
return "BALANCED"
# Print the verdict for the manual run above (oracle 1 -> expected CONSTANT).
print(get_deutsch_verdict(results))
def deutsch(black_box):
    """Run the two-qubit Deutsch algorithm against `black_box`.

    Returns:
        str: "CONSTANT" or "BALANCED" as decided by get_deutsch_verdict.
    """
    qr, cr, circuit = new_circuit(2)
    circuit.x(qr[1])                # put the ancilla into |1>
    circuit.h(qr)                   # superpose both qubits
    black_box(qr, cr, circuit)      # apply the oracle under test
    circuit.h(qr[0])                # interfere the query qubit
    circuit.measure(qr[0], cr[0])   # read out only the query qubit
    counts = execute_locally(circuit, draw_circuit=False, show_results=False)
    return get_deutsch_verdict(counts)
# Smoke-test the implementation against all four oracles; every assertion
# reuses the shared ERROR_MESSAGE constant (same text as before).
deutsch(quantum_oracle_1)
assert deutsch(quantum_oracle_1) == 'CONSTANT', ERROR_MESSAGE
assert deutsch(quantum_oracle_2) == 'BALANCED', ERROR_MESSAGE
assert deutsch(quantum_oracle_3) == 'BALANCED', ERROR_MESSAGE
assert deutsch(quantum_oracle_4) == 'CONSTANT', ERROR_MESSAGE
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit
# Build a 5-qubit GHZ-state circuit: H on qubit 0, then CNOTs fanning
# out from qubit 0 to qubits 1..4.
ghz = QuantumCircuit(5)
ghz.h(0)
ghz.cx(0,range(1,5))
ghz.draw(output='mpl')
|
https://github.com/ElePT/qiskit-algorithms-test
|
ElePT
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test TrotterQRTE."""
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
from ddt import ddt, data, unpack
import numpy as np
from scipy.linalg import expm
from numpy.testing import assert_raises
from qiskit_algorithms.time_evolvers import TimeEvolutionProblem, TrotterQRTE
from qiskit.primitives import Estimator
from qiskit import QuantumCircuit
from qiskit.circuit.library import ZGate
from qiskit.quantum_info import Statevector, Pauli, SparsePauliOp
from qiskit.utils import algorithm_globals
from qiskit.circuit import Parameter
from qiskit.opflow import PauliSumOp, X, MatrixOp
from qiskit.synthesis import SuzukiTrotter, QDrift
@ddt
class TestTrotterQRTE(QiskitAlgorithmsTestCase):
    """TrotterQRTE tests."""
    def setUp(self):
        super().setUp()
        # Pin the global RNG so stochastic product formulas (QDrift) reproduce.
        self.seed = 50
        algorithm_globals.random_seed = self.seed
    # Two cases: default product formula (None -> LieTrotter) and SuzukiTrotter.
    @data(
        (
            None,
            Statevector([0.29192658 - 0.45464871j, 0.70807342 - 0.45464871j]),
        ),
        (
            SuzukiTrotter(),
            Statevector([0.29192658 - 0.84147098j, 0.0 - 0.45464871j]),
        ),
    )
    @unpack
    def test_trotter_qrte_trotter_single_qubit(self, product_formula, expected_state):
        """Test for default TrotterQRTE on a single qubit."""
        # PauliSumOp is deprecated, hence the warning context on construction.
        with self.assertWarns(DeprecationWarning):
            operator = PauliSumOp(SparsePauliOp([Pauli("X"), Pauli("Z")]))
        initial_state = QuantumCircuit(1)
        time = 1
        evolution_problem = TimeEvolutionProblem(operator, time, initial_state)
        trotter_qrte = TrotterQRTE(product_formula=product_formula)
        evolution_result_state_circuit = trotter_qrte.evolve(evolution_problem).evolved_state
        np.testing.assert_array_almost_equal(
            Statevector.from_instruction(evolution_result_state_circuit).data, expected_state.data
        )
    # Hamiltonian either static or with a time-dependent coefficient t_param.
    @data((SparsePauliOp(["X", "Z"]), None), (SparsePauliOp(["X", "Z"]), Parameter("t")))
    @unpack
    def test_trotter_qrte_trotter(self, operator, t_param):
        """Test for default TrotterQRTE on a single qubit with auxiliary operators."""
        if not t_param is None:
            operator = SparsePauliOp(operator.paulis, np.array([t_param, 1]))
        # LieTrotter with 1 rep
        aux_ops = [Pauli("X"), Pauli("Y")]
        initial_state = QuantumCircuit(1)
        time = 3
        num_timesteps = 2
        evolution_problem = TimeEvolutionProblem(
            operator, time, initial_state, aux_ops, t_param=t_param
        )
        estimator = Estimator()
        # Reference state and observable trajectory from exact matrix
        # exponentiation (see _get_expected_trotter_qrte below).
        expected_psi, expected_observables_result = self._get_expected_trotter_qrte(
            operator,
            time,
            num_timesteps,
            initial_state,
            aux_ops,
            t_param,
        )
        expected_evolved_state = Statevector(expected_psi)
        algorithm_globals.random_seed = 0
        trotter_qrte = TrotterQRTE(estimator=estimator, num_timesteps=num_timesteps)
        evolution_result = trotter_qrte.evolve(evolution_problem)
        np.testing.assert_array_almost_equal(
            Statevector.from_instruction(evolution_result.evolved_state).data,
            expected_evolved_state.data,
        )
        aux_ops_result = evolution_result.aux_ops_evaluated
        # Exact (statevector) estimation: zero variance and zero shots expected.
        expected_aux_ops_result = [
            (expected_observables_result[-1][0], {"variance": 0, "shots": 0}),
            (expected_observables_result[-1][1], {"variance": 0, "shots": 0}),
        ]
        means = [element[0] for element in aux_ops_result]
        expected_means = [element[0] for element in expected_aux_ops_result]
        np.testing.assert_array_almost_equal(means, expected_means)
        vars_and_shots = [element[1] for element in aux_ops_result]
        expected_vars_and_shots = [element[1] for element in expected_aux_ops_result]
        observables_result = evolution_result.observables
        expected_observables_result = [
            [(o, {"variance": 0, "shots": 0}) for o in eor] for eor in expected_observables_result
        ]
        # Flatten per-timestep observable lists before comparing.
        means = [sub_element[0] for element in observables_result for sub_element in element]
        expected_means = [
            sub_element[0] for element in expected_observables_result for sub_element in element
        ]
        np.testing.assert_array_almost_equal(means, expected_means)
        for computed, expected in zip(vars_and_shots, expected_vars_and_shots):
            self.assertAlmostEqual(computed.pop("variance", 0), expected["variance"], 2)
            self.assertEqual(computed.pop("shots", 0), expected["shots"])
    # Two-qubit Hamiltonians of several types: PauliSumOp and bare Pauli.
    @data(
        (
            PauliSumOp(SparsePauliOp([Pauli("XY"), Pauli("YX")])),
            Statevector([-0.41614684 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.90929743 + 0.0j]),
        ),
        (
            PauliSumOp(SparsePauliOp([Pauli("ZZ"), Pauli("ZI"), Pauli("IZ")])),
            Statevector([-0.9899925 - 0.14112001j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j]),
        ),
        (
            Pauli("YY"),
            Statevector([0.54030231 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.84147098j]),
        ),
    )
    @unpack
    def test_trotter_qrte_trotter_two_qubits(self, operator, expected_state):
        """Test for TrotterQRTE on two qubits with various types of a Hamiltonian."""
        # LieTrotter with 1 rep
        initial_state = QuantumCircuit(2)
        evolution_problem = TimeEvolutionProblem(operator, 1, initial_state)
        trotter_qrte = TrotterQRTE()
        evolution_result = trotter_qrte.evolve(evolution_problem)
        np.testing.assert_array_almost_equal(
            Statevector.from_instruction(evolution_result.evolved_state).data, expected_state.data
        )
    # Both initial states evolve to the same vector (Z only adds a phase on |0>).
    @data(
        (QuantumCircuit(1), Statevector([0.23071786 - 0.69436148j, 0.4646314 - 0.49874749j])),
        (
            QuantumCircuit(1).compose(ZGate(), [0]),
            Statevector([0.23071786 - 0.69436148j, 0.4646314 - 0.49874749j]),
        ),
    )
    @unpack
    def test_trotter_qrte_qdrift(self, initial_state, expected_state):
        """Test for TrotterQRTE with QDrift."""
        with self.assertWarns(DeprecationWarning):
            operator = PauliSumOp(SparsePauliOp([Pauli("X"), Pauli("Z")]))
        time = 1
        evolution_problem = TimeEvolutionProblem(operator, time, initial_state)
        # QDrift samples Hamiltonian terms randomly; the seed fixes the sequence.
        algorithm_globals.random_seed = 0
        trotter_qrte = TrotterQRTE(product_formula=QDrift())
        evolution_result = trotter_qrte.evolve(evolution_problem)
        np.testing.assert_array_almost_equal(
            Statevector.from_instruction(evolution_result.evolved_state).data,
            expected_state.data,
        )
    # Invalid parameter configurations: stray t_param or value maps must raise.
    @data((Parameter("t"), {}), (None, {Parameter("x"): 2}), (None, None))
    @unpack
    def test_trotter_qrte_trotter_param_errors(self, t_param, param_value_dict):
        """Test TrotterQRTE with raising errors for parameters."""
        with self.assertWarns(DeprecationWarning):
            operator = Parameter("t") * PauliSumOp(SparsePauliOp([Pauli("X")])) + PauliSumOp(
                SparsePauliOp([Pauli("Z")])
            )
        initial_state = QuantumCircuit(1)
        self._run_error_test(initial_state, operator, None, None, t_param, param_value_dict)
    # Auxiliary operators without an estimator must raise.
    @data(([Pauli("X"), Pauli("Y")], None))
    @unpack
    def test_trotter_qrte_trotter_aux_ops_errors(self, aux_ops, estimator):
        """Test TrotterQRTE with raising errors."""
        with self.assertWarns(DeprecationWarning):
            operator = PauliSumOp(SparsePauliOp([Pauli("X")])) + PauliSumOp(
                SparsePauliOp([Pauli("Z")])
            )
        initial_state = QuantumCircuit(1)
        self._run_error_test(initial_state, operator, aux_ops, estimator, None, None)
    # Unsupported Hamiltonian types / missing initial state must raise.
    @data(
        (X, QuantumCircuit(1)),
        (MatrixOp([[1, 1], [0, 1]]), QuantumCircuit(1)),
        (PauliSumOp(SparsePauliOp([Pauli("X")])) + PauliSumOp(SparsePauliOp([Pauli("Z")])), None),
        (
            SparsePauliOp([Pauli("X"), Pauli("Z")], np.array([Parameter("a"), Parameter("b")])),
            QuantumCircuit(1),
        ),
    )
    @unpack
    def test_trotter_qrte_trotter_hamiltonian_errors(self, operator, initial_state):
        """Test TrotterQRTE with raising errors for evolution problem content."""
        self._run_error_test(initial_state, operator, None, None, None, None)
    @staticmethod
    def _run_error_test(initial_state, operator, aux_ops, estimator, t_param, param_value_dict):
        # Shared driver: building or evolving the problem must raise ValueError.
        time = 1
        algorithm_globals.random_seed = 0
        trotter_qrte = TrotterQRTE(estimator=estimator)
        with assert_raises(ValueError):
            evolution_problem = TimeEvolutionProblem(
                operator,
                time,
                initial_state,
                aux_ops,
                t_param=t_param,
                param_value_map=param_value_dict,
            )
            _ = trotter_qrte.evolve(evolution_problem)
    @staticmethod
    def _get_expected_trotter_qrte(operator, time, num_timesteps, init_state, observables, t_param):
        """Compute reference values for Trotter evolution via exact matrix exponentiation."""
        dt = time / num_timesteps
        observables = [obs.to_matrix() for obs in observables]
        psi = Statevector(init_state).data
        # Static Hamiltonian: term matrices can be materialized once up front.
        if t_param is None:
            ops = [Pauli(op).to_matrix() * np.real(coeff) for op, coeff in operator.to_list()]
        observable_results = []
        # Expectation values at t = 0 come first.
        observable_results.append([np.real(np.conj(psi).dot(obs).dot(psi)) for obs in observables])
        for n in range(num_timesteps):
            # Time-dependent Hamiltonian: rebind the coefficient at each step.
            if t_param is not None:
                time_value = (n + 1) * dt
                bound = operator.assign_parameters([time_value])
                ops = [Pauli(op).to_matrix() * np.real(coeff) for op, coeff in bound.to_list()]
            for op in ops:
                psi = expm(-1j * op * dt).dot(psi)
            observable_results.append(
                [np.real(np.conj(psi).dot(obs).dot(psi)) for obs in observables]
            )
        return psi, observable_results
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
https://github.com/matteoacrossi/oqs-jupyterbook
|
matteoacrossi
|
import numpy as np
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource, Slider, CustomJS, Text
from bokeh.plotting import Figure, show, output_file
from bokeh.io import output_notebook
def c1t(t, lam = 1., R = .25, c10 = 1.):
    """Excited-state amplitude c1(t) for a qubit damped by a Lorentzian bath.

    R selects the regime: R == .5 is the critical case, R < .5 the
    overdamped (cosh/sinh) branch, R > .5 the underdamped (cos/sin) branch.
    c10 is the initial amplitude; lam the bath spectral width.
    """
    expt = lam * t / 2
    envelope = np.exp(-expt)
    if R == .5:
        output = c10 * envelope * (1 + expt)
    elif R < .5:
        s = np.sqrt(1 - 2 * R)
        output = c10 * envelope * (np.cosh(expt * s) + np.sinh(expt * s) / s)
    elif R > .5:
        s = np.sqrt(2 * R - 1)
        output = c10 * envelope * (np.cos(expt * s) + np.sin(expt * s) / s)
    return output
def lorentzian_J(w, R = .25, omega_0 = 0.):
    """Lorentzian spectral density J(w) with gamma_0 = 1 and width lam = 1/R.

    The flat-spectrum limit R == 0 returns the constant 1 (avoids the
    division by zero in lam = 1/R).
    """
    if R == 0.:
        return 1
    lam = 1. / R
    return 1/(2*np.pi) * lam**2 / ((omega_0 - w)**2 + lam**2)
ts = [t*0.02 for t in range(0, 500)]
Rrange = [r*.02 for r in range(0,int(1/.02))] + [r * .1 for r in range(10,100)] + [r for r in range(10,100+1)]
Rrange = np.array(Rrange)
Rrange_str = [str(i) for i in range(len(Rrange))]
Rrange_str_R = ['R = {:.2f}'.format(R) for R in Rrange] # truncate to two decimals
# make a dictionary of form {'0': 0.0, '1': 0.2, .. }
Rrange_dict = {Rrange_str[i]:Rrange.round(2)[i] for i,_ in enumerate(Rrange)} # rounding to two decimals
ys = {r_str:[c1t(t, R = Rrange[int(r_str)])**2 for t in ts] for r_str in Rrange_str}
#ys = {r_str:[c1t(t, R = Rrange_dict[r_str])**2 for t in ts] for r_str in Rrange_str}
initial_r = Rrange_str[len(Rrange)//2]
ws = [t*0.02 for t in range(-250, 250)]
js = {r_str:[lorentzian_J(w, R = Rrange[int(r_str)]) for w in ws] for r_str in Rrange_str}
rs = {Rrange_str[i] : [Rrange_str_R[i]] for i,_ in enumerate(Rrange)}
# Wrap the data in two ColumnDataSources
source_visible = ColumnDataSource(data=dict(
x = ts, y = ys[initial_r]))
source_available = ColumnDataSource(data=ys)
# Wrap the data in two ColumnDataSources
source_visible2 = ColumnDataSource(data=dict(
x = ws, y = js[initial_r]))
source_available2 = ColumnDataSource(data=js)
# Define plot elements
plot = Figure(plot_width=300, plot_height=300, x_range=(-.1, 10), y_range=(-.01, 1))
plot.line('x', 'y', source=source_visible, legend_label="Οββ(t)", line_width=3, line_alpha=0.6)
plot2 = Figure(plot_width=300, plot_height=300, x_range=(-5, 5), y_range=(-.001, .2))
plot2.line('x', 'y', source=source_visible2, legend_label="J(Ο)",
line_width=3, line_alpha=0.6, line_color="#f01001")
# Add text
text_source = ColumnDataSource({'r_value': ['%s' % Rrange_str_R[1]]})
r_available = ColumnDataSource(data=rs)
text = Text(x=9.5, y=.7, text='r_value', text_font_size='15pt', text_align='right')
plot.add_glyph(text_source, text)
# Add slider
slider = Slider(value=int(initial_r),
start=np.min([int(i) for i in ys.keys()]),
end=np.max([int(i) for i in ys.keys()]),
step=1,
show_value = False,
title = 'R')
# Define CustomJS callback, which updates the plot based on selected function
# by updating the source_visible ColumnDataSource.
slider.callback = CustomJS(
args=dict(source_visible=source_visible,
source_available=source_available,
source_visible2=source_visible2,
source_available2=source_available2,
text_source = text_source,
r_available = r_available), code="""
var r_idx = cb_obj.value;
// Get the data from the data sources
var data_visible = source_visible.data;
var data_available = source_available.data;
var data_visible2 = source_visible2.data;
var data_available2 = source_available2.data;
// Change y-axis data according to the selected value
data_visible.y = data_available[r_idx];
data_visible2.y = data_available2[r_idx];
// text
text_source.data = {'r_value': [String(r_available.data[r_idx])]};
// Update the plot
source_visible.change.emit();
source_visible2.change.emit();
""")
layout = row(column(plot,slider), plot2)
output_file("jaynescummings.html", title="Jaynes-Cummings Model")
output_notebook()
show(layout)
|
https://github.com/mmetcalf14/Hamiltonian_Downfolding_IBM
|
mmetcalf14
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Functions used for the analysis of randomized benchmarking results.
"""
from scipy.optimize import curve_fit
import numpy as np
from qiskit import QiskitError
from ..tomography import marginal_counts
from ...characterization.fitters import build_counts_dict_from_list
try:
from matplotlib import pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
class RBFitter:
    """
    Class for fitters for randomized benchmarking
    """
    def __init__(self, backend_result, cliff_lengths,
                 rb_pattern=None):
        """
        Args:
            backend_result: list of results (qiskit.Result).
            cliff_lengths: the Clifford lengths, 2D list i x j where i is the
                number of patterns, j is the number of cliffords lengths
            rb_pattern: the pattern for the rb sequences.
        """
        if rb_pattern is None:
            rb_pattern = [[0]]
        self._cliff_lengths = cliff_lengths
        self._rb_pattern = rb_pattern
        self._raw_data = []
        self._ydata = []
        self._fit = []
        self._nseeds = 0
        self._result_list = []
        self.add_data(backend_result)
    @property
    def raw_data(self):
        """Return raw data."""
        return self._raw_data
    @property
    def cliff_lengths(self):
        """Return clifford lengths."""
        # BUG FIX: previously returned `self.cliff_lengths` (the property
        # itself), causing infinite recursion; return the backing field.
        return self._cliff_lengths
    @property
    def ydata(self):
        """Return ydata (means and std devs)."""
        return self._ydata
    @property
    def fit(self):
        """Return fit."""
        return self._fit
    @property
    def seeds(self):
        """Return the number of loaded seeds."""
        return self._nseeds
    @property
    def results(self):
        """Return all the results."""
        return self._result_list
    def add_data(self, new_backend_result, rerun_fit=True):
        """
        Add a new result. Re-calculate the raw data, means and fit.
        Args:
            new_backend_result: list of rb results
            rerun_fit: re-calculate the means and fit the result
        Additional information:
            Assumes that 'result' was executed is
            the output of circuits generated by randomized_benchmarking_seq,
        """
        if new_backend_result is None:
            return
        if not isinstance(new_backend_result, list):
            new_backend_result = [new_backend_result]
        for result in new_backend_result:
            self._result_list.append(result)
            # update the number of seeds *if* new ones
            # added. Note, no checking if we've done all the
            # cliffords
            for rbcirc in result.results:
                # Seed index is encoded as the trailing '_<seed>' of the name.
                nseeds_circ = int(rbcirc.header.name.split('_')[-1])
                if (nseeds_circ+1) > self._nseeds:
                    self._nseeds = nseeds_circ+1
        for result in self._result_list:
            if not len(result.results) == len(self._cliff_lengths[0]):
                raise ValueError(
                    "The number of clifford lengths must match the number of "
                    "results")
        if rerun_fit:
            self.calc_data()
            self.calc_statistics()
            self.fit_data()
    @staticmethod
    def _rb_fit_fun(x, a, alpha, b):
        """Function used to fit rb: a * alpha^x + b."""
        # pylint: disable=invalid-name
        return a * alpha ** x + b
    def calc_data(self):
        """
        Retrieve probabilities of success from execution results. Outputs
        results into an internal variable _raw_data which is a 3-dimensional
        list, where item (i,j,k) is the probability to measure the ground state
        for the set of qubits in pattern "i" for seed no. j and vector length
        self._cliff_lengths[i][k].
        Additional information:
            Assumes that 'result' was executed is
            the output of circuits generated by randomized_benchmarking_seq,
        """
        # Merge counts of identically-named circuits across all results.
        circ_counts = {}
        circ_shots = {}
        for seedidx in range(self._nseeds):
            for circ, _ in enumerate(self._cliff_lengths[0]):
                circ_name = 'rb_length_%d_seed_%d' % (circ, seedidx)
                count_list = []
                for result in self._result_list:
                    try:
                        count_list.append(result.get_counts(circ_name))
                    except (QiskitError, KeyError):
                        pass
                circ_counts[circ_name] = \
                    build_counts_dict_from_list(count_list)
                circ_shots[circ_name] = sum(circ_counts[circ_name].values())
        self._raw_data = []
        startind = 0
        for patt_ind in range(len(self._rb_pattern)):
            # All-zeros string of the pattern's width = ground-state outcome.
            string_of_0s = ''
            string_of_0s = string_of_0s.zfill(len(self._rb_pattern[patt_ind]))
            self._raw_data.append([])
            endind = startind+len(self._rb_pattern[patt_ind])
            for i in range(self._nseeds):
                self._raw_data[-1].append([])
                for k, _ in enumerate(self._cliff_lengths[patt_ind]):
                    circ_name = 'rb_length_%d_seed_%d' % (k, i)
                    counts_subspace = marginal_counts(
                        circ_counts[circ_name],
                        np.arange(startind, endind))
                    self._raw_data[-1][i].append(
                        counts_subspace.get(string_of_0s, 0)
                        / circ_shots[circ_name])
            # BUG FIX: was `startind += (endind)`, which over-advances the
            # qubit offset as soon as there are three or more patterns;
            # the next pattern starts right after the current one.
            startind = endind
    def calc_statistics(self):
        """
        Extract averages and std dev from the raw data (self._raw_data).
        Assumes that self._calc_data has been run. Output into internal
        _ydata variable:
            ydata is a list of dictionaries (length number of patterns).
            Dictionary ydata[i]:
            ydata[i]['mean'] is a numpy_array of length n;
                entry j of this array contains the mean probability of
                success over seeds, for vector length
                self._cliff_lengths[i][j].
            And ydata[i]['std'] is a numpy_array of length n;
                entry j of this array contains the std
                of the probability of success over seeds,
                for vector length self._cliff_lengths[i][j].
        """
        self._ydata = []
        for patt_ind in range(len(self._rb_pattern)):
            self._ydata.append({})
            self._ydata[-1]['mean'] = np.mean(self._raw_data[patt_ind], 0)
            if len(self._raw_data[patt_ind]) == 1:  # 1 seed
                # A std dev over a single seed is meaningless.
                self._ydata[-1]['std'] = None
            else:
                self._ydata[-1]['std'] = np.std(self._raw_data[patt_ind], 0)
    def fit_data(self):
        """
        Fit the RB results to an exponential curve.
        Fit each of the patterns
        Puts the results into a list of fit dictionaries:
            where each dictionary corresponds to a pattern and has fields:
            'params' - three parameters of rb_fit_fun. The middle one is the
                       exponent.
            'err' - the error limits of the parameters.
            'epc' - error per Clifford
        """
        self._fit = []
        for patt_ind, (lens, qubits) in enumerate(zip(self._cliff_lengths,
                                                      self._rb_pattern)):
            # if at least one of the std values is zero, then sigma is replaced
            # by None
            if self._ydata[patt_ind]['std'] is not None:
                sigma = self._ydata[patt_ind]['std'].copy()
                if len(sigma) - np.count_nonzero(sigma) > 0:
                    sigma = None
            else:
                sigma = None
            params, pcov = curve_fit(self._rb_fit_fun, lens,
                                     self._ydata[patt_ind]['mean'],
                                     sigma=sigma,
                                     p0=(1.0, 0.95, 0.0),
                                     bounds=([-2, 0, -2], [2, 1, 2]))
            alpha = params[1]  # exponent
            params_err = np.sqrt(np.diag(pcov))
            alpha_err = params_err[1]
            # EPC from the decay exponent: (d-1)/d * (1 - alpha), d = 2^nqubits.
            nrb = 2 ** len(qubits)
            epc = (nrb-1)/nrb*(1-alpha)
            epc_err = epc*alpha_err/alpha
            self._fit.append({'params': params, 'params_err': params_err,
                              'epc': epc, 'epc_err': epc_err})
    def plot_rb_data(self, pattern_index=0, ax=None,
                     add_label=True, show_plt=True):
        """
        Plot randomized benchmarking data of a single pattern.
        Args:
            pattern_index: which RB pattern to plot
            ax (Axes or None): plot axis (if passed in).
            add_label (bool): Add an EPC label
            show_plt (bool): display the plot.
        Raises:
            ImportError: If matplotlib is not installed.
        """
        fit_function = self._rb_fit_fun
        if not HAS_MATPLOTLIB:
            raise ImportError('The function plot_rb_data needs matplotlib. '
                              'Run "pip install matplotlib" before.')
        if ax is None:
            plt.figure()
            ax = plt.gca()
        xdata = self._cliff_lengths[pattern_index]
        # Plot the result for each sequence
        for one_seed_data in self._raw_data[pattern_index]:
            ax.plot(xdata, one_seed_data, color='gray', linestyle='none',
                    marker='x')
        # Plot the mean with error bars
        ax.errorbar(xdata, self._ydata[pattern_index]['mean'],
                    yerr=self._ydata[pattern_index]['std'],
                    color='r', linestyle='--', linewidth=3)
        # Plot the fit
        ax.plot(xdata,
                fit_function(xdata, *self._fit[pattern_index]['params']),
                color='blue', linestyle='-', linewidth=2)
        ax.tick_params(labelsize=14)
        ax.set_xlabel('Clifford Length', fontsize=16)
        ax.set_ylabel('Ground State Population', fontsize=16)
        ax.grid(True)
        if add_label:
            bbox_props = dict(boxstyle="round,pad=0.3",
                              fc="white", ec="black", lw=2)
            ax.text(0.6, 0.9,
                    "alpha: %.3f(%.1e) EPC: %.3e(%.1e)" %
                    (self._fit[pattern_index]['params'][1],
                     self._fit[pattern_index]['params_err'][1],
                     self._fit[pattern_index]['epc'],
                     self._fit[pattern_index]['epc_err']),
                    ha="center", va="center", size=14,
                    bbox=bbox_props, transform=ax.transAxes)
        if show_plt:
            plt.show()
|
https://github.com/arian-code/nptel_quantum_assignments
|
arian-code
|
from sympy import *
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum import TensorProduct
import numpy as np
init_printing(use_unicode=True)
#One Q bit gates
H=Matrix([[1/sqrt(2), 1/sqrt(2)],[1/sqrt(2), -1/sqrt(2)]])
S=Matrix([[1, 0],[0, I]])
X=Matrix([[0, 1],[1, 0]])
Y=Matrix([[0, -I],[I, 0]])
Z=Matrix([[1, 0],[0, -1]])
# NOTE(review): this name shadows sympy's Identity() brought in by the
# star-import above; harmless here but worth renaming.
Identity=Matrix([[1, 0],[0, 1]])
H, S, X, Y, Z, Identity
#Two Q bit gates
CNOT=Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
CNOT
HI=TensorProduct(H, Identity) #Example only
CNOT, HI
# Circuit as a matrix product, rightmost factor applied first:
# (H x I) then (S x I) then CNOT.
circuit=CNOT*TensorProduct(S, Identity)*TensorProduct(H, Identity)
circuit
# Input state |11> as a column vector in the computational basis.
in_1=Matrix([[0],[0],[0],[1]])
in_1
lhs=circuit*in_1
lhs
def check_option(lhs, rhs):
    """Print both state vectors, then report whether they match exactly.

    Factors out the compare-and-print code that was copy-pasted four
    times for options A-D; the printed output is unchanged.
    """
    print (lhs)
    print (rhs)
    if (lhs==rhs):
        print ("It is true")
    else:
        print ("It is false")
#option A
rhs=Matrix([[0],[1/sqrt(2)],[1/sqrt(2)],[0]])
check_option(lhs, rhs)
#option B
rhs=Matrix([[0],[1/sqrt(2)],[-I/sqrt(2)],[0]])
check_option(lhs, rhs)
#option C
rhs=Matrix([[0],[1/sqrt(2)],[-1/sqrt(2)],[0]])
check_option(lhs, rhs)
#option D
rhs=Matrix([[1/sqrt(2)],[0],[0],[1/sqrt(2)]])
check_option(lhs, rhs)
|
https://github.com/jonasmaziero/computacao_quantica_qiskit
|
jonasmaziero
|
from qiskit import QuantumCircuit
def qc_state_prep():
    """Prepare the two-qubit singlet-like state (|10> - |01>)/sqrt(2):
    H + CX make a Bell pair, Z flips the |11> sign, X on qubit 1 maps it
    to |Psi_-> (matching the plot title used below)."""
    qc = QuantumCircuit(2, name = 'state_prep');
    qc.h(0)
    qc.cx(0, 1)
    qc.z(0)
    qc.x(1)
    return qc
qc_state_prep_ = qc_state_prep(); qc_state_prep_.draw(output = 'mpl')
from qiskit import IBMQ, Aer, execute
nshots = 8192
provider = IBMQ.load_account()
# Real 5-qubit device plus a local simulator for comparison runs.
device = provider.get_backend('ibmq_quito')
simulator = Aer.get_backend('qasm_simulator')
from qiskit.tools.monitor import job_monitor
from qiskit.ignis.verification.tomography import state_tomography_circuits, StateTomographyFitter
from qiskit.visualization import plot_state_city
# Full state tomography of the prepared state on the simulator,
# reconstructed by least-squares fitting.
qstc = state_tomography_circuits(qc_state_prep_, [0, 1])
job = execute(qstc, backend = simulator, shots = nshots)
qstf = StateTomographyFitter(job.result(), qstc)
rho = qstf.fit(method = 'lstsq')
plot_state_city(rho, title = r'$|\Psi_{-}^{sim}\rangle$')
import math
def qc_A1B1():
    """Basis-change circuit for the CHSH setting A1(x)B1: Z on qubit 0,
    u(pi/4, 0, pi) rotating qubit 1 — presumably so that a subsequent
    Z-basis measurement realizes the A1 and B1 observables."""
    qc = QuantumCircuit(2, name = 'A1B1'); qc.z(0); qc.u(math.pi/4, 0, math.pi, [1])
    return qc
qc_A1B1_ = qc_A1B1(); qc_A1B1_.draw(output = 'mpl')
# State prep, then the A1B1 basis change, then measure both qubits.
qc = QuantumCircuit(2, 2)
qc_state_prep_ = qc_state_prep(); qc.append(qc_state_prep_, [0, 1])
qc.barrier()
qc_A1B1_ = qc_A1B1(); qc.append(qc_A1B1_, [0, 1]); qc.measure([0, 1], [0, 1])
qc.draw(output = 'mpl')
jobS = execute(qc, backend = simulator, shots = nshots)
resultS = jobS.result().get_counts(); print(resultS , ',', resultS['11'])
def AB_avg(result, nshots):
avg = 0
if '00' in result:
avg += result['00']
if '01' in result:
avg -= result['01']
if '10' in result:
avg -= result['10']
if '11' in result:
avg += result['11']
return avg/nshots
A1B1_avg = AB_avg(resultS, nshots); print(A1B1_avg)
# Same circuit on the real device; compare against the ideal -1/sqrt(2).
jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
A1B1_avgE = AB_avg(jobE.result().get_counts(), nshots)
print('<A1B1>_teo = ', -1/math.sqrt(2), ', <A1B1>_sim = ', A1B1_avg, ', <A1B1>_exp = ', A1B1_avgE)
def qc_A1B2():
    """Basis-change circuit for the A1(x)B2 setting: same as A1B1 but with
    the qubit-1 rotation angle negated (u(-pi/4, 0, pi))."""
    qc = QuantumCircuit(2, name = 'A1B2'); qc.z(0); qc.u(-math.pi/4, 0, math.pi, [1])
    return qc
qc_A1B2_ = qc_A1B2(); qc_A1B2_.draw(output = 'mpl')
# Prep + A1B2 basis change + measurement, on simulator and device.
qc = QuantumCircuit(2, 2)
qc_state_prep_ = qc_state_prep(); qc.append(qc_state_prep_, [0, 1])
qc_A1B2_ = qc_A1B2(); qc.append(qc_A1B2_, [0, 1]); qc.measure([0, 1], [0, 1])
jobS = execute(qc, backend = simulator, shots = nshots)
resultS = jobS.result().get_counts()
A1B2_avg = AB_avg(resultS, nshots)
jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
A1B2_avgE = AB_avg(jobE.result().get_counts(), nshots)
print('<A1B2>_teo = ', -1/math.sqrt(2), ', <A1B2>_sim = ', A1B2_avg, ', <A1B2>_exp = ', A1B2_avgE)
def qc_A2B1():
    """Basis-change circuit for the A2(x)B1 setting: H on qubit 0 (X-basis
    measurement of A2), u(pi/4, 0, pi) on qubit 1 for B1."""
    qc = QuantumCircuit(2, name = 'A2B1'); qc.h(0); qc.u(math.pi/4, 0, math.pi, [1])
    return qc
qc_A2B1_ = qc_A2B1(); qc_A2B1_.draw(output = 'mpl')
# Prep + A2B1 basis change + measurement, on simulator and device.
qc = QuantumCircuit(2, 2)
qc_state_prep_ = qc_state_prep(); qc.append(qc_state_prep_, [0, 1])
qc_A2B1_ = qc_A2B1(); qc.append(qc_A2B1_, [0, 1]); qc.measure([0, 1], [0, 1])
jobS = execute(qc, backend = simulator, shots = nshots)
resultS = jobS.result().get_counts()
A2B1_avg = AB_avg(resultS, nshots)
jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
A2B1_avgE = AB_avg(jobE.result().get_counts(), nshots)
print('<A2B1>_teo = ', -1/math.sqrt(2), ', <A2B1>_sim = ', A2B1_avg, ', <A2B1>_exp = ', A2B1_avgE)
def qc_A2B2():
    """Basis-change circuit for the A2(x)B2 setting: H on qubit 0,
    u(-pi/4, 0, pi) on qubit 1."""
    qc = QuantumCircuit(2, name = 'A2B2'); qc.h(0); qc.u(-math.pi/4, 0, math.pi, [1])
    return qc
qc_A2B2_ = qc_A2B2(); qc_A2B2_.draw(output = 'mpl')
# Prep + A2B2 basis change + measurement, on simulator and device.
qc = QuantumCircuit(2, 2)
qc_state_prep_ = qc_state_prep(); qc.append(qc_state_prep_, [0, 1])
qc_A2B2_ = qc_A2B2(); qc.append(qc_A2B2_, [0, 1]); qc.measure([0, 1], [0, 1])
jobS = execute(qc, backend = simulator, shots = nshots)
resultS = jobS.result().get_counts()
A2B2_avg = AB_avg(resultS, nshots)
jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
A2B2_avgE = AB_avg(jobE.result().get_counts(), nshots)
print('<A2B2>_teo = ', 1/math.sqrt(2), ', <A2B2>_sim = ', A2B2_avg, ', <A2B2>_exp = ', A2B2_avgE)
# CHSH combination <O> = <A1B1> + <A1B2> + <A2B1> - <A2B2>;
# the ideal value for this state is -2*sqrt(2) (Tsirelson bound).
O_avg = A1B1_avg + A1B2_avg + A2B1_avg - A2B2_avg
O_avgE = A1B1_avgE + A1B2_avgE + A2B1_avgE - A2B2_avgE
print('<O>_teo = ', -2*math.sqrt(2), ', <O>_sim = ', O_avg, ', <O>_exp = ', O_avgE)
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
def qc_state_prep(th):
    """Build the 2-qubit state-preparation circuit parameterized by angle th.

    Gates: U(th, 0, 0) on qubit 0, CNOT(0 -> 1), then Z on qubit 0 and
    X on qubit 1.
    """
    prep = QuantumCircuit(2, name = 'Psi')
    prep.u(th, 0, 0, [0])
    prep.cx([0], [1])
    prep.z([0])
    prep.x([1])
    return prep
# --- Sweep the state-preparation angle p in [0, pi/2] and measure <O> at each point ---
qc_state_prep_ = qc_state_prep(math.pi); qc_state_prep_.draw(output = 'mpl')
# Oavg is the theoretical CHSH value -sqrt(2)*(1 + sin(p)) for this family of states.
p = np.arange(0, math.pi/2+0.1, 0.1); Oavg = -math.sqrt(2)*(1+np.sin(p))
# y = -2 is the local-hidden-variable bound; sim/exp arrays are filled in the loop.
d = p.shape[0]; y = -2*np.ones(d); Oavg_sim = np.zeros(d); Oavg_exp = np.zeros(d)
# NOTE(review): the zero-argument qc_A1B1()/qc_A2B1() used here are the variants
# defined in earlier notebook cells, not the parameterized ones below — confirm
# cell execution order.
for j in range(0, d):
    # A1B1: prepare state at angle p[j], change basis, measure, on sim and device.
    qc = QuantumCircuit(2, 2)
    qc_state_prep_ = qc_state_prep(p[j]); qc.append(qc_state_prep_, [0, 1])
    qc_A1B1_ = qc_A1B1(); qc.append(qc_A1B1_, [0, 1]); qc.measure([0, 1], [0, 1])
    jobS = execute(qc, backend = simulator, shots = nshots)
    resultS = jobS.result().get_counts(); A1B1_avg = AB_avg(resultS, nshots)
    jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
    A1B1_avgE = AB_avg(jobE.result().get_counts(), nshots)
    # A1B2
    qc = QuantumCircuit(2, 2)
    qc_state_prep_ = qc_state_prep(p[j]); qc.append(qc_state_prep_, [0, 1])
    qc_A1B2_ = qc_A1B2(); qc.append(qc_A1B2_, [0, 1]); qc.measure([0, 1], [0, 1])
    jobS = execute(qc, backend = simulator, shots = nshots)
    resultS = jobS.result().get_counts(); A1B2_avg = AB_avg(resultS, nshots)
    jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
    A1B2_avgE = AB_avg(jobE.result().get_counts(), nshots)
    # A2B1
    qc = QuantumCircuit(2, 2)
    qc_state_prep_ = qc_state_prep(p[j]); qc.append(qc_state_prep_, [0, 1])
    qc_A2B1_ = qc_A2B1(); qc.append(qc_A2B1_, [0, 1]); qc.measure([0, 1], [0, 1])
    jobS = execute(qc, backend = simulator, shots = nshots)
    resultS = jobS.result().get_counts(); A2B1_avg = AB_avg(resultS, nshots)
    jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
    A2B1_avgE = AB_avg(jobE.result().get_counts(), nshots)
    # A2B2
    qc = QuantumCircuit(2, 2)
    qc_state_prep_ = qc_state_prep(p[j]); qc.append(qc_state_prep_, [0, 1])
    qc_A2B2_ = qc_A2B2(); qc.append(qc_A2B2_, [0, 1]); qc.measure([0, 1], [0, 1])
    jobS = execute(qc, backend = simulator, shots = nshots)
    resultS = jobS.result().get_counts(); A2B2_avg = AB_avg(resultS, nshots)
    jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
    A2B2_avgE = AB_avg(jobE.result().get_counts(), nshots)
    # CHSH combination for this angle.
    Oavg_sim[j] = A1B1_avg + A1B2_avg + A2B1_avg - A2B2_avg
    Oavg_exp[j] = A1B1_avgE + A1B2_avgE + A2B1_avgE - A2B2_avgE
# Plot theory curve, LHV bound and both measured series against p.
matplotlib.rcParams.update({'font.size':12})
plt.figure(figsize = (6,4), dpi = 100)
plt.plot(p, y, label = 'LHV limit')
plt.plot(p, Oavg, label = r'$\langle O\rangle_{\Psi}^{teo}$')
plt.plot(p, Oavg_sim, '*', label = r'$\langle O\rangle_{\Psi}^{sim}$')
plt.plot(p, Oavg_exp, 'o', label = r'$\langle O\rangle_{\Psi}^{exp}$')
plt.xlabel(r'$\theta$'); plt.legend(); plt.show()
def qc_A1B1(th):
    """Build the 2-qubit basis-change circuit for the A1*B1 correlator.

    Z on qubit 0 (Alice's A1 basis) and a U(th, 0, pi) rotation on
    qubit 1 (Bob's B1 basis at angle th).
    """
    circuit = QuantumCircuit(2, name = 'A1B1')
    circuit.z(0)
    circuit.u(th, 0, math.pi, [1])
    return circuit
def qc_A1B2():
    """Build the 2-qubit basis-change circuit for the A1*B2 correlator.

    Z on qubit 0 (Alice's A1 basis) and a U(-pi/4, 0, pi) rotation on
    qubit 1 (Bob's B2 basis).
    """
    circuit = QuantumCircuit(2, name = 'A1B2')
    circuit.z(0)
    circuit.u(-math.pi/4, 0, math.pi, [1])
    return circuit
def qc_A2B1(th):
    """Build the 2-qubit basis-change circuit for the A2*B1 correlator.

    H on qubit 0 (Alice's A2 basis) and a U(th, 0, pi) rotation on
    qubit 1 (Bob's B1 basis at angle th).
    """
    circuit = QuantumCircuit(2, name = 'A2B1')
    circuit.h(0)
    circuit.u(th, 0, math.pi, [1])
    return circuit
def qc_A2B2():
    """Build the 2-qubit basis-change circuit for the A2*B2 correlator.

    H on qubit 0 (Alice's A2 basis) and a U(-pi/4, 0, pi) rotation on
    qubit 1 (Bob's B2 basis).
    """
    circuit = QuantumCircuit(2, name = 'A2B2')
    circuit.h(0)
    circuit.u(-math.pi/4, 0, math.pi, [1])
    return circuit
def qc_state_prep():
    """Build the fixed 2-qubit state-preparation circuit.

    Gates: H on qubit 0, CNOT(0 -> 1), then Z on qubit 0 and X on qubit 1.
    """
    circuit = QuantumCircuit(2, name = 'state_prep')
    circuit.h(0)
    circuit.cx(0, 1)
    circuit.z(0)
    circuit.x(1)
    return circuit
# --- Sweep the measurement angle th in [-pi/4, pi/4] and measure <O> at each point ---
# Theory curve for this sweep: <O> = -(sqrt(2) + cos(th) + sin(th)).
th = np.arange(-math.pi/4, math.pi/4+0.1, 0.1); Oavg = -(math.sqrt(2)+np.cos(th)+np.sin(th))
# FIX: was `d = p.shape[0]` — a stale variable from the previous sweep. The loop
# below indexes th[j], so the length must come from th itself.
d = th.shape[0]; y = -2*np.ones(d); Oavg_sim = np.zeros(d); Oavg_exp = np.zeros(d)
for j in range(0, d):
    # A1B1: fixed state prep, parameterized measurement basis, sim + device runs.
    qc = QuantumCircuit(2, 2)
    qc_state_prep_ = qc_state_prep(); qc.append(qc_state_prep_, [0, 1])
    qc_A1B1_ = qc_A1B1(th[j]); qc.append(qc_A1B1_, [0, 1]); qc.measure([0, 1], [0, 1])
    jobS = execute(qc, backend = simulator, shots = nshots)
    resultS = jobS.result().get_counts(); A1B1_avg = AB_avg(resultS, nshots)
    jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
    A1B1_avgE = AB_avg(jobE.result().get_counts(), nshots)
    # A1B2 (fixed basis, independent of th).
    qc = QuantumCircuit(2, 2)
    qc_state_prep_ = qc_state_prep(); qc.append(qc_state_prep_, [0, 1])
    qc_A1B2_ = qc_A1B2(); qc.append(qc_A1B2_, [0, 1]); qc.measure([0, 1], [0, 1])
    jobS = execute(qc, backend = simulator, shots = nshots)
    resultS = jobS.result().get_counts(); A1B2_avg = AB_avg(resultS, nshots)
    jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
    A1B2_avgE = AB_avg(jobE.result().get_counts(), nshots)
    # A2B1 (parameterized by th).
    qc = QuantumCircuit(2, 2)
    qc_state_prep_ = qc_state_prep(); qc.append(qc_state_prep_, [0, 1])
    qc_A2B1_ = qc_A2B1(th[j]); qc.append(qc_A2B1_, [0, 1]); qc.measure([0, 1], [0, 1])
    jobS = execute(qc, backend = simulator, shots = nshots)
    resultS = jobS.result().get_counts(); A2B1_avg = AB_avg(resultS, nshots)
    jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
    A2B1_avgE = AB_avg(jobE.result().get_counts(), nshots)
    # A2B2 (fixed basis).
    qc = QuantumCircuit(2, 2)
    qc_state_prep_ = qc_state_prep(); qc.append(qc_state_prep_, [0, 1])
    qc_A2B2_ = qc_A2B2(); qc.append(qc_A2B2_, [0, 1]); qc.measure([0, 1], [0, 1])
    jobS = execute(qc, backend = simulator, shots = nshots)
    resultS = jobS.result().get_counts(); A2B2_avg = AB_avg(resultS, nshots)
    jobE = execute(qc, backend = device, shots = nshots); job_monitor(jobE)
    A2B2_avgE = AB_avg(jobE.result().get_counts(), nshots)
    # CHSH combination O = <A1B1> + <A1B2> + <A2B1> - <A2B2> for this angle.
    Oavg_sim[j] = A1B1_avg + A1B2_avg + A2B1_avg - A2B2_avg
    Oavg_exp[j] = A1B1_avgE + A1B2_avgE + A2B1_avgE - A2B2_avgE
# Plot theory curve, LHV bound (-2) and both measured series against th.
matplotlib.rcParams.update({'font.size':12})
plt.figure(figsize = (6,4), dpi = 100)
plt.plot(th, y, label = 'LHV limit')
plt.plot(th, Oavg, label = r'$\langle O\rangle_{\Psi}^{teo}$')
plt.plot(th, Oavg_sim, '*', label = r'$\langle O\rangle_{\Psi}^{sim}$')
plt.plot(th, Oavg_exp, 'o', label = r'$\langle O\rangle_{\Psi}^{exp}$')
plt.xlabel(r'$\theta$'); plt.legend(); plt.show()
def chsh(rho):
    # Quantify CHSH violation of the two-qubit density matrix `rho` via the
    # Horodecki-style criterion: from the 3x3 correlation matrix of rho, take
    # the values W and combine the two largest squared ones.
    # Result is normalized so 0 means no violation and 1 means maximal
    # violation (the sqrt(2)-1 denominator maps [1, sqrt(2)] onto [0, 1]).
    # NOTE(review): `zeros` and `sqrt` appear to come from a symbolic/matrix
    # library (sympy-like) and `corr_mat` / `eVals` are helpers defined
    # elsewhere in the notebook — confirm. The `zeros(...)` pre-assignments
    # are immediately overwritten and have no effect.
    cm = zeros(3,3); cm = corr_mat(2, 2, rho)
    evals = zeros(3,1); W = zeros(3,1); W = eVals(3, cm)
    return max(0,(sqrt(W[0]**2+W[1]**2+W[2]**2-min(W[0],W[1],W[2])**2)-1)/(sqrt(2)-1))
|
https://github.com/Raijeku/qmeans
|
Raijeku
|
"""Module for quantum k-means algorithm with a class containing sk-learn style functions resembling
the k-means algorithm.
This module contains the QuantumKMeans class for clustering according to Euclidean distances
calculated by running quantum circuits.
Typical usage example::
import numpy as np
import pandas as pd
from qmeans.qmeans import *
backend = AerSimulator()
X = pd.DataFrame(np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]]))
q_means = QuantumKMeans(backend, n_clusters=2, verbose=True)
q_means.fit(X)
print(q_means.labels_)
"""
from typing import Tuple
import numpy as np
import pandas as pd
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, transpile
from qiskit_aer import AerSimulator
from qiskit.providers import Backend
from sklearn.preprocessing import normalize, scale
from sklearn.utils import check_random_state
from sklearn.utils. extmath import stable_cumsum
from sklearn.base import BaseEstimator
from qiskit_aer.noise import NoiseModel
def preprocess(points: np.ndarray, map_type: str ='angle', norm_relevance: bool = False):
    """Preprocesses data points according to a type criteria.

    The algorithm scales the data points if the type is 'angle' and normalizes the data points
    if the type is 'probability'.

    Args:
        points: The input data points.
        map_type: {'angle', 'probability'} Specifies the type of data encoding.
            'angle': Uses U3 gates with its theta angle being the phase angle of the complex data
                point.
            'probability': Relies on data normalization to preprocess the data to acquire a norm of
                1.
        norm_relevance: If true, maps two-dimensional data onto 2 angles, one for the angle between
            both data points and another for the magnitude of the data points.

    Returns:
        p_points: Preprocessed points. For map_type='probability' a tuple
            (p_points, norms) is returned instead; for any other map_type the
            function returns None implicitly.
    """
    if map_type == 'angle':
        # Standardize features (zero mean, unit variance).
        p_points = scale(points[:])
        a_points = points.copy()
        if norm_relevance is True:
            # Replace all-zero rows with a uniform unit-norm vector so the
            # norm computation below is well defined.
            for i, point in enumerate(a_points):
                if np.array_equiv(point, np.zeros_like(point)):
                    point = np.ones_like(point)*((1/a_points.shape[1])**(1/2))
                    a_points[i] = point
            # Append each point's norm (scaled to [0, 1] by the largest norm)
            # as an extra column, so magnitude survives the angle encoding.
            _, norms = normalize(a_points[:], return_norm=True)
            max_norm = np.max(norms)
            new_column = norms/max_norm
            new_column = new_column.reshape((new_column.size,1))
            p_points = np.concatenate((p_points, new_column),axis=1)
        return p_points
    elif map_type == 'probability':
        """if len(points.shape) > 1:
            size = points.shape[1]
        else:
            size = points.shape[0]"""
        """print("pre points")
        print(points)
        print(type(points))
        #i = 0
        points = points.to_numpy()
        for i, point in enumerate(points):
            if np.array_equiv(point, np.zeros_like(point)):
                point = np.ones_like(point)*((1/points.shape[1])**(1/2))
                points[i] = point
                print(point)
        #i += 1
        print("post points")
        print(points)
        points = pd.DataFrame(points)"""
        # L2-normalize each row; the original norms are returned alongside so
        # distances can later be rescaled back to the unnormalized space.
        p_points, norms = normalize(points[:], return_norm=True)
        return p_points, norms
def distance(x: np.ndarray, y: np.ndarray, backend: Backend, map_type: str = 'probability', shots: int = 1024, norms: np.ndarray = np.array([1, 1]), norm_relevance: bool = False, noise_model: NoiseModel = None):
    """Finds the distance between two data points by mapping the data points onto qubits using
    amplitude or angle encoding and then using a swap test.

    The algorithm performs angle encoding if the type is 'angle' and amplitude encoding if the type
    is 'probability'.

    Args:
        x: The first data point.
        y: The second data point.
        backend: IBM quantum device to calculate the distance with.
        map_type: {'angle', 'probability'} Specify the type of data encoding.
            'angle': Uses U3 gates with its theta angle being the phase angle of the complex data
                point.
            'probability': Relies on data normalization to preprocess the data to acquire a norm of
                1.
        shots: Number of repetitions of each circuit, for sampling.
        norms: Original norms of x and y (only used by the 'probability' map to
            rescale the swap-test result back to the unnormalized space).
        norm_relevance: If true, maps two-dimensional data onto 2 angles, one for the angle between
            both data points and another for the magnitude of the data points.
        noise_model: Noise model to use when running circuits on a simulator.

    Returns:
        distance: Distance between the two data points. For the 'angle' map this
        is the raw swap-test P(ancilla = 1), which is monotone in distance
        rather than a Euclidean distance.
    """
    if map_type == 'angle':
        if x.size == 2:
            qubits = int(np.ceil(np.log2(x.size)))
            # Encode each 2-D point as the phase of a complex number.
            complexes_x = x[0] + 1j*x[1]
            complexes_y = y[0] + 1j*y[1]
            theta_1 = np.angle(complexes_x)
            theta_2 = np.angle(complexes_y)
            # Swap test: ancilla qr[0], data qubits qr[1] and qr[2].
            qr = QuantumRegister(3, name="qr")
            cr = ClassicalRegister(3, name="cr")
            qc = QuantumCircuit(qr, cr, name="k_means")
            qc.h(qr[0])
            qc.h(qr[1])
            qc.h(qr[2])
            qc.u(theta_1, np.pi, np.pi, qr[1])
            qc.u(theta_2, np.pi, np.pi, qr[2])
            qc.cswap(qr[0], qr[1], qr[2])
            qc.h(qr[0])
            qc.measure(qr[0], cr[0])
            qc.reset(qr)
            qc = transpile(qc, backend)
            job = backend.run(qc, shots=shots, noise_model=noise_model)
            result = job.result()
            data = result.get_counts()
            # A single outcome means the ancilla never read 1: identical states.
            if len(data) == 1:
                return 0.0
            return data['0'*(qubits*2)+'1']/shots
        elif x.size == 3 and norm_relevance is True:
            # Third component carries the (scaled) point magnitude; it is
            # encoded as an extra rotation on each data qubit.
            qubits = int(np.ceil(np.log2(x.size)))
            complexes_x = x[0] + 1j*x[1]
            complexes_y = y[0] + 1j*y[1]
            theta_1 = np.angle(complexes_x)
            theta_2 = np.angle(complexes_y)
            ro_1 = x[2]*np.pi
            ro_2 = y[2]*np.pi
            qr = QuantumRegister(3, name="qr")
            cr = ClassicalRegister(3, name="cr")
            qc = QuantumCircuit(qr, cr, name="k_means")
            qc.h(qr[0])
            qc.h(qr[1])
            qc.h(qr[2])
            qc.u(theta_1, np.pi, np.pi, qr[1])
            qc.u(ro_1, 0, 0, qr[1])
            qc.u(theta_2, np.pi, np.pi, qr[2])
            # FIX: y's magnitude rotation was applied to qr[1] (x's qubit),
            # double-rotating x and never encoding y's magnitude. It belongs on
            # qr[2], matching the parallel code in batch_distance.
            qc.u(ro_2, 0, 0, qr[2])
            qc.cswap(qr[0], qr[1], qr[2])
            qc.h(qr[0])
            qc.measure(qr[0], cr[0])
            qc.reset(qr)
            qc = transpile(qc, backend)
            job = backend.run(qc, shots=shots, noise_model=noise_model)
            result = job.result()
            data = result.get_counts()
            if len(data) == 1:
                return 0.0
            return data['0'*(qubits)+'1']/shots
    elif map_type == 'probability':
        # Amplitude encoding: pad each point up to the next power of two.
        if x.size > 1:
            qubits = int(np.ceil(np.log2(x.size)))
        else:
            qubits = 1
        n_x = np.zeros(2**qubits)
        n_x[:x.size] = x
        n_y = np.zeros(2**qubits)
        n_y[:y.size] = y
        qr = QuantumRegister(2*qubits + 1, name="qr")
        cr = ClassicalRegister(2*qubits + 1, name="cr")
        qc = QuantumCircuit(qr, cr, name="k_means")
        qc.initialize(n_x, [i+1 for i in range(qubits)])  # pylint: disable=no-member
        qc.initialize(n_y, [i+1+qubits for i in range(qubits)])  # pylint: disable=no-member
        # Multi-qubit swap test: one controlled swap per data-qubit pair.
        qc.h(qr[0])
        for i in range(qubits):
            qc.cswap(qr[0], qr[1+i], qr[qubits+1+i])
        qc.h(qr[0])
        qc.measure(qr[0], cr[0])
        qc.reset(qr)
        qc = transpile(qc, backend)
        job = backend.run(qc, shots=shots, noise_model=noise_model)
        result = job.result()
        data = result.get_counts()
        if len(data) == 1:
            return 0.0
        # P(ancilla = 1) gives |<x|y>|^2 = 1 - 2M; rescale with the original
        # norms to recover the Euclidean distance of the unnormalized points.
        M = data['0'*(qubits*2)+'1']/shots
        return (norms[0]**2 + norms[1] ** 2 - 2*norms[0]*norms[1]*((1 - 2*M)**(1/2)))**(1/2)
def batch_separate(X: np.ndarray, clusters: np.ndarray, max_experiments: int, norms: np.ndarray, cluster_norms: np.ndarray):
    """Creates batches of pairs of vectors.

    Separates data points X and cluster centers into a number of batches of elements for distance
    calculations in a single job. Each batch contains a set of data points and cluster centers,
    corresponding to the data for distance measurements in each batch.

    Args:
        X: Training instances to cluster.
        clusters: Cluster centers.
        max_experiments: The amount of distance measurements in each batch.
        norms: Norms of the data points, batched alongside X.
        cluster_norms: Norms of the cluster centers, batched alongside clusters.

    Returns:
        A 2-tuple: (list of (X_batch, center) pairs, list of (X_norm_batch,
        center_norm) pairs). Only implemented for more points than centers.

    Raises:
        NotImplementedError: If X has no more rows than clusters.
    """
    if X.shape[0] > clusters.shape[0]:
        # Split X (and the matching norms) into chunks of max_experiments rows.
        # The exact-multiple case avoids a trailing shorter chunk.
        if X.shape[0] % max_experiments == 0:
            batches_X = np.asarray(np.split(X,[i*max_experiments for i in range(1,X.shape[0]//max_experiments)]))
            batches_norms_X = np.asarray(np.split(norms, [i*max_experiments for i in range(1, norms.shape[0]//max_experiments)]))
        else:
            # NOTE(review): with a non-multiple the last chunk is shorter, so
            # np.asarray over ragged chunks may fail on recent NumPy without
            # dtype=object — confirm the supported NumPy version.
            batches_X = np.asarray(np.split(X,[i*max_experiments for i in range(1,X.shape[0]//max_experiments + 1)]))
            batches_norms_X = np.asarray(np.split(norms, [i*max_experiments for i in range(1, norms.shape[0]//max_experiments + 1)]))
        # One center (and its norm) per (chunk, center) combination: allocate
        # n_chunks * n_clusters rows, then tile centers with strided assignment.
        if X.shape[0] % max_experiments == 0:
            batches_clusters = np.empty([(X.shape[0]//max_experiments)*clusters.shape[0],clusters.shape[1]], dtype=clusters.dtype)
            batches_norms_clusters = np.empty([(X.shape[0]//max_experiments)*cluster_norms.shape[0],1], dtype=cluster_norms.dtype)
        else:
            batches_clusters = np.empty([(X.shape[0]//max_experiments + 1)*clusters.shape[0],clusters.shape[1]], dtype=clusters.dtype)
            batches_norms_clusters = np.empty([(X.shape[0]//max_experiments + 1)*cluster_norms.shape[0],1], dtype=cluster_norms.dtype)
        for i in range(clusters.shape[0]):
            batches_clusters[i::clusters.shape[0]] = clusters[i]
            batches_norms_clusters[i::cluster_norms.shape[0]] = cluster_norms[i]
        # Repeat each X chunk once per cluster so chunk i pairs with every center.
        batches_X = np.asarray(np.repeat(batches_X,clusters.shape[0],axis=0))
        batches_norms_X = np.asarray(np.repeat(batches_norms_X,cluster_norms.shape[0],axis=0))
        batches = ([(batches_X[i], batches_clusters[i]) for i in range(batches_clusters.shape[0])], [(batches_norms_X[i], batches_norms_clusters[i]) for i in range(batches_norms_clusters.shape[0])])
        return batches
    else:
        raise NotImplementedError
def batch_distance(B: Tuple[np.ndarray, np.ndarray], backend: Backend, norm_B: np.ndarray, map_type: str = 'angle', shots: int = 1024):
    """Finds the distance between pairs of data points and cluster centers inside a batch by
    mapping the data points onto qubits using amplitude or angle encoding and then using a swap test.

    The algorithm performs angle encoding if the type is 'angle' and amplitude encoding if the type
    is 'probability'.

    Args:
        B: The batch of X data points and y cluster centers.
        backend: IBM quantum device to calculate the distance with.
        norm_B: Pair (norms of the batch points, norm of the center), used by the
            'probability' map to rescale swap-test results.
        map_type: {'angle', 'probability'} Specifies the type of data encoding.
            'angle': Uses U3 gates with its theta angle being the phase angle of the complex data
                point.
            'probability': Relies on data normalization to preprocess the data to acquire a norm of
                1.
        shots: Number of repetitions of each circuit, for sampling.

    Returns:
        distance: Distance between the data points and cluster centers of the batch.
        For the 'angle' map this is the raw swap-test P(ancilla = 1) per pair.
    """
    if B[0].shape[1] == 2:
        if map_type == 'angle':
            # One swap-test circuit per point, all submitted as a single job.
            qcs = []
            for point in B[0]:
                x = point
                y = B[1]
                complexes_x = x[0] + 1j*x[1]
                complexes_y = y[0] + 1j*y[1]
                theta_1 = np.angle(complexes_x)
                theta_2 = np.angle(complexes_y)
                qr = QuantumRegister(3, name="qr")
                cr = ClassicalRegister(3, name="cr")
                qc = QuantumCircuit(qr, cr, name="k_means")
                qc.h(qr[0])
                qc.h(qr[1])
                qc.h(qr[2])
                qc.u(theta_1, np.pi, np.pi, qr[1])
                qc.u(theta_2, np.pi, np.pi, qr[2])
                qc.cswap(qr[0], qr[1], qr[2])
                qc.h(qr[0])
                qc.measure(qr[0], cr[0])
                qc.reset(qr)
                qcs.append(qc)
            qcs = transpile(qcs, backend)
            job = backend.run(qcs, shots=shots)
            result = job.result()
            data = result.get_counts()
            # A single outcome means the ancilla never read 1: identical states.
            return [batch_data['001']/shots if len(batch_data)!=1 else 0.0 for batch_data in data]
        elif map_type == 'probability':
            qcs = []
            for point in B[0]:
                x = point
                y = B[1]
                qr = QuantumRegister(3, name="qr")
                cr = ClassicalRegister(3, name="cr")
                qc = QuantumCircuit(qr, cr, name="k_means")
                qc.initialize(x, 1)  # pylint: disable=no-member
                qc.initialize(y, 2)  # pylint: disable=no-member
                qc.h(qr[0])
                qc.cswap(qr[0], qr[1], qr[2])
                qc.h(qr[0])
                qc.measure(qr[0], cr[0])
                qc.reset(qr)
                qcs.append(qc)
            qcs = transpile(qcs, backend)
            job = backend.run(qcs, shots=shots)
            result = job.result()
            data = result.get_counts()
            contained = ['0'*2+'1' in batch_data for batch_data in data]
            M = [data[i]['0'*2+'1']/shots if contained[i] else 0.0 for i in range(len(contained))]
            # Rescale |<x|y>|^2 = 1 - 2M back to the unnormalized Euclidean distance.
            return [(norm_B[0][i]**2 + norm_B[1]**2 - 2*norm_B[0][i]*norm_B[1]*((1 - 2*M_i)**(1/2)))**(1/2) for i, M_i in enumerate(M)]
    elif B[0].shape[1] == 3:
        # NOTE(review): no 'probability' branch exists for 3 features, so that
        # combination silently returns None — confirm it cannot be reached.
        if map_type == 'angle':
            # Third feature is the scaled magnitude (norm_relevance encoding).
            qcs = []
            for point in B[0]:
                x = point
                y = B[1]
                complexes_x = x[0] + 1j*x[1]
                complexes_y = y[0] + 1j*y[1]
                theta_1 = np.angle(complexes_x)
                theta_2 = np.angle(complexes_y)
                ro_1 = x[2]*np.pi/2
                ro_2 = y[2]*np.pi/2
                qr = QuantumRegister(3, name="qr")
                cr = ClassicalRegister(3, name="cr")
                qc = QuantumCircuit(qr, cr, name="k_means")
                qc.h(qr[0])
                qc.h(qr[1])
                qc.h(qr[2])
                qc.u(theta_1, np.pi, np.pi, qr[1])
                qc.u(ro_1, 0, 0, qr[1])
                qc.u(theta_2, np.pi, np.pi, qr[2])
                qc.u(ro_2, 0, 0, qr[2])
                qc.cswap(qr[0], qr[1], qr[2])
                qc.h(qr[0])
                qc.measure(qr[0], cr[0])
                qc.reset(qr)
                qcs.append(qc)
            qcs = transpile(qcs, backend)
            job = backend.run(qcs, shots=shots)
            result = job.result()
            data = result.get_counts()
            return [batch_data['001']/shots if len(batch_data)!=1 else 0.0 for batch_data in data]
    elif np.log2(B[0].shape[1]).is_integer():
        # Feature count is an exact power of two: no padding needed.
        if map_type == 'angle':
            # NOTE(review): only the first two features are used here, as in the
            # 2-feature case — confirm higher dimensions are intentional.
            qcs = []
            for point in B[0]:
                x = point
                y = B[1]
                complexes_x = x[0] + 1j*x[1]
                complexes_y = y[0] + 1j*y[1]
                theta_1 = np.angle(complexes_x)
                theta_2 = np.angle(complexes_y)
                qr = QuantumRegister(3, name="qr")
                cr = ClassicalRegister(3, name="cr")
                qc = QuantumCircuit(qr, cr, name="k_means")
                qc.h(qr[0])
                qc.h(qr[1])
                qc.h(qr[2])
                qc.u(theta_1, np.pi, np.pi, qr[1])
                qc.u(theta_2, np.pi, np.pi, qr[2])
                qc.cswap(qr[0], qr[1], qr[2])
                qc.h(qr[0])
                qc.measure(qr[0], cr[0])
                qc.reset(qr)
                qcs.append(qc)
            qcs = transpile(qcs, backend)
            job = backend.run(qcs, shots=shots)
            result = job.result()
            data = result.get_counts()
            return [batch_data['001']/shots if len(batch_data)!=1 else 0.0 for batch_data in data]
        elif map_type == 'probability':
            n_qubits = int(np.log2(B[0].shape[1]))  # hoisted: invariant per batch
            qcs = []
            for point in B[0]:
                x = point
                y = B[1]
                qr = QuantumRegister(n_qubits*2+1, name="qr")
                cr = ClassicalRegister(n_qubits*2+1, name="cr")
                qc = QuantumCircuit(qr, cr, name="k_means")
                qc.initialize(x, [i+1 for i in range(n_qubits)])  # pylint: disable=no-member
                qc.initialize(y, [i+1+n_qubits for i in range(n_qubits)])  # pylint: disable=no-member
                qc.h(qr[0])
                for i in range(n_qubits):
                    qc.cswap(qr[0], qr[1+i], qr[n_qubits+1+i])
                qc.h(qr[0])
                qc.measure(qr[0], cr[0])
                qc.reset(qr)
                qcs.append(qc)
            qcs = transpile(qcs, backend)
            job = backend.run(qcs, shots=shots)
            result = job.result()
            data = result.get_counts()
            contained = ['0'*n_qubits*2+'1' in batch_data for batch_data in data]
            M = [data[i]['0'*n_qubits*2+'1']/shots if contained[i] else 0.0 for i in range(len(contained))]
            return [(norm_B[0][i]**2 + norm_B[1]**2 - 2*norm_B[0][i]*norm_B[1]*((1 - 2*M_i)**(1/2)))**(1/2) for i, M_i in enumerate(M)]
    else:
        if map_type == 'angle':
            qcs = []
            for point in B[0]:
                x = point
                y = B[1]
                complexes_x = x[0] + 1j*x[1]
                complexes_y = y[0] + 1j*y[1]
                theta_1 = np.angle(complexes_x)
                theta_2 = np.angle(complexes_y)
                qr = QuantumRegister(3, name="qr")
                cr = ClassicalRegister(3, name="cr")
                qc = QuantumCircuit(qr, cr, name="k_means")
                qc.h(qr[0])
                qc.h(qr[1])
                qc.h(qr[2])
                qc.u(theta_1, np.pi, np.pi, qr[1])
                qc.u(theta_2, np.pi, np.pi, qr[2])
                qc.cswap(qr[0], qr[1], qr[2])
                qc.h(qr[0])
                qc.measure(qr[0], cr[0])
                qc.reset(qr)
                qcs.append(qc)
            qcs = transpile(qcs, backend)
            job = backend.run(qcs, shots=shots)
            result = job.result()
            data = result.get_counts()
            return [batch_data['001']/shots if len(batch_data)!=1 else 0.0 for batch_data in data]
        elif map_type == 'probability':
            # Feature count is not a power of two: pad up to 2**qubits.
            if np.log2(B[0].shape[1]).is_integer():
                qubits = int(np.log2(B[0].shape[1]))
            else:
                qubits = int(np.log2(B[0].shape[1])) + 1
            qcs = []
            for point in B[0]:
                x = np.zeros(2**qubits)
                x[:point.shape[0]] = point
                y = np.zeros(2**qubits)
                y[:B[1].shape[0]] = B[1]
                qr = QuantumRegister(qubits*2+1, name="qr")
                cr = ClassicalRegister(qubits*2+1, name="cr")
                qc = QuantumCircuit(qr, cr, name="k_means")
                qc.initialize(x, [i+1 for i in range(qubits)])  # pylint: disable=no-member
                qc.initialize(y, [i+1+qubits for i in range(qubits)])  # pylint: disable=no-member
                qc.h(qr[0])
                # FIX: was `for i in qubits:` — iterating over an int raises
                # TypeError; the swap test needs one cswap per data-qubit pair.
                for i in range(qubits):
                    qc.cswap(qr[0], qr[1+i], qr[qubits+1+i])
                qc.h(qr[0])
                qc.measure(qr[0], cr[0])
                qc.reset(qr)
                qcs.append(qc)
            qcs = transpile(qcs, backend)
            job = backend.run(qcs, shots=shots)
            result = job.result()
            data = result.get_counts()
            contained = ['0'*qubits*2+'1' in batch_data for batch_data in data]
            M = [data[i]['0'*qubits*2+'1']/shots if contained[i] else 0.0 for i in range(len(contained))]
            return [(norm_B[0][i]**2 + norm_B[1]**2 - 2*norm_B[0][i]*norm_B[1]*((1 - 2*M_i)**(1/2)))**(1/2) for i, M_i in enumerate(M)]
def batch_collect(batch_d: np.ndarray, desired_shape: Tuple[int, int]):
    """Collects batches of distances.

    Retrieves batches of distances and transforms the shape of the data to a desired shape.

    Args:
        batch_d: Batches of distances, one row per (chunk, center) combination
            in the order produced by batch_separate.
        desired_shape: The shape of the collected distances,
            (n_clusters, n_points).

    Returns:
        final_batch_d: Transformed distances of shape desired_shape.
    """
    final_batch_d = np.empty(batch_d.shape, dtype=batch_d.dtype)
    # Reorder rows from (chunk-major, center-minor) to center-major so a plain
    # reshape yields one row per cluster center.
    for i in range(batch_d.shape[0]//desired_shape[0]):
        final_batch_d[i] = batch_d[desired_shape[0]*i]
    # Extra rows exist when the data did not split evenly into batches; place
    # the remaining rows with a wrapped index.
    # NOTE(review): the wrap formula `desired_shape[0]*i - batch_d.shape[0] + 1`
    # looks fragile for more than two chunks — verify against a multi-chunk run.
    if batch_d.squeeze(axis=-1).shape != desired_shape:
        for i in range(batch_d.shape[0]//desired_shape[0],batch_d.shape[0]):
            final_batch_d[i] = batch_d[desired_shape[0]*i-batch_d.shape[0]+1]
    return final_batch_d.reshape(desired_shape)
def batch_distances(X: np.ndarray, cluster_centers: np.ndarray, backend: Backend, map_type: str, shots: int, verbose: bool, norms: np.ndarray, cluster_norms: np.ndarray):
    """Batches data and calculates and collects distances.

    The data points are split into batches together with the cluster centers,
    every batch is sent to the quantum device for distance measurements, and
    the per-batch results are collected into a (n_clusters, n_points) array.

    Args:
        X: Training instances to cluster.
        cluster_centers: Coordinates of cluster centers (DataFrame or ndarray).
        backend: IBM quantum device to run the quantum k-means algorithm on.
        map_type: {'angle', 'probability'} Specifies the type of data encoding.
            'angle': Uses U3 gates with its theta angle being the phase angle of the complex data
                point.
            'probability': Relies on data normalization to preprocess the data to acquire a norm of
                1.
        shots: Number of repetitions of each circuit, for sampling.
        verbose: Defines if verbosity is active for deeper insight into the class processes.
        norms: Norms of the data points.
        cluster_norms: Norms of the cluster centers.

    Returns:
        distance: Distance between the data points and cluster centers.
    """
    # Work with a plain ndarray for the centers regardless of the input type.
    if isinstance(cluster_centers, pd.DataFrame):
        centers_arr = cluster_centers.to_numpy()
    else:
        centers_arr = cluster_centers
    # The backend's max_experiments bounds how many circuits fit in one job.
    max_experiments = backend.configuration().max_experiments
    batches, norm_batches = batch_separate(X.to_numpy(), centers_arr, max_experiments, norms, cluster_norms)
    per_batch = [batch_distance(pair, backend, norm_batches[idx], map_type, shots) for idx, pair in enumerate(batches)]
    distance_list = np.asarray(per_batch)
    return batch_collect(distance_list, (cluster_centers.shape[0], X.shape[0]))
def qmeans_plusplus(X: np.ndarray, n_clusters: int, backend: Backend, map_type: str, verbose: bool, initial_center: str, shots: int = 1024, norms: np.ndarray = np.array([1,1]), batch: bool = True, x_squared_norms: np.ndarray = None, n_local_trials: int = None, random_state: int = None, noise_model: NoiseModel = None):
    """Init n_clusters seeds according to q-means++.

    Selects initial cluster centers for q-mean clustering in a smart way to speed up convergence.

    Args:
        X: The data to pick seeds from.
        n_clusters: The number of centroids to initialize.
        backend: IBM quantum device to run the quantum k-means algorithm on.
        map_type: {'angle', 'probability'} Specifies the type of data encoding.
            'angle': Uses U3 gates with its theta angle being the phase angle of the complex data
                point.
            'probability': Relies on data normalization to preprocess the data to acquire a norm of
                1.
        verbose: Defines if verbosity is active for deeper insight into the class processes.
        initial_center: {'random', 'far'} Specifies the strategy for setting the initial cluster
            center.
            'random': Assigns a random initial center.
            'far': Specifies the furthest point as the initial center.
        shots: Number of repetitions of each circuit, for sampling.
        norms: Norms of the data points (used by the 'probability' map).
        batch: If true, distances are computed in batched jobs via batch_distances.
        x_squared_norms: Squared Euclidean norm of each data point (currently unused).
        n_local_trials: The number of seeding trials for each center (except the first), of which
            the one reducing inertia the most is greedily chosen. Set to None to make the number of
            trials depend logarithmically on the number of seeds (2+log(k)).
        random_state: Determines random number generation for centroid initialization. Pass an int
            for reproducible output across multiple function calls.
        noise_model: Noise model to use when running circuits on a simulator.

    Returns:
        centers: The initial centers for q-means.
        indices: The index location of the chosen centers in the data array X. For a given index
            and center, X[index] = center.
    """
    # NOTE(review): every batch_distances(...) call below passes only six
    # positional arguments, but batch_distances also requires norms and
    # cluster_norms — with batch=True these calls would raise TypeError.
    # Confirm whether batch=False is the supported path.
    if verbose:
        print('Started Qmeans++')
    random_state = check_random_state(random_state)
    n_samples, n_features = X.shape
    centers = np.empty((n_clusters, n_features), dtype= X.values.dtype)
    if n_local_trials is None:
        # sklearn's k-means++ heuristic: 2 + log(k) candidate trials per center.
        n_local_trials = 2 + int(np.log(n_clusters))
    # First center: a uniformly random data point.
    center_id = random_state.randint(n_samples)
    indices = np.full(n_clusters, -1, dtype=int)
    indices[0] = center_id
    centers[0] = X.values[center_id]
    if verbose:
        print('Centers are:', pd.DataFrame(centers))
    # Distance of every point to the (single) chosen center so far.
    if batch:
        closest_distances = batch_distances(X, centers[0, np.newaxis], backend, map_type, shots, verbose)
    else:
        if map_type == 'probability':
            closest_distances = np.asarray([[distance(point,centroid,backend,map_type,shots,np.array([norms[i],norms[j]]),noise_model=noise_model) for i, point in X.iterrows()] for j, centroid in pd.DataFrame(centers[0, np.newaxis]).iterrows()])
        elif map_type == 'angle':
            closest_distances = np.asarray([[distance(point,centroid,backend,map_type,shots,noise_model=noise_model) for i, point in X.iterrows()] for j, centroid in pd.DataFrame(centers[0, np.newaxis]).iterrows()])
    # Total potential: sum of distances to the nearest chosen center.
    current_pot = closest_distances.sum()
    for c in range(1, n_clusters):
        if verbose:
            print('Cluster center', c)
        # Sample candidate points with probability proportional to their
        # distance to the nearest existing center.
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = np.searchsorted(stable_cumsum(closest_distances), rand_vals)
        # Guard against numerical edge cases at the end of the cumsum.
        np.clip(candidate_ids, None, closest_distances.size - 1, out=candidate_ids)
        # Distances from all points to each candidate.
        if batch:
            distance_to_candidates = batch_distances(X, X.values[candidate_ids], backend, map_type, shots, verbose)
        else:
            if map_type == 'probability':
                distance_to_candidates = np.asarray([[distance(point,centroid,backend,map_type,shots,np.array([norms[i],norms[j]]),noise_model=noise_model) for i, point in X.iterrows()] for j, centroid in X.iloc[candidate_ids].iterrows()])
            elif map_type == 'angle':
                distance_to_candidates = np.asarray([[distance(point,centroid,backend,map_type,shots,noise_model=noise_model) for i, point in X.iterrows()] for j, centroid in X.iloc[candidate_ids].iterrows()])
        # Each point keeps the smaller of (old nearest, candidate) distance.
        np.minimum(closest_distances, distance_to_candidates,
                   out=distance_to_candidates)
        candidates_pot = distance_to_candidates.sum(axis=1)
        # Greedily keep the candidate that minimizes the potential.
        best_candidate = np.argmin(candidates_pot)
        current_pot = candidates_pot[best_candidate]
        closest_distances = distance_to_candidates[best_candidate]
        best_candidate = candidate_ids[best_candidate]
        centers[c] = X.values[best_candidate]
        indices[c] = best_candidate
        if verbose:
            print('Centers are:', pd.DataFrame(centers))
        # 'far' strategy: once the second center exists, re-pick the FIRST
        # center using the same sampling, seeded by distances to center 1.
        if c == 1 and initial_center == 'far':
            if batch:
                closest_distances = batch_distances(X, centers[1, np.newaxis], backend, map_type, shots, verbose)
            else:
                if map_type == 'probability':
                    closest_distances = np.asarray([[distance(point,centroid,backend,map_type,shots,np.array([norms[i],norms[j]]),noise_model=noise_model) for i, point in X.iterrows()] for j, centroid in pd.DataFrame(centers[1, np.newaxis]).iterrows()])
                elif map_type == 'angle':
                    closest_distances = np.asarray([[distance(point,centroid,backend,map_type,shots,noise_model=noise_model) for i, point in X.iterrows()] for j, centroid in pd.DataFrame(centers[1, np.newaxis]).iterrows()])
            current_pot = closest_distances.sum()
            rand_vals = random_state.random_sample(n_local_trials) * current_pot
            candidate_ids = np.searchsorted(stable_cumsum(closest_distances), rand_vals)
            np.clip(candidate_ids, None, closest_distances.size - 1, out=candidate_ids)
            if batch:
                distance_to_candidates = batch_distances(X, X.values[candidate_ids], backend, map_type, shots, verbose)
            else:
                if map_type == 'probability':
                    distance_to_candidates = np.asarray([[distance(point,centroid,backend,map_type,shots,np.array([norms[i],norms[j]]),noise_model=noise_model) for i, point in X.iterrows()] for j, centroid in X.iloc[candidate_ids].iterrows()])
                elif map_type == 'angle':
                    distance_to_candidates = np.asarray([[distance(point,centroid,backend,map_type,shots,noise_model=noise_model) for i, point in X.iterrows()] for j, centroid in X.iloc[candidate_ids].iterrows()])
            np.minimum(closest_distances, distance_to_candidates,
                       out=distance_to_candidates)
            candidates_pot = distance_to_candidates.sum(axis=1)
            best_candidate = np.argmin(candidates_pot)
            current_pot = candidates_pot[best_candidate]
            closest_distances = distance_to_candidates[best_candidate]
            best_candidate = candidate_ids[best_candidate]
            centers[0] = X.values[best_candidate]
            indices[0] = best_candidate
            if verbose:
                print('Centers are:', pd.DataFrame(centers))
    return centers, indices
class QuantumKMeans(BaseEstimator):
    """Quantum k-means clustering algorithm. This k-means alternative implements quantum machine
    learning to calculate distances between data points and centroids using quantum circuits.
    Args:
        n_clusters: The number of clusters to use and the amount of centroids generated.
        init: {'q-means++, 'random'}, callable or array-like of shape (n_clusters, n_features)
            Method for initialization:
            'q-means++' : selects initial cluster centers for q-mean clustering in a smart way to
            speed up convergence.
            'random': choose n_clusters observations (rows) at random from data for the initial
            centroids.
            If an array is passed, it should be of shape (n_clusters, n_features) and gives the initial
            centers.
            If a callable is passed, it should take arguments X, n_clusters and a random state and
            return an initialization.
        tol: Relative tolerance with regards to Frobenius norm of the difference in the cluster
            centers of two consecutive iterations to declare convergence.
        verbose: Defines if verbosity is active for deeper insight into the class processes.
        max_iter: Maximum number of iterations of the quantum k-means algorithm for a single run.
        backend: IBM quantum device to run the quantum k-means algorithm on.
        map_type: {'angle', 'probability'} Specifies the type of data encoding.
            'angle': Uses U3 gates with its theta angle being the phase angle of the complex data
            point.
            'probability': Relies on data normalization to preprocess the data to acquire a norm of
            1.
        shots: Number of repetitions of each circuit, for sampling.
        norm_relevance: If true, maps two-dimensional data onto 2 angles, one for the angle between
            both data points and another for the magnitude of the data points.
        initial_center: {'random', 'far'} Speficies the strategy for setting the initial cluster
            center.
            'random': Assigns a random initial center.
            'far': Specifies the furthest point as the initial center.
        noise_model: Noise model to use when runnings circuits on a simulator.
    Attributes:
        cluster_centers_: Coordinates of cluster centers.
        labels_: Centroid labels for each data point.
        n_iter_: Number of iterations run before convergence.
    """

    def __init__(self, backend: Backend = AerSimulator(), n_clusters: int = 2, init: str = 'random', tol: float = 0.0001, max_iter: int = 300, verbose: bool = False, map_type: str = 'probability', shots: int = 1024, norm_relevance: bool = False, initial_center: str = 'random', noise_model: NoiseModel = None):
        """Initializes an instance of the quantum k-means algorithm."""
        # Per sklearn convention, __init__ only stores the constructor parameters;
        # fitted attributes (cluster_centers_, labels_, n_iter_) are created in fit().
        self.n_clusters = n_clusters
        self.init = init
        self.tol = tol
        self.verbose = verbose
        self.max_iter = max_iter
        self.backend = backend
        self.map_type = map_type
        self.shots = shots
        self.norm_relevance = norm_relevance
        self.initial_center = initial_center
        self.noise_model = noise_model

    def fit(self, X: np.ndarray, y: np.ndarray = None, batch: bool = False):
        """Computes quantum k-means clustering.
        Args:
            X: Training instances to cluster.
            y: Ignored; present for sklearn API compatibility.
            batch: Option for using batches to calculate distances.
        Returns:
            self: Fitted estimator.
        """
        if self.verbose:
            print('Data is:', X)
        finished = False
        # Keep the raw data for centroid updates; the encoded copy is used for distances.
        old_X = pd.DataFrame(X)
        if self.map_type == 'probability':
            # 'probability' encoding also yields the per-row norms needed by distance().
            X, norms = preprocess(X, self.map_type, self.norm_relevance)
            X = pd.DataFrame(X)
        else:
            X = pd.DataFrame(preprocess(X, self.map_type, self.norm_relevance))
        # NOTE(review): despite the docstring, only 'q-means++' and 'random' inits are
        # implemented here; array-like / callable inits are not handled.
        if self.init == 'q-means++':
            if self.map_type == 'probability':
                self.cluster_centers_, _ = qmeans_plusplus(X, self.n_clusters, self.backend, self.map_type, self.verbose, self.initial_center, shots=self.shots, batch=batch, norms=norms)
            elif self.map_type == 'angle':
                self.cluster_centers_, _ = qmeans_plusplus(X, self.n_clusters, self.backend, self.map_type, self.verbose, self.initial_center, shots=self.shots, batch=batch)
            self.cluster_centers_ = pd.DataFrame(self.cluster_centers_)
        elif self.init == 'random':
            self.cluster_centers_ = old_X.sample(n=self.n_clusters)
        self.n_iter_ = 0
        while not finished and self.n_iter_ < self.max_iter:
            if self.verbose:
                print("Iteration", self.n_iter_)
            if self.map_type == 'probability':
                normalized_clusters, cluster_norms = preprocess(self.cluster_centers_.values, self.map_type, self.norm_relevance)
            elif self.map_type == 'angle':
                normalized_clusters = preprocess(self.cluster_centers_.values, self.map_type, self.norm_relevance)
            normalized_clusters = pd.DataFrame(normalized_clusters)
            if batch:
                if self.map_type == 'probability':
                    distances = batch_distances(X, normalized_clusters, self.backend, self.map_type, self.shots, self.verbose, norms, cluster_norms)
                else:
                    # BUGFIX: with 'angle' encoding there are no norms; the previous code
                    # unconditionally passed the undefined `norms`/`cluster_norms`,
                    # raising NameError whenever batch=True and map_type='angle'.
                    distances = batch_distances(X, normalized_clusters, self.backend, self.map_type, self.shots, self.verbose)
            else:
                if self.map_type == 'probability':
                    distances = np.asarray([[distance(point,centroid,self.backend,self.map_type,self.shots,np.array([norms[i],cluster_norms[j]]),noise_model=self.noise_model) for i, point in X.iterrows()] for j, centroid in normalized_clusters.iterrows()])
                elif self.map_type == 'angle':
                    distances = np.asarray([[distance(point,centroid,self.backend,self.map_type,self.shots,np.array([1,1]),self.norm_relevance,noise_model=self.noise_model) for i, point in X.iterrows()] for j, centroid in normalized_clusters.iterrows()])
            # distances has shape (n_clusters, n_samples): pick the closest centroid per sample.
            self.labels_ = np.asarray([np.argmin(distances[:, i]) for i in range(distances.shape[1])])
            new_centroids = old_X.groupby(self.labels_).mean()  # Needs to be checked to see if less centers are an option
            if self.verbose:
                print("Old centroids are", self.cluster_centers_.values)
            if self.verbose:
                print("New centroids are", new_centroids.values)
            if abs((new_centroids.values - self.cluster_centers_.values).sum(axis=0).sum()) < self.tol:
                finished = True
            self.cluster_centers_ = new_centroids
            if self.verbose:
                print("Centers are", self.labels_)
            self.n_iter_ += 1
        return self

    def predict(self, X: np.ndarray, sample_weight: np.ndarray = None, batch: bool = False):
        """Predict the closest cluster each sample in X belongs to.
        Args:
            X: New data points to predict.
            sample_weight: The weights for each observation in X. If None, all observations are
                assigned equal weight.
            batch: Option for using batches to calculate distances.
        Returns:
            labels: Centroid labels for each data point.
        """
        if self.map_type == 'probability':
            X, norms = preprocess(X, self.map_type, self.norm_relevance)
            X = pd.DataFrame(X)
        else:
            X = pd.DataFrame(preprocess(X, self.map_type, self.norm_relevance))
        if self.map_type == 'probability':
            normalized_clusters, cluster_norms = preprocess(self.cluster_centers_.values, self.map_type, self.norm_relevance)
            normalized_clusters = pd.DataFrame(normalized_clusters)
        else:
            normalized_clusters = pd.DataFrame(preprocess(self.cluster_centers_.values, self.map_type, self.norm_relevance))
        if sample_weight is None:
            if batch:
                # NOTE(review): this branch passes the raw cluster centers (not the
                # normalized ones) and no norms — kept as-is to preserve behavior.
                distances = batch_distances(X, self.cluster_centers_, self.backend, self.map_type, self.shots, self.verbose)
            else:
                if self.map_type == 'probability':
                    distances = np.asarray([[distance(point,centroid,self.backend,self.map_type,self.shots,np.array([norms[i],cluster_norms[j]]),noise_model=self.noise_model) for i,point in X.iterrows()] for j,centroid in normalized_clusters.iterrows()])
                elif self.map_type == 'angle':
                    distances = np.asarray([[distance(point,centroid,self.backend,self.map_type,self.shots,np.array([1,1]),self.norm_relevance,noise_model=self.noise_model) for i,point in X.iterrows()] for j,centroid in normalized_clusters.iterrows()])
        else:
            weight_X = X * sample_weight
            if batch:
                # BUGFIX: the result of batch_distances was previously discarded, which
                # made the final labels computation fail with an undefined `distances`.
                distances = batch_distances(weight_X, self.cluster_centers_, self.backend, self.map_type, self.shots, self.verbose)
            else:
                if self.map_type == 'probability':
                    distances = np.asarray([[distance(point,centroid,self.backend,self.map_type,self.shots,np.array([norms[i],cluster_norms[j]]),noise_model=self.noise_model) for i,point in weight_X.iterrows()] for j,centroid in normalized_clusters.iterrows()])
                elif self.map_type == 'angle':
                    distances = np.asarray([[distance(point,centroid,self.backend,self.map_type,self.shots,np.array([1,1]),self.norm_relevance,noise_model=self.noise_model) for i,point in weight_X.iterrows()] for j,centroid in normalized_clusters.iterrows()])
        labels = np.asarray([np.argmin(distances[:, i]) for i in range(distances.shape[1])])
        return labels

    def get_params(self, deep: bool = True):
        """Get parameters for this estimator.
        Args:
            deep: If True, will return the parameters for this estimator and contained subobjects
                that are estimators.
        Returns:
            params: Parameter names mapped to their values.
        """
        # BUGFIX: noise_model is a constructor parameter and must be reported here,
        # otherwise sklearn clone()/set_params round-trips silently drop it.
        return {"n_clusters": self.n_clusters, "init": self.init, "tol": self.tol, "verbose": self.verbose, "max_iter": self.max_iter, "backend": self.backend, "map_type": self.map_type, "shots": self.shots, "norm_relevance": self.norm_relevance, "initial_center": self.initial_center, "noise_model": self.noise_model}

    def set_params(self, **params):
        """Set the parameters of this estimator.
        Args:
            **params: Estimator parameters.
        Returns:
            self: Estimator instance.
        """
        for parameter, value in params.items():
            setattr(self, parameter, value)
        return self
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
    """Return the Python scalars held by each single-element tensor in *tensor_list*."""
    return [t.item() for t in tensor_list]
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
class QiskitCircuit():
    """Single-qubit parameterized circuit (H -> RY(theta) -> measure) executed on the
    qasm simulator; exposes a Z-expectation estimate from measurement counts."""
    # Specify initial parameters and the quantum circuit
    def __init__(self, shots):
        # Symbolic rotation angle; later overwritten with a float by bind().
        self.theta = Parameter('Theta')
        # Number of measurement shots per circuit execution.
        self.shots = shots
        def create_circuit():
            # Nested helper: builds the 1-qubit / 1-clbit circuit, closing over self.theta.
            qr = QuantumRegister(1, 'q')
            cr = ClassicalRegister(1, 'c')
            ckt = QuantumCircuit(qr, cr)
            ckt.h(qr[0])
            ckt.barrier()
            ckt.ry(self.theta, qr[0])
            ckt.barrier()
            ckt.measure(qr, cr)
            return ckt
        self.circuit = create_circuit()
    def N_qubit_expectation_Z(self, counts, shots, nr_qubits):
        """Estimate <Z> per qubit from a counts dict: maps bit 0 -> -1 and bit 1 -> +1,
        weighted by each bitstring's observed frequency."""
        expects = np.zeros(nr_qubits)
        for key in counts.keys():
            perc = counts[key] / shots
            check = np.array([(float(key[i]) - 1 / 2) * 2 * perc for i in range(nr_qubits)])
            expects += check
        return expects
    def bind(self, parameters):
        """Assign concrete values to the circuit's rotation angle.
        NOTE(review): this mutates the private `_params` of instruction index 2 (the RY
        gate, given H and barrier precede it) — fragile against qiskit internals; verify
        on the installed qiskit version.
        """
        [self.theta] = to_numbers(parameters)
        self.circuit.data[2][0]._params = to_numbers(parameters)
    def run(self, i):
        """Bind parameters `i`, run the circuit on the qasm simulator, and return the
        1-qubit Z-expectation estimate as a numpy array."""
        self.bind(i)
        backend = Aer.get_backend('qasm_simulator')
        job_sim = execute(self.circuit, backend, shots=self.shots)
        result_sim = job_sim.result()
        counts = result_sim.get_counts(self.circuit)
        return self.N_qubit_expectation_Z(counts, self.shots, 1)
class TorchCircuit(Function):
    """Custom autograd Function bridging a QiskitCircuit into PyTorch.

    forward() evaluates the circuit's expectation value; backward() estimates the
    gradient with the parameter-shift rule (shift s = pi/2).
    """

    @staticmethod
    def forward(ctx, i):
        # Lazily create the circuit once and keep it on the autograd context.
        if not hasattr(ctx, 'QiskitCirc'):
            ctx.QiskitCirc = QiskitCircuit(shots=100)
        exp_value = ctx.QiskitCirc.run(i[0])
        result = torch.tensor([exp_value])  # store the result as a torch tensor
        ctx.save_for_backward(result, i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        s = np.pi / 2
        forward_tensor, i = ctx.saved_tensors
        # Obtain parameters
        input_numbers = to_numbers(i[0])
        gradient = []
        for k in range(len(input_numbers)):
            # BUGFIX: the original code aliased `input_numbers` for both shifted
            # copies, so the "+ s" mutation leaked into the "- s" evaluation and the
            # gradient actually computed f(x + s) - f(x). Use independent copies.
            input_plus_s = list(input_numbers)
            input_plus_s[k] = input_numbers[k] + s  # Shift up by s
            exp_value_plus = ctx.QiskitCirc.run(torch.tensor(input_plus_s))[0]
            result_plus_s = torch.tensor([exp_value_plus])
            input_minus_s = list(input_numbers)
            input_minus_s[k] = input_numbers[k] - s  # Shift down by s
            exp_value_minus = ctx.QiskitCirc.run(torch.tensor(input_minus_s))[0]
            result_minus_s = torch.tensor([exp_value_minus])
            # NOTE(review): kept as a plain difference (no /2 factor), matching the
            # original convention; confirm against the parameter-shift formula in use.
            gradient_result = (result_plus_s - result_minus_s)
            gradient.append(gradient_result)
        result = torch.tensor([gradient])
        return result.float() * grad_output.float()
import torchvision
# Build the training set: CIFAR-10 restricted to two classes (airplane=0, automobile=1),
# n images per class, served one image per batch.
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
# NOTE(review): the tensorflow import is unused here (only referenced in the
# commented-out lines below); kept to avoid changing module behavior.
import tensorflow as tf
# Display the first few training images with their labels.
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
    images, targets = data_iter.__next__()
    #axes[n_samples_show - 1].imshow( tf.shape( tf.squeeze(images[0]) ),cmap='gray' )
    #plt.imshow((tf.squeeze(images[0])))
    #plt.imshow( tf.shape( tf.squeeze(x_train) ) )
    # Panels are filled right-to-left as the counter decreases.
    axes[n_samples_show - 1].imshow(images[0].numpy().squeeze(), cmap='gray')
    axes[n_samples_show - 1].set_xticks([])
    axes[n_samples_show - 1].set_yticks([])
    axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
    n_samples_show -= 1
#Testing data
# Build the test set the same way as the training set: CIFAR-10 restricted to
# airplanes (0) and automobiles (1), n images per class.
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels1 = cifar_testset.targets # get the labels for the data
labels1 = np.array(labels1)
idx1_ae = np.where(labels1 == 0) # filter on aeroplanes
idx2_au = np.where(labels1 == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=50
# concatenate the data indices
idxa = np.concatenate((idx1_ae[0][0:n],idx2_au[0][0:n]))
# create the filtered dataset for our training set
# BUGFIX: the original indexed the TRAINING label array (`labels`) with test-set
# indices, assigning wrong labels to the test images; use the test labels instead.
cifar_testset.targets = labels1[idxa]
cifar_testset.data = cifar_testset.data[idxa]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
class QuantumCircuit:
    """
    This class provides a simple interface for interaction
    with the quantum circuit
    """
    def __init__(self, n_qubits, backend, shots):
        # --- Circuit definition ---
        # H on all qubits, then a shared RY(theta) rotation, then measurement.
        self._circuit = qiskit.QuantumCircuit(n_qubits)
        all_qubits = [i for i in range(n_qubits)]
        self.theta = qiskit.circuit.Parameter('theta')
        self._circuit.h(all_qubits)
        self._circuit.barrier()
        self._circuit.ry(self.theta, all_qubits)
        self._circuit.measure_all()
        # ---------------------------
        self.backend = backend
        self.shots = shots
    def run(self, thetas):
        """Execute the circuit once per value in `thetas` and return the expectation
        of the measured bitstring value (states interpreted as floats).
        NOTE(review): get_counts() with multiple parameter binds returns a list of
        dicts, so this aggregation assumes a single theta value — confirm callers.
        """
        t_qc = transpile(self._circuit,
                         self.backend)
        qobj = assemble(t_qc,
                        shots=self.shots,
                        parameter_binds=[{self.theta: theta} for theta in thetas])
        job = self.backend.run(qobj)
        result = job.result().get_counts()
        counts = np.array(list(result.values()))
        states = np.array(list(result.keys())).astype(float)
        # Compute probabilities for each state
        probabilities = counts / self.shots
        # Get state expectation
        expectation = np.sum(states * probabilities)
        return np.array([expectation])
# Smoke test: build a 1-qubit circuit on the qasm simulator (100 shots) and print
# its expectation value at theta = pi, then draw the circuit.
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
class HybridFunction(Function):
    """ Hybrid quantum - classical function definition """
    @staticmethod
    def forward(ctx, input, quantum_circuit, shift):
        """ Forward pass computation """
        # Stash the circuit and the parameter-shift step for backward().
        ctx.shift = shift
        ctx.quantum_circuit = quantum_circuit
        expectation_z = ctx.quantum_circuit.run(input[0].tolist())
        result = torch.tensor([expectation_z])
        ctx.save_for_backward(input, result)
        return result
    @staticmethod
    def backward(ctx, grad_output):
        """ Backward pass computation """
        input, expectation_z = ctx.saved_tensors
        input_list = np.array(input.tolist())
        # Evaluate the circuit at input +/- shift for every sample.
        shift_right = input_list + np.ones(input_list.shape) * ctx.shift
        shift_left = input_list - np.ones(input_list.shape) * ctx.shift
        gradients = []
        for i in range(len(input_list)):
            expectation_right = ctx.quantum_circuit.run(shift_right[i])
            expectation_left = ctx.quantum_circuit.run(shift_left[i])
            # NOTE(review): plain difference, no 1/2 factor — matches the textbook
            # hybrid-NN example's convention rather than the exact shift rule.
            gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
            gradients.append(gradient)
        gradients = np.array([gradients]).T
        # Returns one gradient per forward() argument; the circuit and shift are
        # non-differentiable, hence the two trailing None values.
        return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
    """Quantum-classical hybrid layer.

    Wraps a single-qubit parameterized circuit so it can sit inside a torch
    model; gradients flow through HybridFunction's parameter-shift backward.
    """
    def __init__(self, backend, shots, shift):
        super(Hybrid, self).__init__()
        # One-qubit circuit, executed `shots` times per forward evaluation.
        circuit = QuantumCircuit(1, backend, shots)
        self.quantum_circuit = circuit
        # Step size used by the parameter-shift gradient estimate.
        self.shift = shift

    def forward(self, input):
        out = HybridFunction.apply(input, self.quantum_circuit, self.shift)
        return out
class Net(nn.Module):
    """Hybrid CNN for 3-channel images: two conv+pool stages, dropout, two dense
    layers down to one logit, then the single-qubit quantum layer; outputs the
    pair (p, 1 - p)."""
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(500, 500)
        self.fc2 = nn.Linear(500, 1)
        self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)

    def forward(self, x):
        # Convolutional feature extraction (relu then 2x2 max-pool, twice).
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = self.dropout(x)
        # Flatten to a single row (batch size 1 assumed throughout this script).
        x = x.view(1, -1)
        # Classical head producing a single scalar for the quantum layer.
        x = self.fc2(F.relu(self.fc1(x)))
        x = self.hybrid(x)
        # Two-class output as complementary probabilities.
        return torch.cat((x, 1 - x), -1)
#qc = TorchCircuit.apply
# Ignore this cell (notebook markdown; the Net below relies on `qc`, which is commented out above)
class Net(nn.Module):
    """Alternative hybrid CNN that routes the final scalar through the module-level
    `qc` function.
    NOTE(review): `qc = TorchCircuit.apply` is commented out above, so forward()
    raises NameError unless that assignment is restored — the surrounding text says
    to ignore this cell."""
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.h1 = nn.Linear(500, 500)
        self.h2 = nn.Linear(500, 1)
    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 500)
        x = F.relu(self.h1(x))
        x = F.dropout(x, training=self.training)
        x = self.h2(x)
        # Quantum expectation in [-1, 1], rescaled to [0, 1] below.
        x = qc(x)
        x = (x + 1) / 2  # Normalise the inputs to 1 or 0
        x = torch.cat((x, 1 - x), -1)
        return x
# Train the hybrid model for 5 epochs with Adam and NLL loss, logging mean loss
# per epoch and plotting the convergence curve.
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
    total_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        total_loss.append(loss.item())
    # Record this epoch's mean loss.
    loss_list.append(sum(total_loss)/len(total_loss))
    print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
        100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
# Re-initialize and train from scratch for 10 epochs (same setup as the 5-epoch run).
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
model.train()
for epoch in range(epochs):
    total_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        total_loss.append(loss.item())
    loss_list.append(sum(total_loss)/len(total_loss))
    print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
        100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
# Re-initialize and train from scratch for 20 epochs (same setup as the runs above).
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
model.train()
for epoch in range(epochs):
    total_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        total_loss.append(loss.item())
    loss_list.append(sum(total_loss)/len(total_loss))
    print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
        100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
# Testing the quantum hybrid in order to compare it with the classical one
model.eval()
with torch.no_grad():
    correct = 0
    # BUGFIX: total_loss previously carried over the last training epoch's losses,
    # polluting the reported test loss; start from an empty list here.
    total_loss = []
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        # Predicted class = index of the larger of the two output values.
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
        )
class Net(nn.Module):
    """Hybrid CNN (same architecture as the earlier hybrid Net): two conv+pool
    stages, dropout, dense head to one scalar, quantum layer, complementary
    two-class output."""
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(500, 500)
        self.fc2 = nn.Linear(500, 1)
        self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)

    def forward(self, x):
        # Two relu + 2x2 max-pool convolution stages, then 2D dropout.
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = self.dropout(x)
        # Flatten (this script always uses batch size 1).
        x = x.view(1, -1)
        x = self.fc2(F.relu(self.fc1(x)))
        x = self.hybrid(x)
        return torch.cat((x, 1 - x), -1)
class Net(nn.Module):
    """Purely classical baseline CNN for 1-channel 28x28 inputs: two conv+pool
    stages, dropout, and a dense head producing two class probabilities."""
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 2)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        # 20 channels * 4 * 4 spatial = 320 features for 28x28 inputs.
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # BUGFIX: softmax without an explicit dim is deprecated and relied on an
        # implicit choice; dim=1 makes the per-sample normalization explicit
        # (identical values for this 2-D output).
        # NOTE(review): nn.NLLLoss expects LOG-probabilities; if this net is trained
        # with NLLLoss like the others, F.log_softmax would be the correct pairing.
        return F.softmax(x, dim=1)
|
https://github.com/Gopal-Dahale/qiskit-qulacs
|
Gopal-Dahale
|
"""Tests for the Adapter class."""
from unittest import TestCase
import numpy as np
import pytest
import qiskit.circuit.library as lib
from ddt import data, ddt
from qiskit import QuantumCircuit, QuantumRegister, transpile
from qiskit.circuit import Parameter, ParameterVector
from qiskit.circuit.library import PauliEvolutionGate, TwoLocal
from qiskit.quantum_info import SparsePauliOp
from qiskit_aer import Aer
from qiskit_qulacs.adapter import (
convert_qiskit_to_qulacs_circuit,
convert_sparse_pauliop_to_qulacs_obs,
)
from qiskit_qulacs.qulacs_backend import QulacsBackend
from qulacs import Observable, ParametricQuantumCircuit, PauliOperator, QuantumState
# FSim Gate with fixed parameters
# source: https://quantumai.google/reference/python/cirq/FSimGate
# 4x4 unitary for an FSim gate with theta = pi/3 and phi = pi/4, used by the
# unitary-gate conversion test below.
fsim_mat = np.array(
    [
        [1, 0, 0, 0],
        [0, np.cos(np.pi / 3), -1j * np.sin(np.pi / 3), 0],
        [0, -1j * np.sin(np.pi / 3), np.cos(np.pi / 3), 0],
        [0, 0, 0, np.exp(-1j * np.pi / 4)],
    ]
)
# One instance of every Qiskit standard-library gate exercised by
# test_standard_gate_decomp; parameterized gates carry fresh symbolic Parameters
# that the test binds to concrete values before transpilation.
qiskit_standard_gates = [
    lib.IGate(),
    lib.SXGate(),
    lib.XGate(),
    lib.CXGate(),
    lib.RZGate(Parameter("Ξ»")),
    lib.RGate(Parameter("Ο΄"), Parameter("Ο")),
    lib.C3SXGate(),
    lib.CCXGate(),
    lib.DCXGate(),
    lib.CHGate(),
    lib.CPhaseGate(Parameter("Ο΄")),
    lib.CRXGate(Parameter("Ο΄")),
    lib.CRYGate(Parameter("Ο΄")),
    lib.CRZGate(Parameter("Ο΄")),
    lib.CSwapGate(),
    lib.CSXGate(),
    lib.CUGate(Parameter("Ο΄"), Parameter("Ο"), Parameter("Ξ»"), Parameter("Ξ³")),
    lib.CU1Gate(Parameter("Ξ»")),
    lib.CU3Gate(Parameter("Ο΄"), Parameter("Ο"), Parameter("Ξ»")),
    lib.CYGate(),
    lib.CZGate(),
    lib.CCZGate(),
    lib.HGate(),
    lib.PhaseGate(Parameter("Ο΄")),
    lib.RCCXGate(),
    lib.RC3XGate(),
    lib.RXGate(Parameter("Ο΄")),
    lib.RXXGate(Parameter("Ο΄")),
    lib.RYGate(Parameter("Ο΄")),
    lib.RYYGate(Parameter("Ο΄")),
    lib.RZZGate(Parameter("Ο΄")),
    lib.RZXGate(Parameter("Ο΄")),
    lib.XXMinusYYGate(Parameter("Ο΄"), Parameter("Ο")),
    lib.XXPlusYYGate(Parameter("Ο΄"), Parameter("Ο")),
    lib.ECRGate(),
    lib.SGate(),
    lib.SdgGate(),
    lib.CSGate(),
    lib.CSdgGate(),
    lib.SwapGate(),
    lib.iSwapGate(),
    lib.SXdgGate(),
    lib.TGate(),
    lib.TdgGate(),
    lib.UGate(Parameter("Ο΄"), Parameter("Ο"), Parameter("Ξ»")),
    lib.U1Gate(Parameter("Ξ»")),
    lib.U2Gate(Parameter("Ο"), Parameter("Ξ»")),
    lib.U3Gate(Parameter("Ο΄"), Parameter("Ο"), Parameter("Ξ»")),
    lib.YGate(),
    lib.ZGate(),
]
def convert_and_check_statevector(testcase, qc, params=None):
    """
    The function converts a Qiskit quantum circuit to a Qulacs circuit,
    obtains the statevectors from both frameworks, and checks if they are close.
    Args:
        testcase: instance of a unittest test case (must provide `aer_backend`).
        qc: quantum circuit in Qiskit.
        params: list that contains the parameters to be assigned to the quantum circuit `qc`.
            Defaults to no parameters.
    """
    # BUGFIX: `params=[]` was a mutable default argument; use a None sentinel.
    # Behavior is unchanged (an empty list and None both skip parameter binding).
    if params is None:
        params = []
    # convert qiskit's quantum circuit to qulacs
    qulacs_circuit_builder = convert_qiskit_to_qulacs_circuit(qc)
    qulacs_circuit = qulacs_circuit_builder(params)[0]
    # Obtaining statevector from qiskit
    if params:
        qc = qc.assign_parameters(params)
    qc.save_statevector()
    qiskit_sv = testcase.aer_backend.run(qc).result().get_statevector().data
    # Obtaining statevector from qulacs
    quantum_state = QuantumState(qulacs_circuit.get_qubit_count())
    qulacs_circuit.update_quantum_state(quantum_state)
    qulacs_sv = quantum_state.get_vector()
    testcase.assertTrue(np.allclose(qiskit_sv, qulacs_sv))
class TestAdapterConverter(TestCase):
    """Tests for the Adapter class: conversion of Qiskit circuits (state prep,
    parameterized gates, parameter expressions, multiple registers) to Qulacs."""
    def setUp(self):
        # Reference simulator used to compute the expected statevectors.
        self.aer_backend = Aer.get_backend("aer_simulator_statevector")
    def test_state_preparation_01(self):
        """Tests state_preparation handling of Adapter"""
        qulacs_backend = QulacsBackend()
        # Normalized 1-qubit amplitude pair with a complex component.
        input_state_vector = np.array([np.sqrt(3) / 2, np.sqrt(2) * complex(1, 1) / 4])
        qiskit_circuit = QuantumCircuit(1)
        qiskit_circuit.prepare_state(input_state_vector, 0)
        transpiled_qiskit_circuit = transpile(qiskit_circuit, qulacs_backend)
        convert_and_check_statevector(self, transpiled_qiskit_circuit)
    def test_state_preparation_00(self):
        """Tests state_preparation handling of Adapter"""
        qulacs_backend = QulacsBackend()
        # |-> state: equal magnitudes, opposite signs.
        input_state_vector = np.array([1 / np.sqrt(2), -1 / np.sqrt(2)])
        qiskit_circuit = QuantumCircuit(1)
        qiskit_circuit.prepare_state(input_state_vector, 0)
        transpiled_qiskit_circuit = transpile(qiskit_circuit, qulacs_backend)
        convert_and_check_statevector(self, transpiled_qiskit_circuit)
    def test_convert_parametric_qiskit_to_qulacs_circuit(self):
        """Tests convert_qiskit_to_qulacs_circuit works with parametric circuits."""
        theta = Parameter("ΞΈ")
        phi = Parameter("Ο")
        lam = Parameter("Ξ»")
        params = np.array([np.pi, np.pi / 2, np.pi / 3])
        qiskit_circuit = QuantumCircuit(1, 1)
        qiskit_circuit.rx(theta, 0)
        qiskit_circuit.ry(phi, 0)
        qiskit_circuit.rz(lam, 0)
        qulacs_circuit_builder = convert_qiskit_to_qulacs_circuit(qiskit_circuit)
        qulacs_circuit = qulacs_circuit_builder(params)[0]
        quantum_state = QuantumState(1)
        qulacs_circuit.update_quantum_state(quantum_state)
        qulacs_result = quantum_state.get_vector()
        # https://qiskit.org/documentation/stubs/qiskit.circuit.QuantumCircuit.html#qiskit.circuit.QuantumCircuit.parameters
        # Based on the above docs, the Paramters are sorted alphabetically.
        # Therefore ΞΈ, Ο, Ξ» should be sorted as ΞΈ, Ξ», Ο.
        # so ΞΈ = params[0], Ξ» = params[1], Ο = params[2]
        # Also, the sign of the rotation is negative in qulacs.
        qulacs_circuit_ans = ParametricQuantumCircuit(1)
        qulacs_circuit_ans.add_parametric_RX_gate(0, -params[0])
        qulacs_circuit_ans.add_parametric_RY_gate(0, -params[2])
        qulacs_circuit_ans.add_parametric_RZ_gate(0, -params[1])
        quantum_state_ans = QuantumState(1)
        qulacs_circuit_ans.update_quantum_state(quantum_state_ans)
        qulacs_result_ans = quantum_state_ans.get_vector()
        self.assertTrue(np.allclose(qulacs_result, qulacs_result_ans))
    def test_longer_parameter_expression(self):
        """Tests parameter expression with arbitrary operations and length"""
        theta = Parameter("ΞΈ")
        phi = Parameter("Ο")
        lam = Parameter("Ξ»")
        values = [0.1, 0.2, 0.3]
        qc = QuantumCircuit(1, 1)
        # Single rotation whose angle is a compound expression of all three parameters.
        qc.rx(phi * np.cos(theta) + lam, 0)
        convert_and_check_statevector(self, qc, values)
    def test_quantum_circuit_loaded_multiple_times_with_different_arguments(self):
        """Tests that a loaded quantum circuit can be called multiple times with
        different arguments."""
        theta = Parameter("ΞΈ")
        angle1 = 0.5
        angle2 = -0.5
        angle3 = 0
        qc = QuantumCircuit(3, 1)
        qc.rz(theta, [0])
        # Same circuit, three different bindings — each must round-trip correctly.
        convert_and_check_statevector(self, qc, [angle1])
        convert_and_check_statevector(self, qc, [angle2])
        convert_and_check_statevector(self, qc, [angle3])
    def test_quantum_circuit_with_bound_parameters(self):
        """Tests loading a quantum circuit that already had bound parameters."""
        theta = Parameter("ΞΈ")
        qc = QuantumCircuit(3, 1)
        qc.rz(theta, [0])
        # Bind before conversion, so the converter sees a parameter-free circuit.
        qc = qc.assign_parameters({theta: 0.5})
        convert_and_check_statevector(self, qc)
    def test_unused_parameters_are_ignored(self):
        """Tests that unused parameters are ignored during assignment."""
        # b, c and v are intentionally created but never attached to the circuit.
        a, b, c = [Parameter(var) for var in "abc"]
        v = ParameterVector("v", 2)
        qc = QuantumCircuit(1)
        qc.rz(a, 0)
        # convert qiskit's quantum circuit to qulacs
        qulacs_circuit_builder = convert_qiskit_to_qulacs_circuit(qc)
        # Five values supplied; only the first (for `a`) is used by the circuit.
        qulacs_circuit = qulacs_circuit_builder([0.1, 0.2, 0.3, 0.4, 0.5])[0]
        # Obtaining statevector from qiskit
        qc = qc.assign_parameters([0.1])
        qc.save_statevector()
        qiskit_sv = self.aer_backend.run(qc).result().get_statevector().data
        # Obtaining statevector from qulacs
        quantum_state = QuantumState(qulacs_circuit.get_qubit_count())
        qulacs_circuit.update_quantum_state(quantum_state)
        qulacs_sv = quantum_state.get_vector()
        self.assertTrue(np.allclose(qiskit_sv, qulacs_sv))
    def test_unused_parameter_vector_items_are_ignored(self):
        """Tests that unused parameter vector items are ignored during assignment."""
        # Only v[1] is attached to the circuit; everything else is noise.
        a, b, c = [Parameter(var) for var in "abc"]
        v = ParameterVector("v", 2)
        qc = QuantumCircuit(1)
        qc.rz(v[1], 0)
        # convert qiskit's quantum circuit to qulacs
        qulacs_circuit_builder = convert_qiskit_to_qulacs_circuit(qc)
        qulacs_circuit = qulacs_circuit_builder([0.1, 0.2, 0.3, 0.4, 0.5])[0]
        # Obtaining statevector from qiskit
        qc = qc.assign_parameters([0.1])
        qc.save_statevector()
        qiskit_sv = self.aer_backend.run(qc).result().get_statevector().data
        # Obtaining statevector from qulacs
        quantum_state = QuantumState(qulacs_circuit.get_qubit_count())
        qulacs_circuit.update_quantum_state(quantum_state)
        qulacs_sv = quantum_state.get_vector()
        self.assertTrue(np.allclose(qiskit_sv, qulacs_sv))
    def test_wires_two_different_quantum_registers(self):
        """Tests loading a circuit with the three-qubit operations supported by PennyLane."""
        # CSWAP spanning two registers (2 + 1 qubits) checks wire mapping.
        three_wires = [0, 1, 2]
        qr1 = QuantumRegister(2)
        qr2 = QuantumRegister(1)
        qc = QuantumCircuit(qr1, qr2)
        qc.cswap(*three_wires)
        convert_and_check_statevector(self, qc)
class TestConverterGates(TestCase):
    """Gate-level conversion tests: every standard gate, Pauli-evolution gates,
    and raw unitary matrices must produce matching statevectors."""
    def setUp(self):
        # Reference simulator used to compute the expected statevectors.
        self.aer_backend = Aer.get_backend("aer_simulator_statevector")
    def test_u_gate(self):
        """Tests adapter conversion of u gate"""
        qiskit_circuit = QuantumCircuit(1)
        qiskit_circuit.u(np.pi / 2, np.pi / 3, np.pi / 4, 0)
        convert_and_check_statevector(self, qiskit_circuit)
    def test_standard_gate_decomp(self):
        """Tests adapter decomposition of all standard gates to forms that can be translated"""
        qulacs_backend = QulacsBackend()
        for standard_gate in qiskit_standard_gates:
            qiskit_circuit = QuantumCircuit(standard_gate.num_qubits)
            qiskit_circuit.append(standard_gate, range(standard_gate.num_qubits))
            parameters = standard_gate.params
            if parameters:
                # Bind each symbolic parameter to a distinct irrational-ish angle.
                parameter_values = [
                    (137 / 61) * np.pi / i for i in range(1, len(parameters) + 1)
                ]
                parameter_bindings = dict(zip(parameters, parameter_values))
                qiskit_circuit = qiskit_circuit.assign_parameters(parameter_bindings)
            transpiled_qiskit_circuit = transpile(qiskit_circuit, qulacs_backend)
            # subTest keeps the loop going and names the failing gate on error.
            with self.subTest(f"Circuit with {standard_gate.name} gate."):
                qulacs_job = qulacs_backend.run(transpiled_qiskit_circuit)
                qulacs_result = qulacs_job.result().get_statevector()
                transpiled_qiskit_circuit.save_statevector()
                qiskit_job = self.aer_backend.run(transpiled_qiskit_circuit)
                qiskit_result = qiskit_job.result().get_statevector().data
                self.assertTrue(np.allclose(qulacs_result, qiskit_result))
    def test_exponential_gate_decomp(self):
        """Tests adapter translation of exponential gates"""
        qulacs_backend = QulacsBackend()
        qiskit_circuit = QuantumCircuit(2)
        # exp(-i * t * (ZZ - 0.1 XI)) with t = 2.
        hamiltonian = SparsePauliOp(["ZZ", "XI"], [1.0, -0.1])
        evo = PauliEvolutionGate(hamiltonian, time=2)
        qiskit_circuit.append(evo, range(2))
        transpiled_qiskit_circuit = transpile(qiskit_circuit, qulacs_backend)
        qulacs_job = qulacs_backend.run(transpiled_qiskit_circuit)
        qulacs_result = qulacs_job.result().get_statevector()
        transpiled_qiskit_circuit.save_statevector()
        qiskit_job = self.aer_backend.run(transpiled_qiskit_circuit)
        qiskit_result = np.array(qiskit_job.result().get_statevector())
        self.assertTrue(np.allclose(qulacs_result, qiskit_result))
    def test_unitary_gate(self):
        """Test for unitary gate"""
        # fsim_mat is the fixed FSim unitary defined at module level.
        qiskit_circuit = QuantumCircuit(2)
        qiskit_circuit.unitary(fsim_mat, [0, 1])
        convert_and_check_statevector(self, qiskit_circuit)
class TestConverterWarningsAndErrors(TestCase):
    """Tests that the converter raises/warns on unsupported inputs."""

    def test_params_not_passed(self):
        """Tests that a warning is raised if circuit has params but not params passed."""
        qc = QuantumCircuit(1)
        qc.rx(Parameter("ΞΈ"), 0)
        with pytest.raises(
            ValueError, match="The number of circuit parameters does not match"
        ):
            qulacs_circuit_builder = convert_qiskit_to_qulacs_circuit(qc)
            # Calling the builder with no parameter values must fail.
            qulacs_circuit_builder()[0]

    def test_template_not_supported(self):
        """Tests that a warning is raised if an unsupported instruction was reached."""
        qc = TwoLocal(
            4,
            ["rx", "ry", "rz"],
            ["cz", "cx"],
            "linear",
            reps=1,
        )
        params = np.random.uniform(size=qc.num_parameters)
        with pytest.raises(
            ValueError, match="The Gate does not support trainable parameter"
        ):
            qulacs_circuit_builder = convert_qiskit_to_qulacs_circuit(qc)
            qulacs_circuit_builder(params)[0]

    def test_unsupported_gate(self):
        """Tests that a warning is raised if an unsupported gate was reached"""
        qc = QuantumCircuit(1)
        qc.rx(0.1, 0)
        # measure is the unsupported instruction being exercised here.
        qc.measure_all()
        with pytest.warns(UserWarning, match="not supported by Qiskit-Qulacs"):
            qulacs_circuit_builder = convert_qiskit_to_qulacs_circuit(qc)
            qulacs_circuit_builder()[0]
# Hand-built qulacs Observables used as the expected side of the conversion
# tests below; Observable(n) is an n-qubit observable.
dummy_obs = [Observable(1), Observable(3), Observable(2)]
dummy_obs[0].add_operator(PauliOperator("I 0", 2.0))
dummy_obs[1].add_operator(PauliOperator("Z 0 Y 1 X 2", 1.0))
dummy_obs[2].add_operator(PauliOperator("Y 0 X 1", 3.0))
dummy_obs[2].add_operator(PauliOperator("X 0 Z 1", 7.0))
# (SparsePauliOp input, expected qulacs Observable) pairs. The "XYZ" ->
# "Z 0 Y 1 X 2" pairing reflects qiskit's little-endian Pauli-string order.
observable_factories = [
    (SparsePauliOp("I", coeffs=[2]), dummy_obs[0]),
    (SparsePauliOp("XYZ"), dummy_obs[1]),
    (SparsePauliOp(["XY", "ZX"], coeffs=[3, 7]), dummy_obs[2]),
]
@ddt
class TestConverterObservable(TestCase):
    """Tests for SparsePauliOp -> qulacs Observable conversion."""

    @data(*observable_factories)
    def test_convert_with_coefficients(self, ops):
        """Tests that a SparsePauliOp can be converted into a qulacs Observable,
        preserving the Pauli terms and their coefficients.
        """
        pauli_op, want_op = ops
        have_op = convert_sparse_pauliop_to_qulacs_obs(pauli_op)
        # Compare via JSON serialization since Observable has no direct __eq__.
        assert have_op.to_json() == want_op.to_json()
|
https://github.com/abhik-99/Qiskit-Summer-School
|
abhik-99
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import json
from typing import Any, Callable, Optional, Tuple, Union
from urllib.parse import urljoin
from qiskit import QuantumCircuit, execute
from qiskit.providers import JobStatus
from qiskit.providers.ibmq.job import IBMQJob
from .api import get_server_endpoint, send_request, get_access_token, get_submission_endpoint
from .exercises import get_question_id
from .util import compute_cost, get_provider, get_job, circuit_to_json, get_job_urls, uses_multiqubit_gate
def _circuit_criteria(
    circuit: QuantumCircuit,
    max_qubits: Optional[int] = None,
    min_cost: Optional[int] = None,
    check_gates: Optional[bool] = False
) -> Tuple[Optional[int], Optional[int]]:
    """Validate *circuit* against size/cost criteria.

    Returns (num_qubits, cost) when all checks pass, (None, None) otherwise;
    failures are reported to the user via print.
    """
    if max_qubits is not None and circuit.num_qubits > max_qubits:
        print(f'Your circuit has {circuit.num_qubits} qubits, which exceeds the maximum allowed.')
        print(f'Please reduce the number of qubits in your circuit to below {max_qubits}.')
        return None, None
    try:
        if check_gates and not uses_multiqubit_gate(circuit):
            print('Your circuit appears to not use any multi-quibit gates.')
            print('Please review your circuit and try again.')
            return None, None
        cost = compute_cost(circuit)
        # A suspiciously low cost usually means the answer is incomplete.
        if min_cost is not None and cost < min_cost:
            print(f'Your circuit cost ({cost}) is too low. But if you are convinced that your circuit\n'
                  'is correct, please let us know in the `#ibm-quantum-challenge-2020` Slack channel.')
            return None, None
        return circuit.num_qubits, cost
    except Exception as err:
        print(f'Unable to compute cost: {err}')
        return None, None
def _circuit_grading(
    circuit: QuantumCircuit,
    lab_id: str,
    ex_id: str,
    is_submit: Optional[bool] = False,
    max_qubits: Optional[int] = None,
    min_cost: Optional[int] = None,
    check_gates: Optional[bool] = False
) -> Tuple[Optional[dict], Optional[str]]:
    """Build the grading/submission payload for a circuit answer.

    Returns (payload, server). server is None when submitting (the submission
    endpoint is resolved elsewhere); (None, None) signals a validation failure
    that has already been reported to the user.
    """
    if not isinstance(circuit, QuantumCircuit):
        print(f'Expected a QuantumCircuit, but was given {type(circuit)}')
        print('Please provide a circuit as your answer.')
        return None, None

    server = None
    if not is_submit:
        server = get_server_endpoint(lab_id, ex_id)
        if not server:
            print('Could not find a valid grading server or '
                  'the grading servers are down right now.')
            return None, None

    _, cost = _circuit_criteria(
        circuit,
        max_qubits=max_qubits,
        min_cost=min_cost,
        check_gates=check_gates
    )
    if cost is None:
        return None, server

    payload = {'answer': circuit_to_json(circuit)}
    # Grading and submission endpoints expect differently-named id fields.
    id_key = 'questionNumber' if is_submit else 'question_id'
    payload[id_key] = get_question_id(lab_id, ex_id)
    return payload, server
def _job_grading(
    job_or_id: Union[IBMQJob, str],
    lab_id: str,
    ex_id: str,
    is_submit: Optional[bool] = False
) -> Tuple[Optional[dict], Optional[str]]:
    """Build the grading/submission payload for a job-based answer.

    Returns (payload, server). server is None when is_submit is True;
    (None, None) signals a validation failure already reported to the user.
    """
    if not isinstance(job_or_id, IBMQJob) and not isinstance(job_or_id, str):
        print(f'Expected an IBMQJob or a job ID, but was given {type(job_or_id)}')
        print(f'Please submit a job as your answer.')
        return None, None
    if not is_submit:
        server = get_server_endpoint(lab_id, ex_id)
        if not server:
            print('Could not find a valid grading server or the grading '
                  'servers are down right now.')
            return None, None
    else:
        server = None
    # Resolve a job ID string into an IBMQJob when necessary.
    job = get_job(job_or_id) if isinstance(job_or_id, str) else job_or_id
    if not job:
        print('An invalid or non-existent job was specified.')
        return None, None
    job_status = job.status()
    if job_status in [JobStatus.CANCELLED, JobStatus.ERROR]:
        print(f'Job did not successfully complete: {job_status.value}.')
        return None, None
    elif job_status is not JobStatus.DONE:
        print(f'Job has not yet completed: {job_status.value}.')
        print(f'Please wait for the job (id: {job.job_id()}) to complete then try again.')
        return None, None
    # 'qc_cost' is stamped into the qobj header by prepare_circuit()/prepare_solver();
    # its absence means the job was not produced by those helpers.
    header = job.result().header.to_dict()
    if 'qc_cost' not in header:
        if is_submit:
            print('An unprepared answer was specified. '
                  'Please prepare() and grade() answer before submitting.')
        else:
            print('An unprepared answer was specified. Please prepare() answer before grading.')
        return None, None
    download_url, result_url = get_job_urls(job)
    if not download_url or not result_url:
        print('Unable to obtain job URLs')
        return None, None
    # The grader fetches the job result itself via these URLs.
    payload = {
        'answer': json.dumps({
            'download_url': download_url,
            'result_url': result_url
        })
    }
    if is_submit:
        payload['questionNumber'] = get_question_id(lab_id, ex_id)
    else:
        payload['question_id'] = get_question_id(lab_id, ex_id)
    return payload, server
def _number_grading(
    answer: int,
    lab_id: str,
    ex_id: str,
    is_submit: Optional[bool] = False
) -> Tuple[Optional[dict], Optional[str]]:
    """Build the grading/submission payload for an integer answer.

    Returns (payload, server); server is None when submitting, and
    (None, None) signals a validation failure already reported to the user.
    """
    if not isinstance(answer, int):
        print(f'Expected a integer, but was given {type(answer)}')
        print('Please provide a number as your answer.')
        return None, None

    server = None
    if not is_submit:
        server = get_server_endpoint(lab_id, ex_id)
        if not server:
            print('Could not find a valid grading server '
                  'or the grading servers are down right now.')
            return None, None

    payload = {'answer': str(answer)}
    # Grading and submission endpoints expect differently-named id fields.
    payload['questionNumber' if is_submit else 'question_id'] = get_question_id(lab_id, ex_id)
    return payload, server
def prepare_circuit(
    circuit: QuantumCircuit,
    max_qubits: Optional[int] = 28,
    min_cost: Optional[int] = None,
    check_gates: Optional[bool] = False,
    **kwargs
) -> Optional[IBMQJob]:
    """Validate *circuit* and execute it with its cost stamped into the job header.

    Extra keyword arguments are forwarded to qiskit.execute; when no 'backend'
    is supplied, the IBMQ QASM simulator is used. Returns the submitted job,
    or None if validation failed.
    """
    job = None
    if not isinstance(circuit, QuantumCircuit):
        print(f'Expected a QuantumCircuit, but was given {type(circuit)}')
        print(f'Please provide a circuit.')
        return None
    _, cost = _circuit_criteria(
        circuit,
        max_qubits=max_qubits,
        min_cost=min_cost,
        check_gates=check_gates
    )
    if cost is not None:
        if 'backend' not in kwargs:
            kwargs['backend'] = get_provider().get_backend('ibmq_qasm_simulator')
        # execute experiments; 'qc_cost' in the header is what _job_grading()
        # later checks to recognize a prepared answer.
        print('Starting experiment. Please wait...')
        job = execute(
            circuit,
            qobj_header={
                'qc_cost': cost
            },
            **kwargs
        )
        print(f'You may monitor the job (id: {job.job_id()}) status '
              'and proceed to grading when it successfully completes.')
    return job
def prepare_solver(
    solver_func: Callable,
    lab_id: str,
    ex_id: str,
    problem_set: Optional[Any] = None,
    max_qubits: Optional[int] = 28,
    min_cost: Optional[int] = None,
    check_gates: Optional[bool] = False,
    **kwargs
) -> Optional[IBMQJob]:
    """Run *solver_func* on a local problem set and a server-issued one,
    then execute both circuits as a single job.

    The server problem-set index is recorded in the qobj header so the grader
    can identify which instance was solved. Returns the job, or None on
    validation failure.
    """
    job = None
    if not callable(solver_func):
        print(f'Expected a function, but was given {type(solver_func)}')
        print(f'Please provide a function that returns a QuantumCircuit.')
        return None
    server = get_server_endpoint(lab_id, ex_id)
    if not server:
        print('Could not find a valid grading server or the grading servers are down right now.')
        return
    endpoint = server + 'problem-set'
    index, value = get_problem_set(lab_id, ex_id, endpoint)
    print(f'Running {solver_func.__name__}...')
    # qc_1 solves the caller-supplied (practice) problem; only its cost is checked.
    qc_1 = solver_func(problem_set)
    _, cost = _circuit_criteria(
        qc_1,
        max_qubits=max_qubits,
        min_cost=min_cost,
        check_gates=check_gates
    )
    if value and index is not None and index >= 0 and cost is not None:
        # qc_2 solves the server-issued instance identified by `index`.
        qc_2 = solver_func(value)
        if 'backend' not in kwargs:
            kwargs['backend'] = get_provider().get_backend('ibmq_qasm_simulator')
        # execute experiments
        print('Starting experiments. Please wait...')
        job = execute(
            [qc_1, qc_2],
            qobj_header={
                'qc_index': [None, index],
                'qc_cost': cost
            },
            **kwargs
        )
        print(f'You may monitor the job (id: {job.job_id()}) status '
              'and proceed to grading when it successfully completes.')
    return job
def grade_circuit(
    circuit: QuantumCircuit,
    lab_id: str,
    ex_id: str,
    max_qubits: Optional[int] = 28,
    min_cost: Optional[int] = None
) -> bool:
    """Grade a circuit answer against the lab's grading server.

    Returns True if the server accepted the answer as correct.
    """
    payload, server = _circuit_grading(
        circuit,
        lab_id,
        ex_id,
        is_submit=False,
        max_qubits=max_qubits,
        min_cost=min_cost
    )
    if not payload:
        return False
    print('Grading your answer. Please wait...')
    return grade_answer(payload, server + 'validate-answer')
def grade_job(
    job_or_id: Union[IBMQJob, str],
    lab_id: str,
    ex_id: str
) -> bool:
    """Grade a completed job (or job ID) against the lab's grading server.

    Returns True if the server accepted the answer as correct.
    """
    payload, server = _job_grading(job_or_id, lab_id, ex_id, is_submit=False)
    if not payload:
        return False
    print('Grading your answer. Please wait...')
    return grade_answer(payload, server + 'validate-answer')
def grade_number(
    answer: int,
    lab_id: str,
    ex_id: str
) -> bool:
    """Grade an integer answer against the lab's grading server.

    Returns True if the server accepted the answer as correct.
    """
    payload, server = _number_grading(answer, lab_id, ex_id, is_submit=False)
    if not payload:
        return False
    print('Grading your answer. Please wait...')
    return grade_answer(payload, server + 'validate-answer')
def submit_circuit(
    circuit: QuantumCircuit,
    lab_id: str,
    ex_id: str,
    max_qubits: Optional[int] = 28,
    min_cost: Optional[int] = None
) -> bool:
    """Submit a circuit answer as final. Returns True on success."""
    payload, _ = _circuit_grading(
        circuit,
        lab_id,
        ex_id,
        is_submit=True,
        max_qubits=max_qubits,
        min_cost=min_cost
    )
    if not payload:
        return False
    print('Submitting your answer. Please wait...')
    return submit_answer(payload)
def submit_job(
    job_or_id: IBMQJob,
    lab_id: str,
    ex_id: str,
) -> bool:
    """Submit a job-based answer as final. Returns True on success."""
    payload, _ = _job_grading(job_or_id, lab_id, ex_id, is_submit=True)
    if not payload:
        return False
    print('Submitting your answer. Please wait...')
    return submit_answer(payload)
def submit_number(
    answer: int,
    lab_id: str,
    ex_id: str
) -> bool:
    """Submit an integer answer as final. Returns True on success."""
    payload, _ = _number_grading(answer, lab_id, ex_id, is_submit=True)
    if not payload:
        return False
    print('Submitting your answer. Please wait...')
    return submit_answer(payload)
def get_problem_set(
    lab_id: str, ex_id: str, endpoint: str
) -> Tuple[Optional[int], Optional[Any]]:
    """Fetch a problem-set instance for the exercise from *endpoint*.

    Returns (index, value) on success, (None, None) on any failure
    (failures are reported to the user via print).
    """
    problem_set_response = None
    try:
        payload = {'question_id': get_question_id(lab_id, ex_id)}
        problem_set_response = send_request(endpoint, query=payload, method='GET')
    except Exception as err:
        # Network/endpoint failure; fall through to the (None, None) return.
        print('Unable to obtain the problem set')
    if problem_set_response:
        status = problem_set_response.get('status')
        if status == 'valid':
            try:
                index = problem_set_response.get('index')
                # 'value' arrives JSON-encoded inside the response body.
                value = json.loads(problem_set_response.get('value'))
                return index, value
            except Exception as err:
                print(f'Problem set could not be processed: {err}')
        else:
            cause = problem_set_response.get('cause')
            print(f'Problem set failed: {cause}')
    return None, None
def grade_answer(payload: dict, endpoint: str, cost: Optional[int] = None) -> bool:
    """POST *payload* to the grading *endpoint* and report the outcome.

    When *cost* is given it is shown as the score instead of the
    server-reported one. Returns True if the answer was judged valid.
    """
    try:
        answer_response = send_request(endpoint, body=payload)
        status = answer_response.get('status', None)
        cause = answer_response.get('cause', None)
        # Explicit None check: a legitimate cost of 0 must not be silently
        # replaced by the server-reported score (truthiness would drop it).
        score = cost if cost is not None else answer_response.get('score', None)
        handle_grade_response(status, score=score, cause=cause)
        return status == 'valid' or status is True
    except Exception as err:
        print(f'Failed: {err}')
        return False
def submit_answer(payload: dict) -> bool:
    """POST *payload* to the challenge submission endpoint.

    Returns True if the submission was accepted as valid.
    """
    try:
        access_token = get_access_token()
        baseurl = get_submission_endpoint()
        endpoint = urljoin(baseurl, './challenges/answers')
        submit_response = send_request(
            endpoint,
            body=payload,
            query={'access_token': access_token}
        )
        # Some server versions report under 'status', others under 'valid'.
        status = submit_response.get('status', None)
        if status is None:
            status = submit_response.get('valid', None)
        cause = submit_response.get('cause', None)
        handle_submit_response(status, cause=cause)
        return status == 'valid' or status is True
    except Exception as err:
        print(f'Failed: {err}')
        return False
def handle_grade_response(
    status: Optional[str], score: Optional[int] = None, cause: Optional[str] = None
) -> None:
    """Print a human-readable summary of a grading-server response."""
    if status == 'valid':
        print('\nCongratulations π! Your answer is correct.')
        if score is not None:
            print(f'Your score is {score}.')
        return
    if status == 'invalid':
        print(f'\nOops π! {cause}')
        print('Please review your answer and try again.')
        return
    if status == 'notFinished':
        print(f'Job has not finished: {cause}')
        print('Please wait for the job to complete then try again.')
        return
    # Unknown / missing status: treat as a hard failure.
    print(f'Failed: {cause}')
    print('Unable to grade your answer.')
def handle_submit_response(
    status: Union[str, bool], cause: Optional[str] = None
) -> None:
    """Print a human-readable summary of a submission-endpoint response."""
    accepted = status == 'valid' or status is True
    rejected = status == 'invalid' or status is False
    if accepted:
        print('\nSuccess π! Your answer has been submitted.')
        return
    if rejected:
        reason = 'Your answer is incorrect' if cause is None else cause
        print(f'\nOops π! {reason}')
        print('Make sure your answer is correct and successfully graded before submitting.')
        return
    if status == 'notFinished':
        print(f'Job has not finished: {cause}')
        print('Please wait for the job to complete, grade it, and then try to submit again.')
        return
    # Unknown / missing status: treat as a hard failure.
    print(f'Failed: {cause}')
    print('Unable to submit your answer at this time.')
|
https://github.com/GabrielPontolillo/QiskitPBT
|
GabrielPontolillo
|
import random
from qiskit import QuantumCircuit
from QiskitPBT.case_studies.deutsch_jozsa.deutsch_jozsa import deutsch_jozsa_circ
from QiskitPBT.case_studies.deutsch_jozsa.dj_helpers import ConstantOracleInputGenerator, vmerge
from QiskitPBT.property import Property
class DeutschJozsaVMergeTwoConstantOracles(Property):
    """Property: vertically merging two constant oracles yields a circuit that
    Deutsch-Jozsa still classifies as constant (all-zero measurement)."""

    # specify the inputs that are to be generated
    def get_input_generators(self):
        # Two independent constant oracles on 2-5 qubits each.
        return [ConstantOracleInputGenerator(2, 5), ConstantOracleInputGenerator(2, 5)]

    # specify the preconditions for the test
    def preconditions(self, oracle1, oracle2):
        # No constraints: any pair of generated oracles is valid.
        return True

    # specify the operations to be performed on the input
    def operations(self, oracle1: QuantumCircuit, oracle2: QuantumCircuit):
        circ = deutsch_jozsa_circ(vmerge(oracle1, oracle2))
        # if oracle is constant this should be all 0
        baseline = QuantumCircuit(circ.num_qubits - 1, circ.num_qubits - 1)
        self.statistical_analysis.assert_equal(self, list(range(circ.num_qubits - 1)), circ, list(range(oracle1.num_qubits - 1)), baseline, basis=["z"])
|
https://github.com/2lambda123/Qiskit-qiskit
|
2lambda123
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for the converters."""
import math
import numpy as np
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.circuit import Gate, Qubit
from qiskit.quantum_info import Operator
from qiskit.test import QiskitTestCase
from qiskit.exceptions import QiskitError
class TestCircuitToGate(QiskitTestCase):
    """Test QuantumCircuit to Gate"""

    def test_simple_circuit(self):
        """test simple circuit"""
        qr1 = QuantumRegister(4, "qr1")
        qr2 = QuantumRegister(3, "qr2")
        qr3 = QuantumRegister(3, "qr3")
        circ = QuantumCircuit(qr1, qr2, qr3)
        circ.cx(qr1[1], qr2[2])
        gate = circ.to_gate()
        # The gate definition flattens all registers into one 10-qubit 'q'
        # register: qr1[1] -> q[1], qr2[2] -> q[6].
        q = QuantumRegister(10, "q")
        self.assertIsInstance(gate, Gate)
        self.assertEqual(gate.definition[0].qubits, (q[1], q[6]))

    def test_circuit_with_registerless_bits(self):
        """Test a circuit with registerless bits can be converted to a gate."""
        qr1 = QuantumRegister(2)
        qubits = [Qubit(), Qubit(), Qubit()]
        qr2 = QuantumRegister(3)
        circ = QuantumCircuit(qr1, qubits, qr2)
        circ.cx(3, 5)
        gate = circ.to_gate()
        self.assertIsInstance(gate, Gate)
        self.assertEqual(gate.num_qubits, len(qr1) + len(qubits) + len(qr2))
        gate_definition = gate.definition
        cx = gate_definition.data[0]
        self.assertEqual(cx.qubits, (gate_definition.qubits[3], gate_definition.qubits[5]))
        self.assertEqual(cx.clbits, ())

    def test_circuit_with_overlapping_registers(self):
        """Test that the conversion works when the given circuit has bits that are contained in more
        than one register."""
        qubits = [Qubit() for _ in [None] * 10]
        # qubits[4] and [5] belong to both registers.
        qr1 = QuantumRegister(bits=qubits[:6])
        qr2 = QuantumRegister(bits=qubits[4:])
        circ = QuantumCircuit(qubits, qr1, qr2)
        circ.cx(3, 5)
        gate = circ.to_gate()
        self.assertIsInstance(gate, Gate)
        self.assertEqual(gate.num_qubits, len(qubits))
        gate_definition = gate.definition
        cx = gate_definition.data[0]
        self.assertEqual(cx.qubits, (gate_definition.qubits[3], gate_definition.qubits[5]))
        self.assertEqual(cx.clbits, ())

    def test_raises(self):
        """test circuit which can't be converted raises"""
        circ1 = QuantumCircuit(3)
        circ1.x(0)
        circ1.cx(0, 1)
        circ1.barrier()
        circ2 = QuantumCircuit(1, 1)
        circ2.measure(0, 0)
        # NOTE(review): circ3 is built but never converted below — a third
        # assertRaises for reset was likely intended.
        circ3 = QuantumCircuit(1)
        circ3.x(0)
        circ3.reset(0)
        with self.assertRaises(QiskitError):  # TODO: accept barrier
            circ1.to_gate()
        with self.assertRaises(QiskitError):  # measure and reset are not valid
            circ2.to_gate()

    def test_generated_gate_inverse(self):
        """Test inverse of generated gate works."""
        qr1 = QuantumRegister(2, "qr1")
        circ = QuantumCircuit(qr1)
        circ.cx(qr1[1], qr1[0])
        gate = circ.to_gate()
        out_gate = gate.inverse()
        self.assertIsInstance(out_gate, Gate)

    def test_to_gate_label(self):
        """Test label setting."""
        qr1 = QuantumRegister(2, "qr1")
        circ = QuantumCircuit(qr1, name="a circuit name")
        circ.cx(qr1[1], qr1[0])
        gate = circ.to_gate(label="a label")
        self.assertEqual(gate.label, "a label")

    def test_zero_operands(self):
        """Test that a gate can be created, even if it has zero operands."""
        base = QuantumCircuit(global_phase=math.pi)
        gate = base.to_gate()
        self.assertEqual(gate.num_qubits, 0)
        self.assertEqual(gate.num_clbits, 0)
        self.assertEqual(gate.definition, base)
        compound = QuantumCircuit(1)
        compound.append(gate, [], [])
        # global phase pi flips the sign of the identity.
        np.testing.assert_allclose(-np.eye(2), Operator(compound), atol=1e-16)
|
https://github.com/indian-institute-of-science-qc/qiskit-aakash
|
indian-institute-of-science-qc
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-docstring
"""Tests for comparing the outputs of circuit drawer with expected ones."""
import os
import unittest
from codecs import encode
from math import pi
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.tools.visualization import HAS_MATPLOTLIB, circuit_drawer
from .visualization import QiskitVisualizationTestCase, path_to_diagram_reference
class TestCircuitVisualizationImplementation(QiskitVisualizationTestCase):
    """Visual accuracy of visualization tools outputs tests."""

    # Reference renderings that drawer output is diffed against.
    latex_reference = path_to_diagram_reference('circuit_latex_ref.png')
    matplotlib_reference = path_to_diagram_reference('circuit_matplotlib_ref.png')
    text_reference = path_to_diagram_reference('circuit_text_ref.txt')

    def sample_circuit(self):
        """Generate a sample circuit that includes the most common elements of
        quantum circuits.
        """
        qr = QuantumRegister(3, 'q')
        cr = ClassicalRegister(3, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.x(qr[0])
        circuit.y(qr[0])
        circuit.z(qr[0])
        circuit.barrier(qr[0])
        circuit.barrier(qr[1])
        circuit.barrier(qr[2])
        circuit.h(qr[0])
        circuit.s(qr[0])
        circuit.sdg(qr[0])
        circuit.t(qr[0])
        circuit.tdg(qr[0])
        circuit.iden(qr[0])
        circuit.reset(qr[0])
        circuit.rx(pi, qr[0])
        circuit.ry(pi, qr[0])
        circuit.rz(pi, qr[0])
        circuit.u0(pi, qr[0])
        circuit.u1(pi, qr[0])
        circuit.u2(pi, pi, qr[0])
        circuit.u3(pi, pi, pi, qr[0])
        circuit.swap(qr[0], qr[1])
        circuit.cx(qr[0], qr[1])
        circuit.cy(qr[0], qr[1])
        circuit.cz(qr[0], qr[1])
        circuit.ch(qr[0], qr[1])
        circuit.cu1(pi, qr[0], qr[1])
        circuit.cu3(pi, pi, pi, qr[0], qr[1])
        circuit.crz(pi, qr[0], qr[1])
        circuit.ccx(qr[0], qr[1], qr[2])
        circuit.cswap(qr[0], qr[1], qr[2])
        circuit.measure(qr, cr)
        return circuit

    # TODO: Enable for refactoring purposes and enable by default when we can
    # decide if the backend is available or not.
    @unittest.skip('Useful for refactoring purposes, skipping by default.')
    def test_latex_drawer(self):
        filename = self._get_resource_path('current_latex.png')
        qc = self.sample_circuit()
        circuit_drawer(qc, filename=filename, output='latex')
        self.assertImagesAreEqual(filename, self.latex_reference)
        os.remove(filename)

    # TODO: Enable for refactoring purposes and enable by default when we can
    # decide if the backend is available or not.
    @unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.')
    @unittest.skip('Useful for refactoring purposes, skipping by default.')
    def test_matplotlib_drawer(self):
        filename = self._get_resource_path('current_matplot.png')
        qc = self.sample_circuit()
        circuit_drawer(qc, filename=filename, output='mpl')
        self.assertImagesAreEqual(filename, self.matplotlib_reference)
        os.remove(filename)

    def test_text_drawer(self):
        filename = self._get_resource_path('current_textplot.txt')
        qc = self.sample_circuit()
        output = circuit_drawer(qc, filename=filename, output="text", line_length=-1)
        self.assertFilesAreEqual(filename, self.text_reference)
        os.remove(filename)
        # Text output must stay within code page 437 so it renders on legacy
        # Windows consoles.
        try:
            encode(str(output), encoding='cp437')
        except UnicodeEncodeError:
            self.fail("_text_circuit_drawer() should only use extended ascii (aka code page 437).")
if __name__ == '__main__':
unittest.main(verbosity=2)
|
https://github.com/Jaybsoni/QuantumCompiler
|
Jaybsoni
|
from qcompile import comp_utils as utils
from qiskit import *
import numpy as np
from pprint import pprint
import random
import matplotlib.pyplot as plt
random.seed(1)  # set random seed 1 (makes the random circuits reproducible)

# Intro to some helper functions (which can be found in comp_utils.py):
# NOTE(review): the file uses `from qiskit import *`, so the bare `qiskit`
# name used below relies on the package also being importable as a module
# attribute — confirm this works with the pinned qiskit version.
circ = qiskit.QuantumCircuit(3)  # construct a simple 3 qbit circuit
circ.h(0)
circ.cx(0, 2)
circ.i(1)
circ.z(0)
print(circ)  ## visualize the circuit

## read_circ .......................................................................................................
gate_lst, num_qbits = utils.read_circ(circ)  # here we use the read_circ helper function to extract the meta data
print('gate_lst: {}\n'.format(gate_lst))  # the gate_lst is an ordered list of tuples containing info about
# the gate, the qbits being applied to, and the parameters of the gate

## general_replace .................................................................................................
utils.general_replace(gate_lst, 'Z', [('X', [], [])])  # the general_replace function allows us to manipulate the gate_lst
print('replaced Z: {}\n'.format(gate_lst))  # here I replaced each instance of Z gate with an X gate.
utils.general_replace(gate_lst, 'I', [])  # we can remove gates by providing an empty list of replacement gates
print('removed I: {}\n'.format(gate_lst))  # here I just removes every instance of 'I'

## write_circ ......................................................................................................
new_circ = utils.write_circ(gate_lst, num_qbits)  # this function takes the gate_lst and creates a qiskit circuit object
print(new_circ)

# Here we will construct the gates defined above and numerically check if they are equal :
def Rx(theta):
    """Produces the Rx matrix given float theta (in radians) """
    cos_half = np.cos(theta / 2)
    sin_half = np.sin(theta / 2)
    return np.array([[cos_half, -1j * sin_half],
                     [-1j * sin_half, cos_half]])
def Ry(theta):
    """Produces the Ry matrix given float theta (in radians) """
    cos_half = np.cos(theta / 2)
    sin_half = np.sin(theta / 2)
    return np.array([[cos_half, -sin_half],
                     [sin_half, cos_half]])
def Rz(phi):
    """Produces the Rz matrix given float phi (in radians) """
    half = phi / 2
    return np.array([[np.exp(-1j * half), 0],
                     [0, np.exp(1j * half)]])
# Reference single-qubit gate matrices for the numeric comparisons below.
I = np.eye(2)
H = (1 / np.sqrt(2)) * np.array([[1, 1],
                                 [1, -1]])
X = np.array([[0, 1],
              [1, 0]])
Y = np.array([[0, -complex(0, 1)],
              [complex(0, 1), 0]])
Z = np.array([[1, 0],
              [0, -1]])

# First pass: each gate vs its Rz/Rx decomposition — equal only up to a
# global phase, which the printouts make visible.
print('I matrix: --------------------- ')
pprint(I)
pprint(np.round(Rz(0), decimals=3))
pprint(np.round(Rx(0), decimals=3))
print('\n')
print('H matrix: --------------------- ')
pprint(H)
pprint(np.round(Rz(np.pi/2) @ Rx(np.pi/2) @ Rz(np.pi/2), decimals=3))
pprint(np.round(Rx(np.pi/2) @ Rz(np.pi/2) @ Rx(np.pi/2), decimals=3))
print('\n')
print('X matrix: --------------------- ')
pprint(X)
pprint(np.round(Rx(np.pi), decimals=3))
print('\n')
print('Z matrix: --------------------- ')
pprint(Z)
pprint(np.round(Rz(np.pi), decimals=3))
print('\n')
print('Y matrix: --------------------- ')
pprint(Y)
pprint(np.round(Rz(-np.pi/2) @ Rx(np.pi) @ Rz(np.pi/2), decimals=3))
pprint(np.round(Rx(np.pi/2) @ Rz(np.pi) @ Rx(-np.pi/2), decimals=3))

# Second pass: same decompositions with the correcting global phase applied,
# so the products now match the reference matrices exactly.
print('I matrix: --------------------- ')
global_phase = np.exp(0)
pprint(I)
pprint(np.round( global_phase * Rz(0) , decimals=3))
pprint(np.round( global_phase * Rx(0) , decimals=3))
print('\n')
print('H matrix: --------------------- ')
global_phase = np.exp(complex(0, np.pi/2))
pprint(H)
pprint(np.round( global_phase * (Rz(np.pi/2) @ Rx(np.pi/2) @ Rz(np.pi/2)) , decimals=3))
pprint(np.round( global_phase * (Rx(np.pi/2) @ Rz(np.pi/2) @ Rx(np.pi/2)) , decimals=3))
print('\n')
print('X matrix: --------------------- ')
global_phase = np.exp(complex(0, np.pi/2))
pprint(X)
pprint(np.round( global_phase * Rx(np.pi) , decimals=3))
print('\n')
print('Z matrix: --------------------- ')
global_phase = np.exp(complex(0, np.pi/2))
pprint(Z)
pprint(np.round( global_phase * Rz(np.pi) , decimals=3))
print('\n')
print('Y matrix: --------------------- ')
global_phase = np.exp(complex(0, 3 * np.pi/2))
pprint(Y)
pprint(np.round( global_phase * (Rz(-np.pi/2) @ Rx(np.pi) @ Rz(np.pi/2)) , decimals=3))
pprint(np.round( global_phase * (Rx(np.pi/2) @ Rz(np.pi) @ Rx(-np.pi/2)) , decimals=3))
def simple_compiler(circ):
    '''A quantum compiler that produces a new quantum circuit from the
    restricted subset of available gates. '''
    gate_lst, num_qbits = utils.read_circ(circ)
    # Ordered substitution table: (gate name, replacement sequence).
    # Order matters — 'Cx' expands into H gates, so 'H' is rewritten after it.
    substitutions = [
        ('Cx', [('H', utils.get_second, []), ('Cz', [], []), ('H', utils.get_second, [])]),
        ('I', [('Rz', [], [0])]),
        ('H', [('Rz', [], [np.pi / 2]), ('Rx', [], [np.pi / 2]), ('Rz', [], [np.pi / 2])]),
        ('X', [('Rx', [], [np.pi])]),
        ('Z', [('Rz', [], [np.pi])]),
        ('Y', [('Rz', [], [-np.pi / 2]), ('Rx', [], [np.pi]), ('Rz', [], [np.pi / 2])]),
        ('Ry', [('Rz', [], [-np.pi / 2]), ('Rx', [], utils.get_first), ('Rz', [], [np.pi / 2])]),
    ]
    for gate_name, replacement_gates in substitutions:
        utils.general_replace(gate_lst, gate_name, replacement_gates)
    return utils.write_circ(gate_lst, num_qbits)
# Testing ground:
circ = utils.random_circ_generator(num_qbits=3, num_gates=5)  # randomly generate a circuit,
print(circ)
compiled_circ = simple_compiler(circ)  # compile it
print(compiled_circ)
equal = utils.circ_equal(circ, compiled_circ)  # this helper function compares the magnitudes of each state_vector (element wise)
print(equal)  # to determine if they are identical (up to a global phase)

# Brute force test: 1000 random circuits; stop at the first mismatch.
for i in range(1000):
    circ = utils.random_circ_generator()
    compiled_circ = simple_compiler(circ)
    equal = utils.circ_equal(circ, compiled_circ)
    if not equal.all():
        print('FAILED at circuit {}'.format(i))
        break
    else:
        print('passed circuit {}'.format(i))
print('Passed all tests!')

# Here we analyze the overhead (depth increase) introduced by compilation.
depth_array = []
compiled_depth_array = []
ratio = []  # percentage depth increase per circuit
for i in range(100):
    circ = utils.random_circ_generator(num_qbits=5, num_gates=15)  # randomly generate a circuit with 5 qbits and 15 gates
    compiled_circ = simple_compiler(circ)
    equal = utils.circ_equal(circ, compiled_circ)
    if not equal.all():  # make sure we are compiling properly!
        print("FAIL @ circuit {}".format(i))
        break
    depth_circ = circ.depth()
    depth_comp = compiled_circ.depth()
    depth_array.append(depth_circ)  # store depth
    compiled_depth_array.append(depth_comp)  # store new circuit depth
    ratio.append(((depth_comp - depth_circ) / depth_circ) * 100)
print('average initial circuit depth = {} +/- {}'.format(np.mean(depth_array), np.std(depth_array)))
print('average compiled circuit depth = {} +/- {}'.format(np.mean(compiled_depth_array), np.std(compiled_depth_array)))
print('average increase in depth = {}%'.format(np.round(np.mean(ratio), decimals=2)))
plt.plot(depth_array, label='circuit')
plt.plot(compiled_depth_array, label='compiled')
plt.legend()
plt.show()
## Here we implement the optimized compiler:
def compiler(circ):
    '''A quantum compiler that produces a new quantum circuit from the
    restricted subset of available gates.

    Pipeline:
      1. Preprocessing: strip identity gates and cancel adjacent duplicate
         Cx/Cz pairs (self-inverse gates applied twice do nothing).
      2. Compile: rewrite every gate into the {Rx, Rz, Cz} basis, exactly
         like the simple compiler.
      3. Simplification: merge consecutive same-axis rotations acting on
         the same qbit into a single rotation with summed angle.
    '''
    gate_lst, num_qbits = utils.read_circ(circ)
    # Preprocessing (Step1):
    utils.general_replace(gate_lst, 'I', []) # remove Identity
    length = len(gate_lst)
    for index in range(length - 1): # iterate over the lst and remove redundant Cx, Cz gates
        if index >= (len(gate_lst) - 1): # by removing the repetitive Cz and Cx gates
            break # we reduce the size of the list, so we need to check this edge case
        curr_gate_str = gate_lst[index][0]
        curr_qbit_lst = gate_lst[index][1]
        if curr_gate_str in ['Cx', 'Cz']: # Check if this gate is a Cz or Cx gate
            nxt_gate_str = gate_lst[index+1][0]
            nxt_qbit_lst = gate_lst[index+1][1]
            if ((nxt_gate_str == curr_gate_str) and # check that we are applying a Cz or Cx gate twice
                (nxt_qbit_lst == curr_qbit_lst)): # consecutively on the same control and target qbits
                del gate_lst[index + 1] # remove both gates
                del gate_lst[index]
    # Compile (similar to the simple compiler):
    # replace CNOT:
    replacement_gates = [('H', utils.get_second, []), ('Cz', [], []), ('H', utils.get_second, [])]
    utils.general_replace(gate_lst, 'Cx', replacement_gates)
    # replace Hadamard:
    replacement_gates = [('Rz', [], [np.pi/2]), ('Rx', [], [np.pi/2]), ('Rz', [], [np.pi/2])]
    utils.general_replace(gate_lst, 'H', replacement_gates)
    # replace X:
    replacement_gates = [('Rx', [], [np.pi])]
    utils.general_replace(gate_lst, 'X', replacement_gates)
    # replace Z:
    replacement_gates = [('Rz', [], [np.pi])]
    utils.general_replace(gate_lst, 'Z', replacement_gates)
    # replace y:
    replacement_gates = [('Rz', [], [-np.pi/2]), ('Rx', [], [np.pi]), ('Rz', [], [np.pi/2])]
    utils.general_replace(gate_lst, 'Y', replacement_gates)
    # replace Ry(theta):
    replacement_gates = [('Rz', [], [-np.pi/2]), ('Rx', [], utils.get_first), ('Rz', [], [np.pi/2])]
    utils.general_replace(gate_lst, 'Ry', replacement_gates)
    # simplification (Step2):
    index = 0
    while(index < len(gate_lst) - 1):
        # print('index: {}'.format(index))
        curr_gate_str = gate_lst[index][0]
        curr_qbit_lst = gate_lst[index][1]
        curr_qbit_params = gate_lst[index][2]
        if curr_gate_str in ['Rx', 'Rz']: # Check if this gate is a Rz or Rx gate
            i = 1 # another dummy index to look at gates ahead
            while(index + i < len(gate_lst)):
                # print('dummy: {}'.format(i))
                nxt_gate_str = gate_lst[index+i][0]
                nxt_qbit_lst = gate_lst[index+i][1]
                nxt_qbit_params = gate_lst[index+i][2]
                if ((nxt_gate_str == curr_gate_str) and # check that we are applying a Rz or Rx gate twice
                    (nxt_qbit_lst == curr_qbit_lst)): # consecutively on the same control and target qbits
                    del gate_lst[index + i] # remove both gates
                    del gate_lst[index]
                    new_gate = (curr_gate_str, curr_qbit_lst, [curr_qbit_params[0] + nxt_qbit_params[0]])
                    gate_lst.insert(index, new_gate) # add the combined gate and
                    break # break current while loop
                elif ((nxt_gate_str != curr_gate_str) and # if the next gate applied to the same qbit is different
                    (nxt_qbit_lst == curr_qbit_lst or # i.e instead of another Rx gate we apply a Rz or a Cz to
                    curr_qbit_lst[0] in nxt_qbit_lst)): # the same qbit then
                    index += 1 # move forward nothing left here to simplify
                    break
                else: # the next gate is being applied to a different set of qbits
                    i += 1 # so we can safely check the next gate in the list
            # NOTE(review): this increment also runs after the elif branch
            # above has already advanced index, so that path moves forward by
            # 2 and skips inspecting one gate; it also runs after a merge, so
            # a run of three same-axis rotations is only partially merged in
            # a single pass — confirm whether either is intended.
            index += 1
        else:
            index += 1
    compiled_circ = utils.write_circ(gate_lst, num_qbits)
    return compiled_circ
# Here we compare the overhead of the simple and the optimized compiler.
depth_array = []
compiled_depth_array = []
optimized_depth_array = []
ratio_simple = []
ratio_opt = []
for i in range(100):
    circ = utils.random_circ_generator(num_qbits=5, num_gates=15)  # random 5-qbit, 15-gate circuit
    compiled_circ = simple_compiler(circ)
    optimized_circ = compiler(circ)
    equal_simple = utils.circ_equal(circ, compiled_circ)
    equal_opt = utils.circ_equal(circ, optimized_circ)
    # Sanity-check both compilers before recording any statistics.
    if not equal_simple.all():
        print("simple compiler FAIL @ circuit {}".format(i))
        break
    if not equal_opt.all():
        print("optimized compiler FAIL @ circuit {}".format(i))
        break
    d_orig = circ.depth()
    d_simple = compiled_circ.depth()
    d_opt = optimized_circ.depth()
    depth_array.append(d_orig)
    compiled_depth_array.append(d_simple)
    optimized_depth_array.append(d_opt)
    ratio_simple.append(((d_simple - d_orig) / d_orig) * 100)
    ratio_opt.append(((d_opt - d_orig) / d_orig) * 100)
print('average initial circuit depth = {} +/- {}'.format(np.mean(depth_array), np.std(depth_array)))
print('average compiled circuit depth = {} +/- {}'.format(np.mean(compiled_depth_array), np.std(compiled_depth_array)))
print('average optimized circuit depth = {} +/- {}'.format(np.mean(optimized_depth_array), np.std(optimized_depth_array)))
print('average increase in depth from simple compiler = {}%'.format(np.round(np.mean(ratio_simple), decimals=2)))
print('average increase in depth from optimized compiler = {}%'.format(np.round(np.mean(ratio_opt), decimals=2)))
plt.plot(depth_array, label='circuit')
plt.plot(compiled_depth_array, label='compiled')
plt.plot(optimized_depth_array, label='optimized')
plt.legend()
plt.show()
## Here we develop the router:
# Ring topology: each key is a qbit index, the value lists its two connected
# neighbours. The order of the indices in the value list gives a natural
# orientation for walking the ring (get_path uses [0] for counter-clockwise
# and [1] for clockwise traversal).
topology = {0:[4, 1], 1:[0, 2], 2:[1, 3], 3:[2, 4], 4:[3, 0]}
def get_path(topology, start, end):
    ''' Takes a dict (topology) representing the geometry of the
    connections, an int (start) representing the starting index
    and an int (end) representing the ending index and returns
    a list corresponding to the shortest path from end --> start
    (assuming a ring topology)'''
    def walk(direction):
        # Follow the ring from `end`, stepping through the neighbour at
        # position `direction` (0 = counter-clockwise, 1 = clockwise),
        # until `start` becomes adjacent; record every qbit visited.
        trail = [end]
        node = end
        while start not in topology[node]:
            node = topology[node][direction]
            trail.append(node)
        trail.append(start)
        return trail

    clockwise = walk(1)
    counter_clockwise = walk(0)
    # Return the shorter of the two traversals (clockwise wins ties).
    if len(clockwise) <= len(counter_clockwise):
        return clockwise
    return counter_clockwise
def get_swaps(path):
    '''Take a list (path) between an end qbit index and
    a start qbit index. Return a list of tuples (replacement_gates)
    which correspond to the set of swap gates required to swap the
    end qbit with the start qbit'''
    # One swap between every consecutive pair of qbits along the path.
    forward = [('S', [a, b], []) for a, b in zip(path, path[1:])]
    # That chain shifts every qbit on the path by one position; undo the
    # shift by re-applying all but the final swap in reverse order.
    return forward + forward[-2::-1]
def circ_router(circ, topology):
    '''Takes a compiled circuit, and a topology to produce a
    properly routed circuit.

    Every Cz whose control and target are not directly connected is
    replaced by: swaps that bring the target next to the control, a Cz on
    the connected pair, and the mirror swaps restoring the original
    placement.
    '''
    gate_lst, num_qbits = utils.read_circ(circ)
    # NOTE(review): gate_lst is deleted-from and inserted-into while being
    # enumerated, so later iterations revisit the freshly inserted swap/Cz
    # gates (which pass the connectivity check) — confirm this is intended.
    for index, gate in enumerate(gate_lst): # iterate through the circuit
        curr_gate_str = gate_lst[index][0]
        curr_qbit_lst = gate_lst[index][1]
        curr_parms = gate_lst[index][2]
        if curr_gate_str == 'Cz': # check if this gate is a cz gate
            cntrl_qbit = curr_qbit_lst[0]
            trgt_qbit = curr_qbit_lst[1]
            if not trgt_qbit in topology[cntrl_qbit]: # check if the control and target qbits are 'connected'
                new_target = topology[cntrl_qbit][1] # if not, choose a qbit that is connected to the control
                                                     # to be the new target qbit
                path = get_path(topology, new_target, trgt_qbit) # find the path between the new target and the old target
                first_swaps = get_swaps(path) # the swap gates required to swap new_target w/ old target
                path.reverse()
                swap_backs = get_swaps(path) # the swap gates required to swap them back to original
                replacement_lst = first_swaps + [(curr_gate_str, [cntrl_qbit, new_target], curr_parms)] + swap_backs
                del gate_lst[index] # we delete the old Cz gate
                for j, replacement in enumerate(replacement_lst): # and add the swap + cz gate + swap back gates
                    gate_lst.insert(index + j, replacement)
    compiled_circ = utils.write_circ(gate_lst, num_qbits) #
    return compiled_circ
## Testing the qbit router:
# Ring connectivity: qbit index -> [ccw neighbour, cw neighbour].
topology = {0: [4, 1], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [3, 0]}
for trial in range(1000):
    original = utils.random_circ_generator(num_qbits=5, num_gates=15)
    compiled = compiler(original)
    routed = circ_router(compiled, topology)
    # The routed circuit must match both the original and the compiled
    # circuit (comparisons are up to a global phase).
    vs_original = utils.circ_equal(original, routed)
    vs_compiled = utils.circ_equal(compiled, routed)
    if not vs_original.all():
        print(vs_original)
        break
    if not vs_compiled.all():
        print(vs_compiled)
        break
    print('passed test #{}'.format(trial + 1))
# Here we analyze the overhead (again, again): raw vs. simple vs. optimized
# vs. routed circuit depths over 100 random circuits.
topology = {0: [4, 1], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [3, 0]}
depth_array = []
compiled_depth_array = []
optimized_depth_array = []
routed_depth_array = []
ratio_simple = []
ratio_opt = []
ratio_routed = []
for i in range(100):
    circ = utils.random_circ_generator(num_qbits=5, num_gates=15)  # random 5-qbit, 15-gate circuit
    compiled_circ = simple_compiler(circ)
    optimized_circ = compiler(circ)
    routed_circ = circ_router(optimized_circ, topology)
    equal1 = utils.circ_equal(circ, compiled_circ)
    equal2 = utils.circ_equal(circ, optimized_circ)
    equal3 = utils.circ_equal(circ, routed_circ)
    # Sanity-check every stage before recording statistics.
    if not equal1.all():
        print("simple compiler FAIL @ circuit {}".format(i))
        break
    if not equal2.all():
        print("optimized compiler FAIL @ circuit {}".format(i))
        break
    if not equal3.all():
        print("circuit router FAIL @ circuit {}".format(i))
        break
    d_orig = circ.depth()
    d_simple = compiled_circ.depth()
    d_opt = optimized_circ.depth()
    d_routed = routed_circ.depth()
    depth_array.append(d_orig)
    compiled_depth_array.append(d_simple)
    optimized_depth_array.append(d_opt)
    routed_depth_array.append(d_routed)
    ratio_simple.append(((d_simple - d_orig) / d_orig) * 100)
    ratio_opt.append(((d_opt - d_orig) / d_orig) * 100)
    ratio_routed.append(((d_routed - d_orig) / d_orig) * 100)
print('average initial circuit depth = {} +/- {}'.format(np.mean(depth_array), np.std(depth_array)))
print('average compiled circuit depth = {} +/- {}'.format(np.mean(compiled_depth_array), np.std(compiled_depth_array)))
print('average optimized circuit depth = {} +/- {}'.format(np.mean(optimized_depth_array), np.std(optimized_depth_array)))
print('average routed circuit depth = {} +/- {}'.format(np.mean(routed_depth_array), np.std(routed_depth_array)))
print('average increase in depth from simple compiler = {}%'.format(np.round(np.mean(ratio_simple), decimals=2)))
print('average increase in depth from optimized compiler = {}%'.format(np.round(np.mean(ratio_opt), decimals=2)))
print('average increase in depth from circuit rout = {}%'.format(np.round(np.mean(ratio_routed), decimals=2)))
plt.plot(depth_array, label='circuit')
plt.plot(compiled_depth_array, label='compiled')
plt.plot(optimized_depth_array, label='optimized')
plt.plot(routed_depth_array, label='routed', linestyle='--')
plt.legend()
plt.show()
|
https://github.com/ColibrITD-SAS/mpqp
|
ColibrITD-SAS
|
# Copyright 2019 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import qiskit
from typing import Tuple, Iterable
from qiskit import IBMQ, QuantumCircuit
from qiskit.compiler import assemble
from qiskit.tools.monitor import job_monitor
from pytket.backends import Backend
from pytket.qiskit import tk_to_qiskit
from pytket._routing import route, Architecture
from pytket._transform import Transform
from pytket._circuit import Circuit
import numpy as np
# Instruction types accepted for direct submission (u1/u2/u3 + CNOT basis,
# plus measurement); _qiskit_circ_valid rejects anything outside this set.
VALID_BACKEND_GATES = (
    qiskit.extensions.standard.u1.U1Gate,
    qiskit.extensions.standard.u2.U2Gate,
    qiskit.extensions.standard.u3.U3Gate,
    qiskit.extensions.standard.cx.CnotGate,
    qiskit.circuit.measure.Measure
)
def _qiskit_circ_valid(qc: QuantumCircuit, coupling: Iterable[Tuple[int]]) -> Tuple[bool, bool]:
    """Validate a circuit against the backend's native gates and coupling map.

    :param qc: circuit to check
    :param coupling: device coupling map of directed [control, target] pairs
    :return: ``(valid, has_measure)`` — ``valid`` is False if any instruction
        is outside VALID_BACKEND_GATES or a two-qubit gate acts on an
        uncoupled pair; ``has_measure`` reports whether any measurement exists.
        (The annotation previously claimed a plain ``bool``, but the function
        has always returned this pair.)
    """
    valid = True
    measure_count = 0
    for instruction in qc:
        op = instruction[0]
        if type(op) not in VALID_BACKEND_GATES:
            valid = False
            break
        if isinstance(op, qiskit.circuit.measure.Measure):
            measure_count += 1
        qargs = instruction[1]
        if len(qargs) > 1:
            # Each qarg indexes as a pair whose second element is the qubit
            # index (2019-era qiskit tuples); compare the directed pair
            # against the coupling map.
            control = qargs[0][1]
            target = qargs[1][1]
            if [control, target] not in coupling:
                valid = False
                break
    return valid, (measure_count > 0)
def _routed_ibmq_circuit(circuit:Circuit, arc: Architecture) -> QuantumCircuit:
    """Route a pytket circuit onto the device architecture and return it as a
    qiskit QuantumCircuit.

    Works on a copy: rebases to the qiskit gate set, routes onto ``arc``,
    lowers inserted SWAPs to CX, redirects CX gates to match the coupling
    direction, then applies post-routing optimisation before conversion.
    """
    c = circuit.copy()
    Transform.RebaseToQiskit().apply(c)
    physical_c = route(c, arc)
    physical_c.decompose_SWAP_to_CX()
    physical_c.redirect_CX_gates(arc)
    Transform.OptimisePostRouting().apply(physical_c)
    qc = tk_to_qiskit(physical_c)
    return qc
def _convert_bin_str(string) :
return [int(b) for b in string.replace(' ', '')][::-1]
class IBMQBackend(Backend) :
    # Thin pytket Backend wrapper for a remote IBMQ device: routes and
    # validates circuits against the device coupling map before submission.
    def __init__(self, backend_name:str, monitor:bool=True) :
        """A backend for running circuits on remote IBMQ devices.
        :param backend_name: name of ibmq device. e.g. `ibmqx4`, `ibmq_16_melbourne`.
        :type backend_name: str
        :param monitor: Use IBM job monitor, defaults to True
        :type monitor: bool, optional
        :raises ValueError: If no IBMQ account has been set up.
        """
        if len(IBMQ.stored_accounts()) ==0:
            raise ValueError('No IBMQ credentials found on disk. Store some first.')
        IBMQ.load_accounts()
        self._backend = IBMQ.get_backend(backend_name)
        # Cache device configuration and connectivity for validation/routing.
        self.config = self._backend.configuration()
        self.coupling = self.config.coupling_map
        self.architecture = Architecture(self.coupling)
        self._monitor = monitor
    def run(self, circuit:Circuit, shots:int, fit_to_constraints:bool=True) -> np.ndarray :
        """Run the circuit remotely and return one bit-list per shot.

        :param circuit: pytket circuit to execute
        :param shots: number of repetitions
        :param fit_to_constraints: route/rebase the circuit for this device
            first, defaults to True
        :return: numpy array with one little-endian bit list per shot
        """
        if fit_to_constraints:
            qc = _routed_ibmq_circuit(circuit, self.architecture)
        else:
            qc = tk_to_qiskit(circuit)
        valid, measures = _qiskit_circ_valid(qc, self.coupling)
        if not valid:
            raise RuntimeWarning("QuantumCircuit does not pass validity test, will likely fail on remote backend.")
        if not measures:
            raise RuntimeWarning("Measure gates are required for output.")
        qobj = assemble(qc, shots=shots, memory=self.config.memory)
        job = self._backend.run(qobj)
        if self._monitor :
            job_monitor(job)
        shot_list = []
        if self.config.memory:
            # Device supports per-shot memory: fetch each shot's bitstring.
            shot_list = job.result().get_memory(qc)
        else:
            # Otherwise reconstruct a shot list by expanding the counts.
            for string, count in job.result().get_counts().items():
                shot_list += [string]*count
        return np.asarray([_convert_bin_str(shot) for shot in shot_list])
    def get_counts(self, circuit, shots, fit_to_constraints=True) :
        """
        Run the circuit on the backend and accumulate the results into a summary of counts
        :param circuit: The circuit to run
        :param shots: Number of shots (repeats) to run
        :param fit_to_constraints: Compile the circuit to meet the constraints of the backend, defaults to True
        :param seed: Random seed to for simulator
        :return: Dictionary mapping bitvectors of results to number of times that result was observed (zero counts are omitted)
        """
        if fit_to_constraints:
            qc = _routed_ibmq_circuit(circuit, self.architecture)
        else:
            qc = tk_to_qiskit(circuit)
        valid, measures = _qiskit_circ_valid(qc, self.coupling)
        if not valid:
            raise RuntimeWarning("QuantumCircuit does not pass validity test, will likely fail on remote backend.")
        if not measures:
            raise RuntimeWarning("Measure gates are required for output.")
        qobj = assemble(qc, shots=shots)
        job = self._backend.run(qobj)
        counts = job.result().get_counts(qc)
        # Keys are converted to little-endian tuples of ints for hashability.
        return {tuple(_convert_bin_str(b)) : c for b, c in counts.items()}
|
https://github.com/swe-bench/Qiskit__qiskit
|
swe-bench
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the NoiseAdaptiveLayout pass"""
from datetime import datetime
import unittest
from qiskit.transpiler.passes import NoiseAdaptiveLayout
from qiskit.converters import circuit_to_dag
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.test import QiskitTestCase
from qiskit.providers.models import BackendProperties
from qiskit.providers.models.backendproperties import Nduv, Gate
def make_qubit_with_error(readout_error):
    """Create a qubit for BackendProperties: fixed T1/T2/frequency values
    plus the supplied readout error."""
    calib_time = datetime(year=2019, month=2, day=1, hour=0, minute=0, second=0)
    specs = [
        ("T1", "Β΅s", 100.0),
        ("T2", "Β΅s", 100.0),
        ("frequency", "GHz", 5.0),
        ("readout_error", "", readout_error),
    ]
    return [Nduv(name=n, date=calib_time, unit=u, value=v) for n, u, v in specs]
class TestNoiseAdaptiveLayout(QiskitTestCase):
    """Tests the NoiseAdaptiveLayout pass.

    The three tests previously duplicated the Nduv/Gate/BackendProperties
    construction; that boilerplate is factored into shared helpers.
    """

    # Shared calibration timestamp for all fabricated backend properties.
    _calib_time = datetime(year=2019, month=2, day=1, hour=0, minute=0, second=0)

    @classmethod
    def _cx_gate(cls, q0, q1, error):
        """Build a fake calibrated CX gate between q0 and q1 with the given error."""
        params = [Nduv(date=cls._calib_time, name="gate_error", unit="", value=error)]
        return Gate(name="CX{}_{}".format(q0, q1), gate="cx", parameters=params, qubits=[q0, q1])

    @classmethod
    def _backend_props(cls, ro_errors, gate_list):
        """Assemble BackendProperties from per-qubit readout errors and gates."""
        qubit_list = [make_qubit_with_error(e) for e in ro_errors]
        return BackendProperties(
            last_update_date=cls._calib_time,
            backend_name="test_backend",
            qubits=qubit_list,
            backend_version="1.0.0",
            gates=gate_list,
            general=[],
        )

    def _run_layout(self, circuit, bprop):
        """Run NoiseAdaptiveLayout on the circuit and return the chosen layout."""
        nalayout = NoiseAdaptiveLayout(bprop)
        nalayout.run(circuit_to_dag(circuit))
        return nalayout.property_set["layout"]

    def test_on_linear_topology(self):
        """
        Test that the mapper identifies the correct gate in a linear topology
        """
        qr = QuantumRegister(2, name="q")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        # CX(0,1) is noisy (0.9); the mapper should prefer the CX(1,2) edge.
        bprop = self._backend_props(
            [0.01, 0.01, 0.01],
            [self._cx_gate(0, 1, 0.9), self._cx_gate(1, 2, 0.1)],
        )
        initial_layout = self._run_layout(circuit, bprop)
        self.assertNotEqual(initial_layout[qr[0]], 0)
        self.assertNotEqual(initial_layout[qr[1]], 0)

    def test_bad_readout(self):
        """Test that the mapper avoids bad readout unit"""
        qr = QuantumRegister(2, name="q")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        # Physical qubit 2 has a terrible readout error (0.8); avoid it.
        bprop = self._backend_props(
            [0.01, 0.01, 0.8],
            [self._cx_gate(0, 1, 0.1), self._cx_gate(1, 2, 0.1)],
        )
        initial_layout = self._run_layout(circuit, bprop)
        self.assertNotEqual(initial_layout[qr[0]], 2)
        self.assertNotEqual(initial_layout[qr[1]], 2)

    def test_grid_layout(self):
        """
        Test that the mapper identifies best location for a star-like program graph
        Machine row1: (0, 1, 2)
        Machine row2: (3, 4, 5)
        """
        qr = QuantumRegister(4, name="q")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[3])
        circuit.cx(qr[1], qr[3])
        circuit.cx(qr[2], qr[3])
        # Edges touching the low-error cluster (1,3,4,5) have error 0.1;
        # edges touching physical qubits 0 and 2 have error 0.3.
        gates = [
            self._cx_gate(0, 1, 0.3),
            self._cx_gate(0, 3, 0.3),
            self._cx_gate(1, 2, 0.3),
            self._cx_gate(1, 4, 0.1),
            self._cx_gate(3, 4, 0.1),
            self._cx_gate(4, 5, 0.1),
            self._cx_gate(2, 5, 0.3),
        ]
        bprop = self._backend_props([0.01] * 6, gates)
        initial_layout = self._run_layout(circuit, bprop)
        for qid in range(4):
            for qloc in [0, 2]:
                self.assertNotEqual(initial_layout[qr[qid]], qloc)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import numpy as np
from qiskit import pulse
d0 = pulse.DriveChannel(0)  # drive channel of qubit 0
x90 = pulse.Gaussian(10, 0.1, 3)   # short Gaussian pulse — nominal pi/2 (x90) pulse
x180 = pulse.Gaussian(10, 0.2, 3)  # same shape at twice the amplitude — nominal pi (x180) pulse
def udd10_pos(j, n=10):
    """Fractional position of the j-th pulse in an n-pulse UDD sequence.

    Returns sin^2(pi * j / (2n + 2)). The pulse count is now a parameter
    (default 10, matching the original hard-coded sequence), so the same
    helper works for any UDD length.
    """
    return np.sin(np.pi * j / (2 * n + 2)) ** 2
# Build the UDD dynamical-decoupling schedule: x90, ten x180 pulses placed
# at the fractional positions given by udd10_pos inside a 300-sample window,
# then a closing x90.
with pulse.build() as udd_sched:
    pulse.play(x90, d0)
    with pulse.align_func(duration=300, func=udd10_pos):
        for _ in range(10):
            pulse.play(x180, d0)
    pulse.play(x90, d0)
udd_sched.draw()
|
https://github.com/MonitSharma/Qiskit-Summer-School-and-Quantum-Challenges
|
MonitSharma
|
import json
import logging
import numpy as np
import warnings
from functools import wraps
from typing import Any, Callable, Optional, Tuple, Union
from qiskit import IBMQ, QuantumCircuit, assemble
from qiskit.circuit import Barrier, Gate, Instruction, Measure
from qiskit.circuit.library import UGate, U3Gate, CXGate
from qiskit.providers.ibmq import AccountProvider, IBMQProviderError
from qiskit.providers.ibmq.job import IBMQJob
def get_provider() -> AccountProvider:
    """Return an IBMQ AccountProvider, loading the stored account if needed.

    IBMQ warnings and log output are temporarily silenced so repeated calls
    do not spam the console; the logger level is restored afterwards.
    """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        ibmq_logger = logging.getLogger('qiskit.providers.ibmq')
        current_level = ibmq_logger.level
        ibmq_logger.setLevel(logging.ERROR)
        # get provider
        try:
            provider = IBMQ.get_provider()
        except IBMQProviderError:
            # No provider active in this session yet: load the saved account.
            provider = IBMQ.load_account()
        ibmq_logger.setLevel(current_level)
        return provider
def get_job(job_id: str) -> Optional[IBMQJob]:
    """Retrieve a job by id, or None if the lookup fails for any reason."""
    try:
        return get_provider().backends.retrieve_job(job_id)
    except Exception:
        # Deliberately best-effort: any retrieval problem yields None.
        return None
def circuit_to_json(qc: QuantumCircuit) -> str:
    """Serialize the circuit's assembled Qobj dict to a JSON string.

    numpy arrays are emitted as nested lists and complex numbers as
    (real, imag) pairs; everything else uses the default encoder.
    """
    class _QobjEncoder(json.JSONEncoder):
        def default(self, obj: Any) -> Any:
            if isinstance(obj, np.ndarray):
                return obj.tolist()
            if isinstance(obj, complex):
                return (obj.real, obj.imag)
            return super().default(obj)
    return json.dumps(circuit_to_dict(qc), cls=_QobjEncoder)
def circuit_to_dict(qc: QuantumCircuit) -> dict:
    """Assemble the circuit into a Qobj and return its dictionary form."""
    return assemble(qc).to_dict()
def get_job_urls(job: Union[str, IBMQJob]) -> Tuple[Optional[str], Optional[str]]:
    """Return ``(download_url, result_url)`` for a job, or ``(None, None)``.

    :param job: either a job id string or an IBMQJob instance.

    Fixes the return annotation: it previously declared a 3-tuple starting
    with a bool, but both return paths have always produced a 2-tuple.
    Also fetches the provider's job API handle once instead of twice.
    """
    try:
        job_id = job.job_id() if isinstance(job, IBMQJob) else job
        job_api = get_provider()._api_client.account_api.job(job_id)
        download_url = job_api.download_url()['url']
        result_url = job_api.result_url()['url']
        return download_url, result_url
    except Exception:
        # Best-effort: any API/auth failure yields (None, None).
        return None, None
def cached(key_function: Callable) -> Callable:
    """Decorator factory: memoize a function, keyed by key_function(*args, **kwargs).

    The cache dict is exposed on the wrapped function (as ``__cache``) and
    grows without bound, matching the original behaviour.
    """
    def _decorator(f: Any) -> Callable:
        memo = {}
        f.__cache = memo
        @wraps(f)
        def _decorated(*args: Any, **kwargs: Any) -> int:
            key = key_function(*args, **kwargs)
            if key not in memo:
                memo[key] = f(*args, **kwargs)
            return memo[key]
        return _decorated
    return _decorator
def gate_key(gate: Gate) -> Tuple[str, int]:
    """Cache key for a gate: its name together with its qubit count."""
    return (gate.name, gate.num_qubits)
@cached(gate_key)
def gate_cost(gate: Gate) -> int:
    """Cost of a gate: 1 per single-qubit U/U3, 10 per CX, 0 for
    measure/barrier; any other gate costs the sum over its definition."""
    if isinstance(gate, (UGate, U3Gate)):
        return 1
    if isinstance(gate, CXGate):
        return 10
    if isinstance(gate, (Measure, Barrier)):
        return 0
    # Composite gate: recurse into the gates of its definition.
    return sum(gate_cost(sub) for sub, _, _ in gate.definition.data)
def compute_cost(circuit: Union[Instruction, QuantumCircuit]) -> int:
    """Total gate cost of a circuit or instruction (see gate_cost).

    :raises Exception: if the argument is neither a QuantumCircuit nor an
        Instruction.
    """
    print('Computing cost...')
    if isinstance(circuit, QuantumCircuit):
        circuit_data = circuit.data
    elif isinstance(circuit, Instruction):
        circuit_data = circuit.definition.data
    else:
        raise Exception(f'Unable to obtain circuit data from {type(circuit)}')
    return sum(gate_cost(gate) for gate, _, _ in circuit_data)
def uses_multiqubit_gate(circuit: QuantumCircuit) -> bool:
    """Return True if the circuit (recursively) contains any gate acting on
    more than one qubit; barriers and measurements are ignored.

    :raises Exception: if circuit data cannot be obtained from the argument.
    """
    if isinstance(circuit, QuantumCircuit):
        ops = circuit.data
    elif isinstance(circuit, Instruction) and circuit.definition is not None:
        ops = circuit.definition.data
    else:
        raise Exception(f'Unable to obtain circuit data from {type(circuit)}')
    for op, _, _ in ops:
        if isinstance(op, (Barrier, Measure)):
            continue
        elif isinstance(op, Gate):
            # Gates are terminal here: a 1-qubit Gate is NOT recursed into,
            # even though Gate is also an Instruction.
            if op.num_qubits > 1:
                return True
        elif isinstance(op, (QuantumCircuit, Instruction)) and uses_multiqubit_gate(op):
            return True
    return False
|
https://github.com/swe-bench/Qiskit__qiskit
|
swe-bench
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for visualization tools."""
import unittest
import numpy as np
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.circuit import Qubit, Clbit
from qiskit.visualization.circuit import _utils
from qiskit.visualization import array_to_latex
from qiskit.test import QiskitTestCase
from qiskit.utils import optionals
class TestVisualizationUtils(QiskitTestCase):
"""Tests for circuit drawer utilities."""
    def setUp(self):
        """Build the shared fixture: two 2-qubit/2-clbit register pairs with
        interleaved CX + measure operations, used by the layering tests."""
        super().setUp()
        self.qr1 = QuantumRegister(2, "qr1")
        self.qr2 = QuantumRegister(2, "qr2")
        self.cr1 = ClassicalRegister(2, "cr1")
        self.cr2 = ClassicalRegister(2, "cr2")
        self.circuit = QuantumCircuit(self.qr1, self.qr2, self.cr1, self.cr2)
        # Operations on the qr2/cr2 pair...
        self.circuit.cx(self.qr2[0], self.qr2[1])
        self.circuit.measure(self.qr2[0], self.cr2[0])
        self.circuit.cx(self.qr2[1], self.qr2[0])
        self.circuit.measure(self.qr2[1], self.cr2[1])
        # ...mirrored on the qr1/cr1 pair.
        self.circuit.cx(self.qr1[0], self.qr1[1])
        self.circuit.measure(self.qr1[0], self.cr1[0])
        self.circuit.cx(self.qr1[1], self.qr1[0])
        self.circuit.measure(self.qr1[1], self.cr1[1])
    def test_get_layered_instructions(self):
        """_get_layered_instructions without reverse_bits"""
        (qregs, cregs, layered_ops) = _utils._get_layered_instructions(self.circuit)
        # Expected layers: each layer is a list of (name, qargs, cargs) tuples.
        exp = [
            [("cx", (self.qr2[0], self.qr2[1]), ()), ("cx", (self.qr1[0], self.qr1[1]), ())],
            [("measure", (self.qr2[0],), (self.cr2[0],))],
            [("measure", (self.qr1[0],), (self.cr1[0],))],
            [("cx", (self.qr2[1], self.qr2[0]), ()), ("cx", (self.qr1[1], self.qr1[0]), ())],
            [("measure", (self.qr2[1],), (self.cr2[1],))],
            [("measure", (self.qr1[1],), (self.cr1[1],))],
        ]
        self.assertEqual([self.qr1[0], self.qr1[1], self.qr2[0], self.qr2[1]], qregs)
        self.assertEqual([self.cr1[0], self.cr1[1], self.cr2[0], self.cr2[1]], cregs)
        self.assertEqual(
            exp, [[(op.name, op.qargs, op.cargs) for op in ops] for ops in layered_ops]
        )
    def test_get_layered_instructions_reverse_bits(self):
        """_get_layered_instructions with reverse_bits=True"""
        (qregs, cregs, layered_ops) = _utils._get_layered_instructions(
            self.circuit, reverse_bits=True
        )
        # Reversing the bit order changes both the wire order and how the
        # operations pack into layers.
        exp = [
            [("cx", (self.qr2[0], self.qr2[1]), ()), ("cx", (self.qr1[0], self.qr1[1]), ())],
            [("measure", (self.qr2[0],), (self.cr2[0],))],
            [("measure", (self.qr1[0],), (self.cr1[0],)), ("cx", (self.qr2[1], self.qr2[0]), ())],
            [("cx", (self.qr1[1], self.qr1[0]), ())],
            [("measure", (self.qr2[1],), (self.cr2[1],))],
            [("measure", (self.qr1[1],), (self.cr1[1],))],
        ]
        self.assertEqual([self.qr2[1], self.qr2[0], self.qr1[1], self.qr1[0]], qregs)
        self.assertEqual([self.cr2[1], self.cr2[0], self.cr1[1], self.cr1[0]], cregs)
        self.assertEqual(
            exp, [[(op.name, op.qargs, op.cargs) for op in ops] for ops in layered_ops]
        )
    def test_get_layered_instructions_remove_idle_wires(self):
        """_get_layered_instructions with idle_wires=False"""
        # Registers are 3 wide but only the first two bits of each are used,
        # so idle_wires=False must drop the unused third wires.
        qr1 = QuantumRegister(3, "qr1")
        qr2 = QuantumRegister(3, "qr2")
        cr1 = ClassicalRegister(3, "cr1")
        cr2 = ClassicalRegister(3, "cr2")
        circuit = QuantumCircuit(qr1, qr2, cr1, cr2)
        circuit.cx(qr2[0], qr2[1])
        circuit.measure(qr2[0], cr2[0])
        circuit.cx(qr2[1], qr2[0])
        circuit.measure(qr2[1], cr2[1])
        circuit.cx(qr1[0], qr1[1])
        circuit.measure(qr1[0], cr1[0])
        circuit.cx(qr1[1], qr1[0])
        circuit.measure(qr1[1], cr1[1])
        (qregs, cregs, layered_ops) = _utils._get_layered_instructions(circuit, idle_wires=False)
        exp = [
            [("cx", (qr2[0], qr2[1]), ()), ("cx", (qr1[0], qr1[1]), ())],
            [("measure", (qr2[0],), (cr2[0],))],
            [("measure", (qr1[0],), (cr1[0],))],
            [("cx", (qr2[1], qr2[0]), ()), ("cx", (qr1[1], qr1[0]), ())],
            [("measure", (qr2[1],), (cr2[1],))],
            [("measure", (qr1[1],), (cr1[1],))],
        ]
        self.assertEqual([qr1[0], qr1[1], qr2[0], qr2[1]], qregs)
        self.assertEqual([cr1[0], cr1[1], cr2[0], cr2[1]], cregs)
        self.assertEqual(
            exp, [[(op.name, op.qargs, op.cargs) for op in ops] for ops in layered_ops]
        )
    def test_get_layered_instructions_left_justification_simple(self):
        """Test _get_layered_instructions left justification simple since #2802
        q_0: |0>ββββββββ ββ
        βββββ β
        q_1: |0>β€ H ββββΌββ
        βββββ€ β
        q_2: |0>β€ H ββββΌββ
        ββββββββ΄ββ
        q_3: |0>ββββββ€ X β
        βββββ
        """
        # (The box-drawing diagram above was mangled by a text-encoding pass;
        # it depicts h(1) and h(2) in the first column and cx(0, 3) after.)
        qc = QuantumCircuit(4)
        qc.h(1)
        qc.h(2)
        qc.cx(0, 3)
        (_, _, layered_ops) = _utils._get_layered_instructions(qc, justify="left")
        # Left justification packs both Hadamards into the first layer.
        l_exp = [
            [
                ("h", (Qubit(QuantumRegister(4, "q"), 1),), ()),
                ("h", (Qubit(QuantumRegister(4, "q"), 2),), ()),
            ],
            [("cx", (Qubit(QuantumRegister(4, "q"), 0), Qubit(QuantumRegister(4, "q"), 3)), ())],
        ]
        self.assertEqual(
            l_exp, [[(op.name, op.qargs, op.cargs) for op in ops] for ops in layered_ops]
        )
    def test_get_layered_instructions_right_justification_simple(self):
        """Test _get_layered_instructions right justification simple since #2802
        q_0: |0>βββ βββββββ
        β βββββ
        q_1: |0>βββΌβββ€ H β
        β βββββ€
        q_2: |0>βββΌβββ€ H β
        βββ΄βββββββ
        q_3: |0>β€ X ββββββ
        βββββ
        """
        # (The box-drawing diagram above was mangled by a text-encoding pass;
        # it depicts cx(0, 3) in the first column and h(1), h(2) after.)
        qc = QuantumCircuit(4)
        qc.h(1)
        qc.h(2)
        qc.cx(0, 3)
        (_, _, layered_ops) = _utils._get_layered_instructions(qc, justify="right")
        # Right justification pushes both Hadamards into the last layer.
        r_exp = [
            [("cx", (Qubit(QuantumRegister(4, "q"), 0), Qubit(QuantumRegister(4, "q"), 3)), ())],
            [
                ("h", (Qubit(QuantumRegister(4, "q"), 1),), ()),
                ("h", (Qubit(QuantumRegister(4, "q"), 2),), ()),
            ],
        ]
        self.assertEqual(
            r_exp, [[(op.name, op.qargs, op.cargs) for op in ops] for ops in layered_ops]
        )
def test_get_layered_instructions_left_justification_less_simple(self):
    """Test _get_layered_instructions left justification
    less simple example since #2802
    ┌────────────┐┌───┐┌────────────┐              ┌─┐┌────────────┐┌───┐┌────────────┐
    q_0: |0>┤ U2(0,pi/1) ├┤ X ├┤ U2(0,pi/1) ├──────────────┤M├┤ U2(0,pi/1) ├┤ X ├┤ U2(0,pi/1) ├
    ├────────────┤└─┬─┘├────────────┤┌────────────┐└╥┘└────────────┘└─┬─┘├────────────┤
    q_1: |0>┤ U2(0,pi/1) ├──■──┤ U2(0,pi/1) ├┤ U2(0,pi/1) ├─╫─────────────────■──┤ U2(0,pi/1) ├
    └────────────┘     └────────────┘└────────────┘ ║                    └────────────┘
    q_2: |0>─────────────────────────────────────────────────╫────────────────────────────────────
                                                             ║
    q_3: |0>─────────────────────────────────────────────────╫────────────────────────────────────
                                                             ║
    q_4: |0>─────────────────────────────────────────────────╫────────────────────────────────────
                                                             ║
    c1_0: 0 ═════════════════════════════════════════════════╩════════════════════════════════════
    """
    # NOTE: the QASM below is a runtime input to from_qasm_str and must be
    # kept exactly as-is (including whitespace inside the string).
    qasm = """
OPENQASM 2.0;
include "qelib1.inc";
qreg q[5];
creg c1[1];
u2(0,3.14159265358979) q[0];
u2(0,3.14159265358979) q[1];
cx q[1],q[0];
u2(0,3.14159265358979) q[0];
u2(0,3.14159265358979) q[1];
u2(0,3.14159265358979) q[1];
measure q[0] -> c1[0];
u2(0,3.14159265358979) q[0];
cx q[1],q[0];
u2(0,3.14159265358979) q[0];
u2(0,3.14159265358979) q[1];
"""
    qc = QuantumCircuit.from_qasm_str(qasm)
    (_, _, layered_ops) = _utils._get_layered_instructions(qc, justify="left")
    # Expected layering when every gate slides as far left as its
    # data dependencies allow.
    l_exp = [
        [
            ("u2", (Qubit(QuantumRegister(5, "q"), 0),), ()),
            ("u2", (Qubit(QuantumRegister(5, "q"), 1),), ()),
        ],
        [("cx", (Qubit(QuantumRegister(5, "q"), 1), Qubit(QuantumRegister(5, "q"), 0)), ())],
        [
            ("u2", (Qubit(QuantumRegister(5, "q"), 0),), ()),
            ("u2", (Qubit(QuantumRegister(5, "q"), 1),), ()),
        ],
        [("u2", (Qubit(QuantumRegister(5, "q"), 1),), ())],
        [
            (
                "measure",
                (Qubit(QuantumRegister(5, "q"), 0),),
                (Clbit(ClassicalRegister(1, "c1"), 0),),
            )
        ],
        [("u2", (Qubit(QuantumRegister(5, "q"), 0),), ())],
        [("cx", (Qubit(QuantumRegister(5, "q"), 1), Qubit(QuantumRegister(5, "q"), 0)), ())],
        [
            ("u2", (Qubit(QuantumRegister(5, "q"), 0),), ()),
            ("u2", (Qubit(QuantumRegister(5, "q"), 1),), ()),
        ],
    ]
    self.assertEqual(
        l_exp, [[(op.name, op.qargs, op.cargs) for op in ops] for ops in layered_ops]
    )
def test_get_layered_instructions_right_justification_less_simple(self):
    """Test _get_layered_instructions right justification
    less simple example since #2802
    ┌────────────┐┌───┐┌────────────┐┌─┐┌────────────┐┌───┐┌────────────┐
    q_0: |0>┤ U2(0,pi/1) ├┤ X ├┤ U2(0,pi/1) ├┤M├┤ U2(0,pi/1) ├┤ X ├┤ U2(0,pi/1) ├
    ├────────────┤└─┬─┘├────────────┤└╥┘├────────────┤└─┬─┘├────────────┤
    q_1: |0>┤ U2(0,pi/1) ├──■──┤ U2(0,pi/1) ├─╫─┤ U2(0,pi/1) ├──■──┤ U2(0,pi/1) ├
    └────────────┘     └────────────┘ ║ └────────────┘     └────────────┘
    q_2: |0>───────────────────────────────────╫──────────────────────────────────
                                               ║
    q_3: |0>───────────────────────────────────╫──────────────────────────────────
                                               ║
    q_4: |0>───────────────────────────────────╫──────────────────────────────────
                                               ║
    c1_0: 0 ═══════════════════════════════════╩══════════════════════════════════
    """
    # NOTE: the QASM below is a runtime input to from_qasm_str and must be
    # kept exactly as-is (including whitespace inside the string).
    qasm = """
OPENQASM 2.0;
include "qelib1.inc";
qreg q[5];
creg c1[1];
u2(0,3.14159265358979) q[0];
u2(0,3.14159265358979) q[1];
cx q[1],q[0];
u2(0,3.14159265358979) q[0];
u2(0,3.14159265358979) q[1];
u2(0,3.14159265358979) q[1];
measure q[0] -> c1[0];
u2(0,3.14159265358979) q[0];
cx q[1],q[0];
u2(0,3.14159265358979) q[0];
u2(0,3.14159265358979) q[1];
"""
    qc = QuantumCircuit.from_qasm_str(qasm)
    (_, _, layered_ops) = _utils._get_layered_instructions(qc, justify="right")
    # Expected layering when gates slide right: the lone extra u2 on q_1
    # merges into a later layer instead of occupying its own column.
    r_exp = [
        [
            ("u2", (Qubit(QuantumRegister(5, "q"), 0),), ()),
            ("u2", (Qubit(QuantumRegister(5, "q"), 1),), ()),
        ],
        [("cx", (Qubit(QuantumRegister(5, "q"), 1), Qubit(QuantumRegister(5, "q"), 0)), ())],
        [
            ("u2", (Qubit(QuantumRegister(5, "q"), 0),), ()),
            ("u2", (Qubit(QuantumRegister(5, "q"), 1),), ()),
        ],
        [
            (
                "measure",
                (Qubit(QuantumRegister(5, "q"), 0),),
                (Clbit(ClassicalRegister(1, "c1"), 0),),
            )
        ],
        [
            ("u2", (Qubit(QuantumRegister(5, "q"), 0),), ()),
            ("u2", (Qubit(QuantumRegister(5, "q"), 1),), ()),
        ],
        [("cx", (Qubit(QuantumRegister(5, "q"), 1), Qubit(QuantumRegister(5, "q"), 0)), ())],
        [
            ("u2", (Qubit(QuantumRegister(5, "q"), 0),), ()),
            ("u2", (Qubit(QuantumRegister(5, "q"), 1),), ()),
        ],
    ]
    self.assertEqual(
        r_exp, [[(op.name, op.qargs, op.cargs) for op in ops] for ops in layered_ops]
    )
def test_get_layered_instructions_op_with_cargs(self):
    """Test _get_layered_instructions op with cargs right of measure

             ┌───┐┌─┐
    q_0: |0>─┤ H ├┤M├──────────────
             └───┘└╥┘┌────────────┐
    q_1: |0>───────╫─┤0           ├
                   ║ │  add_circ  │
    c_0: 0 ════════╩═╡0           ╞
                     └────────────┘
    c_1: 0 ════════════════════════
    """
    qc = QuantumCircuit(2, 2)
    qc.h(0)
    qc.measure(0, 0)
    # Sub-circuit with a classically-conditioned gate; appended so that it
    # shares classical bit 0 with the earlier measure.
    qc_2 = QuantumCircuit(1, 1, name="add_circ")
    qc_2.h(0).c_if(qc_2.cregs[0], 1)
    qc_2.measure(0, 0)
    qc.append(qc_2, [1], [0])
    (_, _, layered_ops) = _utils._get_layered_instructions(qc)
    # The appended op must be layered AFTER the measure because they share
    # the classical bit, even though they act on different qubits.
    expected = [
        [("h", (Qubit(QuantumRegister(2, "q"), 0),), ())],
        [
            (
                "measure",
                (Qubit(QuantumRegister(2, "q"), 0),),
                (Clbit(ClassicalRegister(2, "c"), 0),),
            )
        ],
        [
            (
                "add_circ",
                (Qubit(QuantumRegister(2, "q"), 1),),
                (Clbit(ClassicalRegister(2, "c"), 0),),
            )
        ],
    ]
    self.assertEqual(
        expected, [[(op.name, op.qargs, op.cargs) for op in ops] for ops in layered_ops]
    )
@unittest.skipUnless(optionals.HAS_PYLATEX, "needs pylatexenc")
def test_generate_latex_label_nomathmode(self):
    """A plain ASCII label passes through generate_latex_label unchanged."""
    result = _utils.generate_latex_label("abc")
    self.assertEqual("abc", result)
@unittest.skipUnless(optionals.HAS_PYLATEX, "needs pylatexenc")
def test_generate_latex_label_nomathmode_utf8char(self):
    """Non-ASCII math symbols are wrapped in \\ensuremath commands."""
    expected = "{\\ensuremath{\\iiint}}X{\\ensuremath{\\forall}}Y"
    result = _utils.generate_latex_label("∭X∀Y")
    self.assertEqual(expected, result)
@unittest.skipUnless(optionals.HAS_PYLATEX, "needs pylatexenc")
def test_generate_latex_label_mathmode_utf8char(self):
    """A mathmode prefix combines with utf8 translation of the remainder."""
    expected = "abc_{\\ensuremath{\\iiint}}X{\\ensuremath{\\forall}}Y"
    result = _utils.generate_latex_label("$abc_$∭X∀Y")
    self.assertEqual(expected, result)
@unittest.skipUnless(optionals.HAS_PYLATEX, "needs pylatexenc")
def test_generate_latex_label_mathmode_underscore_outside(self):
    """An underscore outside mathmode still produces the same label."""
    expected = "abc_{\\ensuremath{\\iiint}}X{\\ensuremath{\\forall}}Y"
    result = _utils.generate_latex_label("$abc$_∭X∀Y")
    self.assertEqual(expected, result)
@unittest.skipUnless(optionals.HAS_PYLATEX, "needs pylatexenc")
def test_generate_latex_label_escaped_dollar_signs(self):
    """Escaped dollar signs are kept literal around the translated symbol."""
    result = _utils.generate_latex_label(r"\$∀\$")
    self.assertEqual("${\\ensuremath{\\forall}}$", result)
@unittest.skipUnless(optionals.HAS_PYLATEX, "needs pylatexenc")
def test_generate_latex_label_escaped_dollar_sign_in_mathmode(self):
    """An escaped dollar sign inside mathmode stays literal in the output."""
    expected = "a$bc_{\\ensuremath{\\iiint}}X{\\ensuremath{\\forall}}Y"
    result = _utils.generate_latex_label(r"$a$bc$_∭X∀Y")
    self.assertEqual(expected, result)
def test_array_to_latex(self):
    """Test array_to_latex produces correct latex string"""
    # A 2x4 matrix mixing recognizable surds, rationals, complex values
    # and a plain decimal, to exercise all rendering paths.
    matrix = np.array(
        [
            [np.sqrt(1 / 2), 1 / 16, 1 / np.sqrt(8) + 3j, -0.5 + 0.5j],
            [1 / 3 - 1 / 3j, np.sqrt(1 / 2) * 1j, 34.3210, -9 / 2],
        ]
    )
    expected = (
        "\\begin{bmatrix}\\frac{\\sqrt{2}}{2}&\\frac{1}{16}&"
        "\\frac{\\sqrt{2}}{4}+3i&-\\frac{1}{2}+\\frac{i}{2}\\\\"
        "\\frac{1}{3}+\\frac{i}{3}&\\frac{\\sqrt{2}i}{2}&34.321&-"
        "\\frac{9}{2}\\\\\\end{bmatrix}"
    )
    # Strip whitespace/newlines so the comparison is layout-independent.
    rendered = array_to_latex(matrix, source=True).replace(" ", "").replace("\n", "")
    self.assertEqual(expected, rendered)
# Allow running this test module directly with verbose pytest-style output.
if __name__ == "__main__":
    unittest.main(verbosity=2)
|
https://github.com/Bikramaditya0154/Quantum-Simulation-of-the-ground-states-of-Li-and-Li-2-using-Variational-Quantum-EIgensolver
|
Bikramaditya0154
|
from qiskit import Aer
from qiskit_nature.drivers import UnitsType, Molecule
from qiskit_nature.drivers.second_quantization import (
    ElectronicStructureDriverType,
    ElectronicStructureMoleculeDriver,
)
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
# Single lithium atom with charge +2 and multiplicity 2 — presumably the
# one-electron Li^2+ ion (TODO confirm against the study this supports).
molecule = Molecule(
    geometry=[["Li", [0.0, 0.0, 0.0]]], charge=2, multiplicity=2
)
# PySCF driver in the minimal STO-3G basis supplies the electronic integrals.
driver = ElectronicStructureMoleculeDriver(
    molecule, basis="sto3g", driver_type=ElectronicStructureDriverType.PYSCF
)
es_problem = ElectronicStructureProblem(driver)
# Map fermionic operators onto qubits with the Jordan-Wigner transform.
qubit_converter = QubitConverter(JordanWignerMapper())
from qiskit.providers.aer import StatevectorSimulator
from qiskit import Aer
from qiskit.utils import QuantumInstance
from qiskit_nature.algorithms import VQEUCCFactory
# Exact statevector backend; the factory builds a UCC-ansatz VQE solver.
quantum_instance = QuantumInstance(backend=Aer.get_backend("aer_simulator_statevector"))
vqe_solver = VQEUCCFactory(quantum_instance=quantum_instance)
from qiskit.algorithms import VQE
from qiskit.circuit.library import TwoLocal
# Alternative heuristic ansatz (h/rx rotations, full cz entanglement,
# 2 repetitions).  Constructed but not used by the solve below.
tl_circuit = TwoLocal(
    rotation_blocks=["h", "rx"],
    entanglement_blocks="cz",
    entanglement="full",
    reps=2,
    parameter_prefix="y",
)
another_solver = VQE(
    ansatz=tl_circuit,
    quantum_instance=QuantumInstance(Aer.get_backend("aer_simulator_statevector")),
)
from qiskit_nature.algorithms import GroundStateEigensolver
# Run the ground-state calculation with the UCC-based VQE and report it.
calc = GroundStateEigensolver(qubit_converter, vqe_solver)
res = calc.solve(es_problem)
print(res)
|
https://github.com/epelaaez/QuantumLibrary
|
epelaaez
|
from qiskit import *
from qiskit.visualization import plot_histogram
from qiskit.circuit.library import QFT, SwapGate
from qiskit.extensions import UnitaryGate
from random import randrange
from sympy import Matrix
import numpy as np
from fractions import Fraction
import pandas as pd
def euclids(a, b):
    """
    Given two integers a and b, compute gcd(a, b) using Euclid's algorithm.

    Parameters:
    -----------
    a: int
        First integer for Euclid's algorithm
    b: int
        Second integer for Euclid's algorithm

    Returns:
    --------
    int
        gcd(a, b)
    """
    # Canonical Euclid: repeatedly replace (a, b) with (b, a mod b) until
    # the remainder vanishes; the last non-zero value is the gcd.  Unlike
    # the original while/break form, this is also well-defined for b == 0
    # (returns a) instead of raising ZeroDivisionError.
    while b:
        a, b = b, a % b
    return a
def a2jmodN(a, j, N):
    """
    Compute a^{2^j} (mod N) by modular exponentiation.

    Parameters:
    -----------
    a: int
        Value for a
    j: int
        Value for j
    N: int
        Value for N

    Returns:
    --------
    int
        a^{2^j} (mod N)
    """
    # Three-argument pow() performs modular exponentiation in C and is
    # equivalent to squaring j times while reducing mod N at each step.
    # Bugfix: the original loop returned `a` un-reduced when j == 0; this
    # always returns the canonical representative in [0, N).
    return pow(a, 1 << j, N)
def mod_inv(a, N):
    """
    Return the multiplicative inverse of a modulo N.

    Parameters:
    -----------
    a: int
        Value to invert
    N: int
        Modulus

    Returns:
    --------
    int
        The i in [0, N) with (a * i) % N == 1

    Raises:
    -------
    Exception
        If gcd(a, N) != 1, i.e. no inverse exists.
    """
    try:
        # Python 3.8+: pow with exponent -1 computes the modular inverse
        # via the extended Euclidean algorithm in O(log N), replacing the
        # original O(N) brute-force scan.
        return pow(a, -1, N)
    except ValueError:
        # Preserve the original exception type and message for callers.
        raise Exception(f"Modular inverse of {a} mod {N} doesn't exist") from None
def set_initial(n, val_a):
    """
    Construct gate to set initial state of register with size n to val_a.
    For example n=4 with val_a=5 will apply X gates on qubits 0 and 2.

    Parameters:
    -----------
    n: int
        Size of register we want to initialize
    val_a: int
        Value to which we want to initialize

    Returns:
    --------
    init_gate: Gate
        Constructed gate
    """
    # The value must fit in n qubits.
    if val_a > (2**n) - 1:
        raise Exception(f'Cannot initialize {val_a} into given register, there are no sufficient qubits')
    register = QuantumRegister(n)
    circ = QuantumCircuit(register)
    # Flip qubit k exactly when bit k of val_a is set (little-endian).
    for bit in range(n):
        if (val_a >> bit) & 1:
            circ.x(bit)
    return circ.to_gate(label=f'Init {val_a}')
# Demo: initialise a 4-qubit register to |5> = |0101> and draw the circuit.
qc = QuantumCircuit(4)
init = set_initial(4, 5)
qc.append(init, [0,1,2,3])
qc = transpile(qc, basis_gates=['u', 'cx'])
qc.draw('mpl')
def adder(n, val_a, dag=False):
    """
    Construct gate to add val_a into register b in the Fourier basis.
    Register b must contain the number on the Fourier basis already.
    The subtracter gate gives us b - a if b ≥ a or 2^{n+1}−(a−b) if b < a. It is obtained by inversing the adder.

    Parameters:
    -----------
    n: QuantumRegister
        Size of register b
    val_a: int
        Value to which register a will be initialized
    dag: Boolean
        If set to true, the dagger of the adder gate (the subtracter) is appended

    Returns:
    --------
    adder_gate: Gate
        Constructed gate
    """
    bin_a = "{0:b}".format(val_a).zfill(n)
    # Single-qubit phase gate P(lam) = diag(1, e^{i*lam}).
    phase = lambda lam: np.array([[1, 0], [0, np.exp(1j * lam)]])
    identity = np.array([[1, 0], [0, 1]])
    # For each qubit i, accumulate the phase rotations contributed by every
    # set bit j >= i of val_a — a Draper-style QFT adder built as a matrix.
    arr_gates = []
    for i in range(n):
        qubit_gate = identity
        for j in range(i, n):
            if bin_a[j] == '1':
                qubit_gate = phase(np.pi / (2 ** (j - i))) @ qubit_gate
        arr_gates.append(qubit_gate)
    # Tensor the per-qubit factors into one 2^n x 2^n diagonal unitary.
    # NOTE: kron order matters — later qubits become the more-significant
    # factors, matching Qiskit's little-endian qubit ordering.
    unitary = arr_gates[0]
    for i in range(1, len(arr_gates)):
        unitary = np.kron(arr_gates[i], unitary)
    adder_gate = UnitaryGate(unitary)
    adder_gate.label = f"Add {val_a}"
    if dag == True:
        # The inverse (dagger) of Fourier-basis addition is subtraction.
        adder_gate = adder_gate.inverse()
        adder_gate.label = f"Subtract {val_a}"
    return adder_gate
# Demo: compute 5 - 3 with the Fourier-basis subtracter.
b = QuantumRegister(4, name='q')
q = QuantumCircuit(b)
add_1 = 5
add_2 = 3
init = set_initial(b.size, add_1)
q.append(init, b[:])
# Move |5> into the Fourier basis, subtract 3, transform back.
qft = QFT(4, name="$QFT$")
q.append(qft, b[:])
add = adder(4, add_2, dag=True)
q.append(add, b[:])
qft_i = QFT(4, inverse=True, name="$QFT^\dag$")
q.append(qft_i, b[:])
display(q.draw('mpl'))
backend = Aer.get_backend('statevector_simulator')
q = transpile(q, basis_gates=['cx', 'u'])
result = backend.run(q).result()
counts = result.get_counts()
counts
def mod_adder(n, val_a, val_N):
    """
    Construct gate to compute a + b mod N in the Fourier basis.
    Register b must contain the number on the Fourier basis already, and the answer will be in this register.

    Parameters:
    -----------
    n: QuantumRegister
        Size of register b
    val_a: int
        Value to add to register
    val_N: int
        We take mod of a + b respect to this value

    Returns:
    --------
    mod_adder_gate: Gate
        Constructed gate
    """
    # Layout: 2 control qubits, the n-qubit target b, and 1 ancilla that
    # records whether an overflow correction (adding N back) is needed.
    # This follows the standard doubly-controlled QFT modular-adder
    # construction — compare Beauregard's Shor circuit (TODO confirm).
    reg_c = QuantumRegister(2)
    reg_b = QuantumRegister(n)
    aux = QuantumRegister(1)
    gate = QuantumCircuit(reg_c, reg_b, aux)
    qft = QFT(n, name="$QFT$").to_gate()
    qft_inv = QFT(n, inverse=True, name="$QFT^\dag$").to_gate()
    # Add a (controlled), then subtract N unconditionally.
    gate.append(adder(n, val_a).control(2), reg_c[:] + reg_b[:])
    gate.append(adder(n, val_N, dag=True), reg_b[:])
    # Leave the Fourier basis to read the sign bit into the ancilla.
    gate.append(qft_inv, reg_b[:])
    gate.cx(reg_b[-1], aux[0])
    gate.append(qft, reg_b[:])
    # If we went negative, add N back (controlled on the ancilla).
    gate.append(adder(n, val_N).control(1), aux[:] + reg_b[:])
    # Uncompute the ancilla: subtract a, test the (now inverted) sign bit,
    # then restore the sum by adding a again.
    gate.append(adder(n, val_a, dag=True).control(2), reg_c[:] + reg_b[:])
    gate.append(qft_inv, reg_b[:])
    gate.x(reg_b[-1])
    gate.cx(reg_b[-1], aux[0])
    gate.x(reg_b[-1])
    gate.append(qft, reg_b[:])
    gate.append(adder(n, val_a).control(2), reg_c[:] + reg_b[:])
    mod_adder_gate = gate.to_gate(label=f"Add {val_a} mod {val_N}")
    return mod_adder_gate
# Demo: compute (4 + 7) mod 8 with the modular adder, then add 3 more with
# the plain adder before leaving the Fourier basis.
c = QuantumRegister(2, name='c')
b = QuantumRegister(4, name='b')
aux = QuantumRegister(1, name='aux')
clas = ClassicalRegister(4, name='cl')
aux_clas = ClassicalRegister(1, name='acl')
qc = QuantumCircuit(c,b,aux,clas,aux_clas)
val_a = 4
val_b = 7 # b < 2 ** (n - 1)
val_N = 8
init = set_initial(b.size, val_b)
qc.append(init, b[:])
qft = QFT(4)
qc.append(qft, b[:])
# Both control qubits must be |1> for the modular adder to act.
qc.x(c)
mod_add = mod_adder(b.size, val_a, val_N)
qc.append(mod_add, c[:] + b[:] + aux[:])
add = adder(b.size, 3)
qc.append(add, b[:])
qft_1 = QFT(4, inverse=True)
qc.append(qft_1, b[:])
qc.measure(b,clas)
qc.measure(aux,aux_clas)
display(qc.draw('mpl'))
backend = Aer.get_backend('qasm_simulator')
qc = transpile(qc, basis_gates=['u', 'cx'])
result = backend.run(qc).result()
counts = result.get_counts()
counts
def ctrl_mult(n, val_a, val_N, dag=False):
    """
    Construct gate that computes (b + ax) mod N if control qubit is set to 1.
    The gate transforms the value in register b to the Fourier basis within it.

    Parameters:
    -----------
    n: QuantumRegister
        Size of registers b and x
    val_a: int
        Value to multiply by x
    val_N: int
        We take mod of (b + ax) mod N respect to this value
    dag: bool
        If set to true, the dagger of the adder gate (the subtracter) is appended

    Returns:
    --------
    ctrl_mult_gate: Gate
        Constructed gate
    """
    reg_c = QuantumRegister(1)
    reg_x = QuantumRegister(n)
    reg_b = QuantumRegister(n)
    aux = QuantumRegister(1)
    gate = QuantumCircuit(reg_c, reg_x, reg_b, aux)
    qft = QFT(n, name="$QFT$").to_gate()
    qft_inv = QFT(n, inverse=True, name="$QFT^\dag$").to_gate()
    gate.append(qft, reg_b[:])
    # a*x = sum_i (2^i * a) * x_i: for each bit x_i of x, conditionally add
    # 2^i * a (mod N) into b.  Each mod_adder takes the overall control and
    # x_i as its two control qubits.
    for i in range(n):
        gate.append(mod_adder(n, (2**i) * val_a, val_N), reg_c[:] + [reg_x[:][i]] + reg_b[:] + aux[:])
    gate.append(qft_inv, reg_b[:])
    ctrl_mult_gate = gate.to_gate(label=f"Mult {val_a} mod {val_N}")
    if dag == True:
        ctrl_mult_gate = ctrl_mult_gate.inverse()
        ctrl_mult_gate.label = f"Mult {val_a} mod {val_N} inv"
    return ctrl_mult_gate
def u_a(n, val_a, val_N):
    """
    Construct the controlled modular-multiplication gate U_a for Shor's
    algorithm: controlled multiply-by-a, swap of the x/y registers, then the
    inverse controlled multiply-by-a^{-1} to uncompute the working register.

    Parameters:
    -----------
    n: int
        Size of registers x and y
    val_a: int
        Multiplier; must be invertible mod val_N (mod_inv raises otherwise)
    val_N: int
        Modulus

    Returns:
    --------
    u_a_gate: Gate
        Constructed gate acting on 1 control + 2n data + 1 ancilla qubits
    """
    reg_c = QuantumRegister(1)
    reg_x = QuantumRegister(n)
    reg_y = QuantumRegister(n)
    aux = QuantumRegister(1)
    gate = QuantumCircuit(reg_c, reg_x, reg_y, aux)
    gate.append(ctrl_mult(n, val_a, val_N), reg_c[:] + reg_x[:] + reg_y[:] + aux[:])
    # Controlled register swap, built from n pairwise SWAPs with one control.
    temp_qc = QuantumCircuit(2*n)
    temp_qc.swap([i for i in range(n)], [i for i in range(n, 2*n)])
    cswap = temp_qc.to_gate(label='CSWAP').control(1)
    gate.append(cswap, reg_c[:] + reg_x[:] + reg_y[:])
    # Multiplying by a^{-1} in reverse clears the scratch register.
    gate.append(ctrl_mult(n, mod_inv(val_a, val_N), val_N, dag=True), reg_c[:] + reg_x[:] + reg_y[:] + aux[:])
    u_a_gate = gate.to_gate(label=f"$U_{val_a}$")
    return u_a_gate
# Demo: apply U_a once to |x=1> and check that the x register ends up
# holding (a*x) mod n.
val_a = 4
val_n = 7
val_x = 1
c = QuantumRegister(1, name='c')
x = QuantumRegister(5, name='x')
y = QuantumRegister(5, name='y')
aux = QuantumRegister(1, name='aux')
clas_0 = ClassicalRegister(5)
clas_1 = ClassicalRegister(5)
qc = QuantumCircuit(c, x, y, aux, clas_0, clas_1)
qc.x(c)  # enable the control so U_a acts
init_x = set_initial(5, val_x)
qc.append(init_x, x[:])
u = u_a(5, val_a, val_n)
qc.append(u, c[:] + x[:] + y[:] + aux[:])
qc.measure(x, clas_0)
qc.measure(y, clas_1)
display(qc.draw('mpl'))
print("Expected: ", (val_a * val_x) % val_n)
backend = Aer.get_backend('qasm_simulator')
qc = transpile(qc, basis_gates=['u', 'cx'])
result = backend.run(qc).result()
counts = result.get_counts()
print(counts)
# Shor period finding for a = 2, N = 15: Hadamards on a 2n-qubit counting
# register, one controlled U_{a^{2^j}} per counting qubit, inverse QFT,
# then measurement of the counting register.
n = len("{0:b}".format(15))
a = 2
N = 15
reg_a = QuantumRegister(2 * n, name='a')
reg_b = QuantumRegister(2 * n, name='b')
aux = QuantumRegister(1, name='aux')
clas = ClassicalRegister(2 * n, name='c')
qc = QuantumCircuit(reg_a, reg_b, aux, clas)
qc.h(reg_a)
qc.x(reg_b[0])  # target register starts in |1>
qc.append(u_a(n, a2jmodN(a, 0, N), N), [reg_a[:][0]] + reg_b[:] + aux[:])
qc.append(u_a(n, a2jmodN(a, 1, N), N), [reg_a[:][1]] + reg_b[:] + aux[:])
qc.append(u_a(n, a2jmodN(a, 2, N), N), [reg_a[:][2]] + reg_b[:] + aux[:])
qc.append(u_a(n, a2jmodN(a, 3, N), N), [reg_a[:][3]] + reg_b[:] + aux[:])
qc.append(u_a(n, a2jmodN(a, 4, N), N), [reg_a[:][4]] + reg_b[:] + aux[:])
qc.append(u_a(n, a2jmodN(a, 5, N), N), [reg_a[:][5]] + reg_b[:] + aux[:])
qc.append(u_a(n, a2jmodN(a, 6, N), N), [reg_a[:][6]] + reg_b[:] + aux[:])
qc.append(u_a(n, a2jmodN(a, 7, N), N), [reg_a[:][7]] + reg_b[:] + aux[:])
qc.append(QFT(2 * n, inverse=True), reg_a[:])
qc.barrier()
qc.measure(reg_a, clas)
qc.draw('mpl')
aer_sim = Aer.get_backend('aer_simulator')
qc = transpile(qc, aer_sim)
results = aer_sim.run(qc).result()
counts = results.get_counts()
plot_histogram(counts)
# Keep only the measurement outcomes observed more than 50 times; `total`
# accumulates the overall shot count.
# Bugfix: `total` was used (`total += ...`) without ever being initialised,
# which raised NameError on the first iteration.
total = 0
filtered_counts = []
for count in counts:
    total += counts[count]
    if counts[count] > 50:
        filtered_counts.append(count)
filtered_counts
# Convert each surviving bitstring into the phase it encodes:
# phase = decimal / 2^(counting-register width).
rows, measured_phases = [], []
for output in filtered_counts:
    decimal = int(output, 2)  # Convert (base 2) string to decimal
    phase = decimal/(2**(2 * n))  # Find corresponding eigenvalue
    measured_phases.append(phase)
    # Add these values to the rows in our table:
    rows.append([f"{output}(bin) = {decimal:>3}(dec)",
                 f"{decimal}/{2**(2 * n)} = {phase:.2f}"])
# Print the rows in a table
headers=["Register Output", "Phase"]
df = pd.DataFrame(rows, columns=headers)
print(df)
# Turn each measured phase into a period guess via continued fractions
# (limit_denominator bounds the denominator by N = 15).
rows = []
# Bugfix: `r_guesses` was appended to without ever being initialised,
# which raised NameError; collect the candidate periods here.
r_guesses = []
for phase in measured_phases:
    frac = Fraction(phase).limit_denominator(15)
    rows.append([phase, f"{frac.numerator}/{frac.denominator}", frac.denominator])
    r_guesses.append(frac.denominator)
# Print as a table
headers = ["Phase", "Fraction", "Guess for r"]
df = pd.DataFrame(rows, columns=headers)
print(df)
# With period r, candidate factors of N are gcd(a^{r/2} - 1, N) and
# gcd(a^{r/2} + 1, N); r = 4 is the known period of 2 mod 15.
r = 4
guesses = [euclids(a**(r//2)-1, N), euclids(a**(r//2)+1, N)]
print(guesses)
|
https://github.com/ElePT/qiskit-algorithms-test
|
ElePT
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test Measurement Error Mitigation"""
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
from ddt import ddt, data, unpack
import numpy as np
import rustworkx as rx
from qiskit import QuantumCircuit, execute
from qiskit.quantum_info import Pauli
from qiskit.exceptions import QiskitError
from qiskit.utils import QuantumInstance, algorithm_globals
from qiskit_algorithms import VQE, QAOA
from qiskit.opflow import I, X, Z, PauliSumOp
from qiskit_algorithms.optimizers import SPSA, COBYLA
from qiskit.circuit.library import EfficientSU2
from qiskit.utils.mitigation import CompleteMeasFitter, TensoredMeasFitter
from qiskit.utils.measurement_error_mitigation import build_measurement_error_mitigation_circuits
from qiskit.utils import optionals
if optionals.HAS_AER:
# pylint: disable=no-name-in-module
from qiskit import Aer
from qiskit.providers.aer import noise
if optionals.HAS_IGNIS:
# pylint: disable=no-name-in-module
from qiskit.ignis.mitigation.measurement import (
CompleteMeasFitter as CompleteMeasFitter_IG,
TensoredMeasFitter as TensoredMeasFitter_IG,
)
@ddt
class TestMeasurementErrorMitigation(QiskitAlgorithmsTestCase):
"""Test measurement error mitigation."""
@unittest.skipUnless(optionals.HAS_AER, "qiskit-aer is required for this test")
@data(
    ("CompleteMeasFitter", None, False),
    ("TensoredMeasFitter", None, False),
    ("TensoredMeasFitter", [[0, 1]], True),
    ("TensoredMeasFitter", [[1], [0]], False),
)
@unpack
def test_measurement_error_mitigation_with_diff_qubit_order(
    self,
    fitter_str,
    mit_pattern,
    fails,
):
    """measurement error mitigation with different qubit order"""
    algorithm_globals.random_seed = 0
    # build noise model: asymmetric readout error on every qubit
    # (P(read 1 | prepared 0) = 0.1, P(read 0 | prepared 1) = 0.25)
    noise_model = noise.NoiseModel()
    read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1], [0.25, 0.75]])
    noise_model.add_all_qubit_readout_error(read_err)
    fitter_cls = (
        CompleteMeasFitter if fitter_str == "CompleteMeasFitter" else TensoredMeasFitter
    )
    backend = Aer.get_backend("aer_simulator")
    # QuantumInstance (and its mitigation hooks) is deprecated, hence the
    # warning contexts around every use below.
    with self.assertWarns(DeprecationWarning):
        quantum_instance = QuantumInstance(
            backend=backend,
            seed_simulator=1679,
            seed_transpiler=167,
            shots=1000,
            noise_model=noise_model,
            measurement_error_mitigation_cls=fitter_cls,
            cals_matrix_refresh_period=0,
            mit_pattern=mit_pattern,
        )
    # circuit: two Bell-pair circuits whose measurements map qubits to
    # classical bits in opposite orders
    qc1 = QuantumCircuit(2, 2)
    qc1.h(0)
    qc1.cx(0, 1)
    qc1.measure(0, 0)
    qc1.measure(1, 1)
    qc2 = QuantumCircuit(2, 2)
    qc2.h(0)
    qc2.cx(0, 1)
    qc2.measure(1, 0)
    qc2.measure(0, 1)
    with self.assertWarns(DeprecationWarning):
        if fails:
            # mit_pattern [[0, 1]] is invalid for TensoredMeasFitter here.
            self.assertRaisesRegex(
                QiskitError,
                "Each element in the mit pattern should have length 1.",
                quantum_instance.execute,
                [qc1, qc2],
            )
        else:
            quantum_instance.execute([qc1, qc2])
    self.assertGreater(quantum_instance.time_taken, 0.0)
    quantum_instance.reset_execution_results()
    # failure case: qc3 involves a third qubit, so mixing it with qc1 in
    # one execution must raise
    qc3 = QuantumCircuit(3, 3)
    qc3.h(2)
    qc3.cx(1, 2)
    qc3.measure(2, 1)
    qc3.measure(1, 2)
    with self.assertWarns(DeprecationWarning):
        self.assertRaises(QiskitError, quantum_instance.execute, [qc1, qc3])
@unittest.skipUnless(optionals.HAS_AER, "qiskit-aer is required for this test")
@data(("CompleteMeasFitter", None), ("TensoredMeasFitter", [[0], [1]]))
def test_measurement_error_mitigation_with_vqe(self, config):
    """measurement error mitigation test with vqe"""
    fitter_str, mit_pattern = config
    algorithm_globals.random_seed = 0
    # build noise model: asymmetric readout error on every qubit
    noise_model = noise.NoiseModel()
    read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1], [0.25, 0.75]])
    noise_model.add_all_qubit_readout_error(read_err)
    fitter_cls = (
        CompleteMeasFitter if fitter_str == "CompleteMeasFitter" else TensoredMeasFitter
    )
    backend = Aer.get_backend("aer_simulator")
    with self.assertWarns(DeprecationWarning):
        quantum_instance = QuantumInstance(
            backend=backend,
            seed_simulator=167,
            seed_transpiler=167,
            noise_model=noise_model,
            measurement_error_mitigation_cls=fitter_cls,
            mit_pattern=mit_pattern,
        )
    optimizer = SPSA(maxiter=200)
    ansatz = EfficientSU2(2, reps=1)
    # Two-qubit H2 Hamiltonian in the opflow (deprecated) representation.
    with self.assertWarns(DeprecationWarning):
        h2_hamiltonian = (
            -1.052373245772859 * (I ^ I)
            + 0.39793742484318045 * (I ^ Z)
            - 0.39793742484318045 * (Z ^ I)
            - 0.01128010425623538 * (Z ^ Z)
            + 0.18093119978423156 * (X ^ X)
        )
    with self.assertWarns(DeprecationWarning):
        vqe = VQE(ansatz=ansatz, optimizer=optimizer, quantum_instance=quantum_instance)
        result = vqe.compute_minimum_eigenvalue(operator=h2_hamiltonian)
    self.assertGreater(quantum_instance.time_taken, 0.0)
    quantum_instance.reset_execution_results()
    # With mitigation enabled the noisy VQE should land near the H2 ground
    # state energy of about -1.86.
    self.assertAlmostEqual(result.eigenvalue.real, -1.86, delta=0.05)
def _get_operator(self, weight_matrix):
    """Generate Hamiltonian for the max-cut problem of a graph.

    Args:
        weight_matrix (numpy.ndarray) : adjacency matrix.

    Returns:
        PauliSumOp: operator for the Hamiltonian
        float: a constant shift for the obj function.
    """
    num_nodes = weight_matrix.shape[0]
    pauli_list = []
    shift = 0
    # For every edge (i, j): add 0.5 * w_ij * Z_i Z_j to the operator and
    # subtract the same amount from the constant shift (Ising form of
    # max-cut).  Only the lower triangle is scanned, so each edge counts once.
    for i in range(num_nodes):
        for j in range(i):
            if weight_matrix[i, j] != 0:
                x_p = np.zeros(num_nodes, dtype=bool)
                z_p = np.zeros(num_nodes, dtype=bool)
                z_p[i] = True
                z_p[j] = True
                pauli_list.append([0.5 * weight_matrix[i, j], Pauli((z_p, x_p))])
                shift -= 0.5 * weight_matrix[i, j]
    opflow_list = [(pauli[1].to_label(), pauli[0]) for pauli in pauli_list]
    # PauliSumOp is deprecated, hence the warning context.
    with self.assertWarns(DeprecationWarning):
        return PauliSumOp.from_list(opflow_list), shift
@unittest.skipUnless(optionals.HAS_AER, "qiskit-aer is required for this test")
def test_measurement_error_mitigation_qaoa(self):
    """measurement error mitigation test with QAOA"""
    algorithm_globals.random_seed = 167
    backend = Aer.get_backend("aer_simulator")
    # Random 5-node max-cut instance with edge probability 0.5.
    w = rx.adjacency_matrix(
        rx.undirected_gnp_random_graph(5, 0.5, seed=algorithm_globals.random_seed)
    )
    qubit_op, _ = self._get_operator(w)
    initial_point = np.asarray([0.0, 0.0])
    # Compute first without noise to obtain the reference eigenvalue.
    with self.assertWarns(DeprecationWarning):
        quantum_instance = QuantumInstance(
            backend=backend,
            seed_simulator=algorithm_globals.random_seed,
            seed_transpiler=algorithm_globals.random_seed,
        )
    with self.assertWarns(DeprecationWarning):
        qaoa = QAOA(
            optimizer=COBYLA(maxiter=3),
            quantum_instance=quantum_instance,
            initial_point=initial_point,
        )
        result = qaoa.compute_minimum_eigenvalue(operator=qubit_op)
    ref_eigenvalue = result.eigenvalue.real
    # compute with noise
    # build noise model: asymmetric readout error on every qubit
    noise_model = noise.NoiseModel()
    read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1], [0.25, 0.75]])
    noise_model.add_all_qubit_readout_error(read_err)
    with self.assertWarns(DeprecationWarning):
        quantum_instance = QuantumInstance(
            backend=backend,
            seed_simulator=algorithm_globals.random_seed,
            seed_transpiler=algorithm_globals.random_seed,
            noise_model=noise_model,
            measurement_error_mitigation_cls=CompleteMeasFitter,
            shots=10000,
        )
    with self.assertWarns(DeprecationWarning):
        qaoa = QAOA(
            optimizer=COBYLA(maxiter=3),
            quantum_instance=quantum_instance,
            initial_point=initial_point,
        )
        result = qaoa.compute_minimum_eigenvalue(operator=qubit_op)
    # Mitigated noisy result should match the noiseless reference.
    self.assertAlmostEqual(result.eigenvalue.real, ref_eigenvalue, delta=0.05)
@unittest.skipUnless(optionals.HAS_AER, "qiskit-aer is required for this test")
@unittest.skipUnless(optionals.HAS_IGNIS, "qiskit-ignis is required to run this test")
@data("CompleteMeasFitter", "TensoredMeasFitter")
def test_measurement_error_mitigation_with_diff_qubit_order_ignis(self, fitter_str):
    """measurement error mitigation with different qubit order"""
    # Same scenario as the non-ignis variant above, but exercising the
    # deprecated qiskit-ignis fitter classes.
    algorithm_globals.random_seed = 0
    # build noise model: asymmetric readout error on every qubit
    noise_model = noise.NoiseModel()
    read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1], [0.25, 0.75]])
    noise_model.add_all_qubit_readout_error(read_err)
    fitter_cls = (
        CompleteMeasFitter_IG if fitter_str == "CompleteMeasFitter" else TensoredMeasFitter_IG
    )
    backend = Aer.get_backend("aer_simulator")
    with self.assertWarns(DeprecationWarning):
        quantum_instance = QuantumInstance(
            backend=backend,
            seed_simulator=1679,
            seed_transpiler=167,
            shots=1000,
            noise_model=noise_model,
            measurement_error_mitigation_cls=fitter_cls,
            cals_matrix_refresh_period=0,
        )
    # circuit: two Bell-pair circuits with opposite qubit->clbit mappings
    qc1 = QuantumCircuit(2, 2)
    qc1.h(0)
    qc1.cx(0, 1)
    qc1.measure(0, 0)
    qc1.measure(1, 1)
    qc2 = QuantumCircuit(2, 2)
    qc2.h(0)
    qc2.cx(0, 1)
    qc2.measure(1, 0)
    qc2.measure(0, 1)
    if fitter_cls == TensoredMeasFitter_IG:
        # The ignis tensored fitter cannot re-order qubits between circuits.
        with self.assertWarnsRegex(DeprecationWarning, r".*ignis.*"):
            self.assertRaisesRegex(
                QiskitError,
                "TensoredMeasFitter doesn't support subset_fitter.",
                quantum_instance.execute,
                [qc1, qc2],
            )
    else:
        # this should run smoothly
        with self.assertWarnsRegex(DeprecationWarning, r".*ignis.*"):
            quantum_instance.execute([qc1, qc2])
        self.assertGreater(quantum_instance.time_taken, 0.0)
        quantum_instance.reset_execution_results()
    # failure case: qc3 involves a third qubit, so mixing it with qc1 raises
    qc3 = QuantumCircuit(3, 3)
    qc3.h(2)
    qc3.cx(1, 2)
    qc3.measure(2, 1)
    qc3.measure(1, 2)
    self.assertRaises(QiskitError, quantum_instance.execute, [qc1, qc3])
@unittest.skipUnless(optionals.HAS_AER, "qiskit-aer is required for this test")
@unittest.skipUnless(optionals.HAS_IGNIS, "qiskit-ignis is required to run this test")
@data(("CompleteMeasFitter", None), ("TensoredMeasFitter", [[0], [1]]))
def test_measurement_error_mitigation_with_vqe_ignis(self, config):
    """measurement error mitigation test with vqe"""
    # Same VQE scenario as above, but with the deprecated ignis fitters.
    fitter_str, mit_pattern = config
    algorithm_globals.random_seed = 0
    # build noise model: asymmetric readout error on every qubit
    noise_model = noise.NoiseModel()
    read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1], [0.25, 0.75]])
    noise_model.add_all_qubit_readout_error(read_err)
    fitter_cls = (
        CompleteMeasFitter_IG if fitter_str == "CompleteMeasFitter" else TensoredMeasFitter_IG
    )
    backend = Aer.get_backend("aer_simulator")
    with self.assertWarns(DeprecationWarning):
        quantum_instance = QuantumInstance(
            backend=backend,
            seed_simulator=167,
            seed_transpiler=167,
            noise_model=noise_model,
            measurement_error_mitigation_cls=fitter_cls,
            mit_pattern=mit_pattern,
        )
    # Two-qubit H2 Hamiltonian in the opflow representation.
    h2_hamiltonian = (
        -1.052373245772859 * (I ^ I)
        + 0.39793742484318045 * (I ^ Z)
        - 0.39793742484318045 * (Z ^ I)
        - 0.01128010425623538 * (Z ^ Z)
        + 0.18093119978423156 * (X ^ X)
    )
    optimizer = SPSA(maxiter=200)
    ansatz = EfficientSU2(2, reps=1)
    with self.assertWarnsRegex(DeprecationWarning):
        vqe = VQE(ansatz=ansatz, optimizer=optimizer, quantum_instance=quantum_instance)
        result = vqe.compute_minimum_eigenvalue(operator=h2_hamiltonian)
    self.assertGreater(quantum_instance.time_taken, 0.0)
    quantum_instance.reset_execution_results()
    # Mitigated noisy VQE should land near the H2 ground state (~ -1.86).
    self.assertAlmostEqual(result.eigenvalue.real, -1.86, delta=0.05)
@unittest.skipUnless(optionals.HAS_AER, "qiskit-aer is required for this test")
@unittest.skipUnless(optionals.HAS_IGNIS, "qiskit-ignis is required to run this test")
def test_calibration_results(self):
    """check that results counts are the same with/without error mitigation"""
    # On a noiseless backend, the mitigation fitter must not change the
    # measured counts at all.
    algorithm_globals.random_seed = 1679
    np.random.seed(algorithm_globals.random_seed)
    qc = QuantumCircuit(1)
    qc.x(0)
    qc_meas = qc.copy()
    qc_meas.measure_all()
    backend = Aer.get_backend("aer_simulator")
    # counts_array[0] = with mitigation, counts_array[1] = without
    counts_array = [None, None]
    for idx, is_use_mitigation in enumerate([True, False]):
        with self.assertWarns(DeprecationWarning):
            if is_use_mitigation:
                quantum_instance = QuantumInstance(
                    backend,
                    seed_simulator=algorithm_globals.random_seed,
                    seed_transpiler=algorithm_globals.random_seed,
                    shots=1024,
                    measurement_error_mitigation_cls=CompleteMeasFitter_IG,
                )
                with self.assertWarnsRegex(DeprecationWarning, r".*ignis.*"):
                    counts_array[idx] = quantum_instance.execute(qc_meas).get_counts()
            else:
                quantum_instance = QuantumInstance(
                    backend,
                    seed_simulator=algorithm_globals.random_seed,
                    seed_transpiler=algorithm_globals.random_seed,
                    shots=1024,
                )
                counts_array[idx] = quantum_instance.execute(qc_meas).get_counts()
    self.assertEqual(
        counts_array[0], counts_array[1], msg="Counts different with/without fitter."
    )
@unittest.skipUnless(optionals.HAS_AER, "qiskit-aer is required for this test")
def test_circuit_modified(self):
    """tests that circuits don't get modified on QI execute with error mitigation

    as per issue #7449
    """
    algorithm_globals.random_seed = 1679
    np.random.seed(algorithm_globals.random_seed)
    circuit = QuantumCircuit(1)
    circuit.x(0)
    circuit.measure_all()
    with self.assertWarns(DeprecationWarning):
        qi = QuantumInstance(
            Aer.get_backend("aer_simulator"),
            seed_simulator=algorithm_globals.random_seed,
            seed_transpiler=algorithm_globals.random_seed,
            shots=1024,
            measurement_error_mitigation_cls=CompleteMeasFitter,
        )
    # The error happens on transpiled circuits since "execute" was changing the input array
    # Non transpiled circuits didn't have a problem because a new transpiled array was created
    # internally.
    circuits_ref = qi.transpile(circuit)  # always returns a new array
    circuits_input = circuits_ref.copy()
    with self.assertWarns(DeprecationWarning):
        _ = qi.execute(circuits_input, had_transpiled=True)
    # Regression check for #7449: the caller's array must be untouched.
    self.assertEqual(circuits_ref, circuits_input, msg="Transpiled circuit array modified.")
@unittest.skipUnless(optionals.HAS_AER, "qiskit-aer is required for this test")
def test_tensor_subset_fitter(self):
    """Test the subset fitter method of the tensor fitter."""
    # Construct a noise model where readout has errors of different strengths.
    noise_model = noise.NoiseModel()
    # big error
    read_err0 = noise.errors.readout_error.ReadoutError([[0.90, 0.10], [0.25, 0.75]])
    # ideal
    read_err1 = noise.errors.readout_error.ReadoutError([[1.00, 0.00], [0.00, 1.00]])
    # small error
    read_err2 = noise.errors.readout_error.ReadoutError([[0.98, 0.02], [0.03, 0.97]])
    noise_model.add_readout_error(read_err0, (0,))
    noise_model.add_readout_error(read_err1, (1,))
    noise_model.add_readout_error(read_err2, (2,))
    # One single-qubit calibration group per qubit (tensored mitigation).
    mit_pattern = [[idx] for idx in range(3)]
    backend = Aer.get_backend("aer_simulator")
    backend.set_options(seed_simulator=123)
    with self.assertWarns(DeprecationWarning):
        mit_circuits = build_measurement_error_mitigation_circuits(
            [0, 1, 2],
            TensoredMeasFitter,
            backend,
            backend_config={},
            compile_config={},
            mit_pattern=mit_pattern,
        )
        result = execute(mit_circuits[0], backend, noise_model=noise_model).result()
        fitter = TensoredMeasFitter(result, mit_pattern=mit_pattern)
    cal_matrices = fitter.cal_matrices
    # Check that permutations and permuted subsets match.
    for subset in [[1, 0], [1, 2], [0, 2], [2, 0, 1]]:
        with self.subTest(subset=subset):
            with self.assertWarns(DeprecationWarning):
                new_fitter = fitter.subset_fitter(subset)
            for idx, qubit in enumerate(subset):
                self.assertTrue(np.allclose(new_fitter.cal_matrices[idx], cal_matrices[qubit]))
    # Requesting a qubit outside the mitigation pattern must raise.
    self.assertRaisesRegex(
        QiskitError,
        "Qubit 3 is not in the mit pattern",
        fitter.subset_fitter,
        [0, 2, 3],
    )
    # Test that we properly correct a circuit with permuted measurements.
    circuit = QuantumCircuit(3, 3)
    circuit.x(range(3))
    circuit.measure(1, 0)
    circuit.measure(2, 1)
    circuit.measure(0, 2)
    result = execute(
        circuit, backend, noise_model=noise_model, shots=1000, seed_simulator=0
    ).result()
    # NOTE(review): `subset` below is the leftover loop variable from above
    # ([2, 0, 1]) and only labels the subTest -- confirm this is intended.
    with self.subTest(subset=subset):
        with self.assertWarns(DeprecationWarning):
            new_result = fitter.subset_fitter([1, 2, 0]).filter.apply(result)
        # The noisy result should have a poor 111 state, the mit. result should be good.
        self.assertTrue(result.get_counts()["111"] < 800)
        self.assertTrue(new_result.get_counts()["111"] > 990)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
https://github.com/indian-institute-of-science-qc/qiskit-aakash
|
indian-institute-of-science-qc
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Utils for reading a user preference config files."""
import configparser
import os
from qiskit import exceptions
# Default user-preference file location: ~/.qiskit/settings.conf
DEFAULT_FILENAME = os.path.join(os.path.expanduser("~"),
                                '.qiskit', 'settings.conf')
class UserConfig:
    """Parse a Qiskit user preference file.

    The config file format should look like:

        [default]
        circuit_drawer = mpl
    """

    def __init__(self, filename=None):
        """Create a UserConfig.

        Args:
            filename (str): Path to the user config file. When omitted,
                ~/.qiskit/settings.conf is used.
        """
        self.filename = DEFAULT_FILENAME if filename is None else filename
        self.settings = {}
        self.config_parser = configparser.ConfigParser()

    def read_config_file(self):
        """Read config file and parse the contents into the settings attr."""
        # A missing file is not an error; settings simply stays empty.
        if not os.path.isfile(self.filename):
            return
        self.config_parser.read(self.filename)
        if 'default' not in self.config_parser.sections():
            return
        circuit_drawer = self.config_parser.get('default', 'circuit_drawer')
        if not circuit_drawer:
            return
        # Only the four known drawer backends are accepted.
        if circuit_drawer not in ('text', 'mpl', 'latex', 'latex_source'):
            raise exceptions.QiskitUserConfigError(
                "%s is not a valid circuit drawer backend. Must be "
                "either 'text', 'mpl', 'latex', or 'latex_source'"
                % circuit_drawer)
        self.settings['circuit_drawer'] = circuit_drawer
def get_config():
    """Read the config file from the default location or env var

    It will read a config file at either the default location
    ~/.qiskit/settings.conf or if set the value of the QISKIT_SETTINGS env var.

    It will return the parsed settings dict from the parsed config file.

    Returns:
        dict: The settings dict from the parsed config file.
    """
    # Environment variable takes precedence over the default path.
    config_path = os.getenv('QISKIT_SETTINGS', DEFAULT_FILENAME)
    if not os.path.isfile(config_path):
        return {}
    config = UserConfig(config_path)
    config.read_config_file()
    return config.settings
|
https://github.com/Interlin-q/diskit
|
Interlin-q
|
import os
from qiskit import *
import qiskit.tools.visualization as qt
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from numpy import pi
import matplotlib.pyplot as plt
%matplotlib inline

# Build the CNOT truth table: one circuit per computational basis input
# |00>, |01>, |10>, |11>, each run for a single shot.
qreg_q = QuantumRegister(2, 'q')
creg_c = ClassicalRegister(2, 'c')
circuits = []
for i in range(0, 4):
    circuits.append(QuantumCircuit(qreg_q, creg_c))

# Input |00>: reset both qubits, apply CNOT (q0 control, q1 target), measure.
circuits[0].reset(qreg_q[0])
circuits[0].reset(qreg_q[1])
circuits[0].cx(qreg_q[0], qreg_q[1])
circuits[0].measure(qreg_q[0], creg_c[0])
circuits[0].measure(qreg_q[1], creg_c[1])
circuits[0].draw(output='mpl')
simulator = Aer.get_backend('qasm_simulator')
result00 = execute(circuits[0], backend = simulator, shots = 1).result()
counts00 = result00.get_counts()
print(counts00)
qt.plot_histogram(counts00, title="Histogram with the evaluating of |00> state under CNOT transformation")

# Input |01> (q1 flipped via X).
circuits[1].reset(qreg_q[0])
circuits[1].x(qreg_q[1])
circuits[1].cx(qreg_q[0], qreg_q[1])
circuits[1].measure(qreg_q[0], creg_c[0])
circuits[1].measure(qreg_q[1], creg_c[1])
circuits[1].draw(output="mpl")
simulator = Aer.get_backend('qasm_simulator')
result01 = execute(circuits[1], backend = simulator, shots = 1).result()
counts01 = result01.get_counts()
print(counts01)
qt.plot_histogram(counts01, title="Histogram with the evaluating of |01> state under CNOT transformation")

# Input |10> (control set): target should flip.
circuits[2].x(qreg_q[0])
circuits[2].reset(qreg_q[1])
circuits[2].cx(qreg_q[0], qreg_q[1])
circuits[2].measure(qreg_q[0], creg_c[0])
circuits[2].measure(qreg_q[1], creg_c[1])
circuits[2].draw(output="mpl")
simulator = Aer.get_backend('qasm_simulator')
result10 = execute(circuits[2], backend = simulator, shots = 1).result()
counts10 = result10.get_counts()
print(counts10)
qt.plot_histogram(counts10, title="Histogram with the evaluating of |10> state under CNOT transformation")

# Input |11>: target should flip back to 0.
circuits[3].x(qreg_q[0])
circuits[3].x(qreg_q[1])
circuits[3].cx(qreg_q[0], qreg_q[1])
circuits[3].measure(qreg_q[0], creg_c[0])
circuits[3].measure(qreg_q[1], creg_c[1])
circuits[3].draw(output="mpl")
simulator = Aer.get_backend('qasm_simulator')
result11 = execute(circuits[3], backend = simulator, shots = 1).result()
counts11 = result11.get_counts()
print(counts11)
qt.plot_histogram(counts11, title="Histogram with the evaluating of |11> state under CNOT transformation")
|
https://github.com/RigiResearch/qiskit-fall-fest-22-public
|
RigiResearch
|
from qiskit import QuantumCircuit
from qiskit import Aer, transpile
from qiskit.tools.visualization import plot_histogram
Aer.backends()

# --- Exact statevector simulation of a 3-qubit GHZ circuit ---
simulator = Aer.get_backend('statevector_simulator')
# Create a Quantum Circuit acting on a quantum register of three qubits
circ = QuantumCircuit(3)
# Add a H gate on qubit 0, putting this qubit in superposition.
circ.h(0)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1
circ.cx(0, 1)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 2, putting
# the qubits in a GHZ state.
circ.cx(0, 2)
circ.draw('mpl')
# Create a Quantum Program for execution
result = simulator.run(circ).result()
counts = result.get_counts(circ)
print(counts)
plot_histogram(counts)

# --- Shot-based simulation: same circuit with measurements appended ---
simulator = Aer.get_backend('aer_simulator')
# Create a Quantum Circuit acting on a quantum register of three qubits
circ = QuantumCircuit(3)
# Add a H gate on qubit 0, putting this qubit in superposition.
circ.h(0)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1
circ.cx(0, 1)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 2, putting
# the qubits in a GHZ state.
circ.cx(0, 2)
circ.measure_all()
circ = transpile(circ, simulator)
# Run and get counts
result = simulator.run(circ).result()
counts = result.get_counts(circ)
plot_histogram(counts, title='GHZ State counts')
# Per-shot bitstrings via the simulator's memory feature.
result = simulator.run(circ, shots=10, memory=True).result()
memory = result.get_memory(circ)
print(memory)
# Template for running on real IBMQ hardware (intentionally commented out).
# IBMQ.load_account()
# provider = IBMQ.get_provider(hub = 'ibm-q')
# device = provider.get_backend('ibmq_16_melbourne')
# job = execute(circuit,backend = device,shots = 1024)
# print(job.job_id())
# device_result = job.result()
# plot_histogram(device_result.get_counts(circuit))
|
https://github.com/sorin-bolos/QiskitCampAsia2019
|
sorin-bolos
|
# useful additional packages
import matplotlib.pyplot as plt
import matplotlib.axes as axes
%matplotlib inline
import numpy as np
import networkx as nx
from qiskit import BasicAer
from qiskit.tools.visualization import plot_histogram
from qiskit.aqua import run_algorithm
from qiskit.aqua.input import EnergyInput
from qiskit.aqua.translators.ising import max_cut, tsp
from qiskit.aqua.algorithms import VQE, ExactEigensolver
from qiskit.aqua.components.optimizers import SPSA
from qiskit.aqua.components.variational_forms import RY
from qiskit.aqua import QuantumInstance
from qiskit.quantum_info import Pauli
from qiskit.aqua.operators import WeightedPauliOperator
from collections import OrderedDict
import math
# setup aqua logging
import logging
from qiskit.aqua import set_qiskit_aqua_logging
# set_qiskit_aqua_logging(logging.DEBUG) # choose INFO, DEBUG to see the log
def sample_most_likely(state_vector):
    """Return the most likely bitstring of a measurement outcome.

    Args:
        state_vector: Either a dict mapping bitstrings to counts/probabilities,
            or a 1-D statevector-like array of amplitudes.

    Returns:
        np.ndarray: Bit values in little-endian order (qubit 0 first).
    """
    # OrderedDict is a dict subclass, so a single isinstance check suffices
    # (the original redundantly tested both).
    if isinstance(state_vector, dict):
        # Stable sort preserves the original tie-breaking: among equal
        # counts the last-inserted bitstring wins.
        binary_string = sorted(state_vector.items(), key=lambda kv: kv[1])[-1][0]
        return np.asarray([int(y) for y in reversed(binary_string)])
    # Amplitude vector: take the basis state of largest magnitude and unpack
    # its index into bits, least-significant bit first.
    n = int(np.log2(state_vector.shape[0]))
    k = np.argmax(np.abs(state_vector))
    x = np.zeros(n)
    for i in range(n):
        x[i] = k % 2
        k >>= 1
    return x
def get_knapsack_qubitops(values, weights, w_max, M):
    """Construct the knapsack-problem Hamiltonian as a WeightedPauliOperator.

    Encodes "maximize sum(x_i * v_i) subject to sum(x_i * w_i) <= w_max" with
    binary slack variables y_j so the weight constraint becomes an equality
    penalized by the (large) multiplier M.

    Args:
        values (list): item values v_i.
        weights (list): item weights w_i.
        w_max (int): knapsack capacity.
        M (float): penalty multiplier on the constraint term.

    Returns:
        tuple: (WeightedPauliOperator, float) the operator and the constant
        energy shift accumulated from the identity parts of each term.
    """
    ysize = int(math.log(w_max + 1, 2))
    n = len(values)
    num_values = n + ysize

    # BUG FIX: the original used `2^i` / `2^j`, which is bitwise XOR in
    # Python, not exponentiation; all slack-bit coefficients were wrong.
    # Also replaced the removed `np.bool` alias with the builtin `bool`.
    def _pauli_z(*indices):
        """Pauli with Z on the given positions (a double flip cancels out,
        matching the original behavior when i == j)."""
        xp = np.zeros(num_values, dtype=bool)
        zp = np.zeros(num_values, dtype=bool)
        for idx in indices:
            zp[idx] = not zp[idx]
        return Pauli(zp, xp)

    pauli_list = []
    shift = 0

    def _append(coef, *indices):
        """Accumulate one weighted Pauli term and track the identity shift."""
        nonlocal shift
        pauli_list.append([coef, _pauli_z(*indices)])
        shift -= coef

    # term for sum(x_i*w_i)^2
    for i in range(n):
        for j in range(n):
            coef = -0.25 * weights[i] * weights[j] * M
            _append(coef, j)
            _append(coef, i)
            _append(-coef, i, j)

    # term for sum(2^j*y_j)^2
    for i in range(ysize):
        for j in range(ysize):
            coef = -0.25 * (2**i) * (2**j) * M
            _append(coef, n + j)
            _append(coef, n + i)
            _append(-coef, n + i, n + j)

    # term for -2*W_max*sum(x_i*w_i)
    for i in range(n):
        _append(w_max * weights[i] * M, i)

    # term for -2*W_max*sum(2^j*y_j)
    for j in range(ysize):
        _append(w_max * (2**j) * M, n + j)

    # cross terms between item weights and slack bits
    for i in range(n):
        for j in range(ysize):
            coef = -0.5 * weights[i] * (2**j) * M
            _append(coef, n + j)
            _append(coef, i)
            _append(-coef, i, n + j)

    # term for sum(x_i*v_i)
    for i in range(n):
        _append(0.5 * values[i], i)

    return WeightedPauliOperator(paulis=pauli_list), shift
# Problem instance: item values/weights, capacity, and constraint penalty.
values = [680, 120, 57, 178]
weights = [3, 6, 5, 9]
w_max = 15
M = 2000000
qubitOp, offset = get_knapsack_qubitops(values, weights, w_max, M)
algo_input = EnergyInput(qubitOp)

# Classical reference solution via exact diagonalization.
ee = ExactEigensolver(qubitOp, k=1)
result = ee.run()
most_lightly = result['eigvecs'][0]
x = sample_most_likely(most_lightly)
print('result=' + str(x[:len(values)]))

# VQE with SPSA optimizer and RY ansatz on the statevector simulator.
seed = 10598
spsa = SPSA(max_trials=300)
ry = RY(qubitOp.num_qubits, depth=5, entanglement='linear')
vqe = VQE(qubitOp, ry, spsa)
backend = BasicAer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed)
result_statevector = vqe.run(quantum_instance)
most_lightly_sv = result_statevector['eigvecs'][0]
x_statevector = sample_most_likely(most_lightly_sv)
print('result usig statevector_simulator =' + str(x_statevector[:len(values)]))

# run quantum algorithm with shots
seed = 10598
spsa = SPSA(max_trials=300)
ry = RY(qubitOp.num_qubits, depth=5, entanglement='linear')
vqe = VQE(qubitOp, ry, spsa)
backend = BasicAer.get_backend('qasm_simulator')
quantum_instance = QuantumInstance(backend, shots=1024, seed_simulator=seed, seed_transpiler=seed)
result_shots = vqe.run(quantum_instance)
most_lightly_shots = result_shots['eigvecs'][0]
x_shots = sample_most_likely(most_lightly_shots)
print('result usig qasm_simulator =' + str(x_shots[:len(values)]))
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import pulse
from qiskit.providers.fake_provider import FakeArmonk

# Single-qubit mock backend; gives the builder real channel mappings.
backend = FakeArmonk()

# Demonstrate the pulse-builder API on drive channel 0.
with pulse.build(backend) as drive_sched:
    d0 = pulse.drive_channel(0)
    a0 = pulse.acquire_channel(0)
    pulse.play(pulse.library.Constant(10, 1.0), d0)
    pulse.delay(20, d0)
    pulse.shift_phase(3.14/2, d0)
    pulse.set_phase(3.14, d0)
    pulse.shift_frequency(1e7, d0)
    pulse.set_frequency(5e9, d0)
    # Build a sub-schedule and splice it in with pulse.call().
    with pulse.build() as temp_sched:
        pulse.play(pulse.library.Gaussian(20, 1.0, 3.0), d0)
        pulse.play(pulse.library.Gaussian(20, -1.0, 3.0), d0)
    pulse.call(temp_sched)
    pulse.acquire(30, a0, pulse.MemorySlot(0))

drive_sched.draw()
|
https://github.com/WhenTheyCry96/qiskitHackathon2022
|
WhenTheyCry96
|
import warnings
warnings.filterwarnings('ignore')
from qiskit_metal import designs, MetalGUI

# Planar chip design, 12mm x 10mm, editable through the Metal GUI.
design = designs.DesignPlanar()
design.overwrite_enabled = True
design.chips.main.size_x = '12mm'
design.chips.main.size_y = '10mm'
gui = MetalGUI(design)

from qiskit_metal.qlibrary.qubits.transmon_pocket_cl import TransmonPocketCL
design.delete_all_components()

# Four transmons placed on the corners/edges of a design_span_x x
# design_span_y (mm) rectangle centered on the chip.
design_span_x = 5
design_span_y = 3
half_chip_width = design_span_x / 2
half_chip_height = design_span_y / 2

# Pad layouts: Q1/Q4 use pads a, b, c; Q2/Q3 reuse a and c and add pad d
# at instantiation time.
connection_pads_options = dict(
    a = dict(loc_W=1, loc_H=-1),
    b = dict(loc_W=1, loc_H=1),
    c = dict(loc_W=-1, loc_H=-1)
)
connection23_pads_options = dict(
    a = dict(loc_W=1, loc_H=-1),
    c = dict(loc_W=-1, loc_H=-1)
)
transmons = []
transmons.append(TransmonPocketCL(design, 'Q1',
    options=dict(pos_x=f'-{half_chip_width}mm',
                 pos_y=f'{-half_chip_height}mm',
                 connection_pads=dict(**connection_pads_options))))
transmons.append(TransmonPocketCL(design, 'Q2',
    options=dict(pos_x=f'0mm',
                 pos_y=f'{half_chip_height}mm',
                 orientation=-90,
                 connection_pads=dict(d=dict(loc_W=-1, loc_H=1), **connection23_pads_options))))
transmons.append(TransmonPocketCL(design, 'Q3',
    options=dict(pos_x=f'0mm',
                 pos_y=f'{-half_chip_height}mm',
                 orientation=90,
                 connection_pads=dict(d=dict(loc_W=-1, loc_H=1), **connection23_pads_options))))
transmons.append(TransmonPocketCL(design, 'Q4',
    options=dict(pos_x=f'{half_chip_width}mm',
                 pos_y=f'{half_chip_height}mm',
                 orientation=180,
                 connection_pads=dict(**connection_pads_options))))
gui.rebuild()
gui.autoscale()

from qiskit_metal.qlibrary.tlines.meandered import RouteMeander
from qiskit_metal import Dict
# Shared meander defaults; merged into every route built by connect().
fillet='99.99um'
options = Dict(
    meander=Dict(
        lead_start='0.1mm',
        lead_end='0.1mm',
        asymmetry='0 um')
)
def connect(component_name: str, component1: str, pin1: str, component2: str, pin2: str,
            length: str,
            asymmetry='0 um', start_strght='0 um', end_strght='0 um', flip=False):
    """Connect two pins with a CPW.

    Builds a meandered route of the given total length between
    component1.pin1 and component2.pin2, merging in the module-level
    meander defaults (`options`).
    """
    route_opts = Dict(
        pin_inputs=Dict(
            start_pin=Dict(component=component1, pin=pin1),
            end_pin=Dict(component=component2, pin=pin2),
        ),
        lead=Dict(
            start_straight=start_strght,
            end_straight=end_strght,
        ),
        total_length=length,
        fillet='99.9um',
    )
    # Overlay the shared meander defaults, then apply per-route tweaks.
    route_opts.update(options)
    route_opts.meander.asymmetry = asymmetry
    route_opts.meander.lead_direction_inverted = 'true' if flip else 'false'
    return RouteMeander(design, component_name, route_opts)
# Bus couplers (CPWs) between neighboring qubit pads.
asym_h = 100
asym_v = 100
cpw = []
cpw.append(connect('cpw1', 'Q1', 'b', 'Q2', 'a', '8 mm', f'+{asym_h}um', '0.1mm', '0.1mm'))
cpw.append(connect('cpw3', 'Q4', 'b', 'Q3', 'a', '8 mm', f'+{asym_h}um', '0.1mm', '0.1mm'))
cpw.append(connect('cpw4', 'Q3', 'd', 'Q1', 'a', '8 mm', f'-{asym_h}um', '0.1mm', '0.1mm'))
cpw.append(connect('cpw5', 'Q2', 'd', 'Q4', 'a', '8 mm', f'-{asym_h}um', '0.1mm', '0.1mm'))
gui.rebuild()
gui.autoscale()

from qiskit_metal.qlibrary.terminations.launchpad_wb_coupled import LaunchpadWirebondCoupled
readouts_lwc = []
control_lwc = []
offset_x = 0
offset_y = 1
#Readouts
readouts_lwc.append(LaunchpadWirebondCoupled(design, 'R1',
    options = dict(
        pos_x = '-5mm',
        pos_y = f'-{half_chip_height+offset_y}mm',
        lead_length = '30um')))
readouts_lwc.append(LaunchpadWirebondCoupled(design, 'R2',
    options = dict(
        pos_x = '-1mm',
        pos_y = '4mm',
        orientation = -90,
        lead_length = '30um')))
readouts_lwc.append(LaunchpadWirebondCoupled(design, 'R3',
    options = dict(
        pos_x = '1mm',
        pos_y = '-4mm',
        orientation = 90,
        lead_length = '30um')))
readouts_lwc.append(LaunchpadWirebondCoupled(design, 'R4',
    options = dict(
        pos_x = '5mm',
        pos_y = f'{half_chip_height+offset_y}mm',
        orientation = 180,
        lead_length = '30um')))
#Controls
control_lwc.append(LaunchpadWirebondCoupled(design, 'CL1',
    options = dict(
        pos_x = '-5mm',
        pos_y = '2mm',
        lead_length = '30um')))
control_lwc.append(LaunchpadWirebondCoupled(design, 'CL2',
    options = dict(
        pos_x = '4mm',
        pos_y = '4mm',
        orientation = -90,
        lead_length = '30um')))
control_lwc.append(LaunchpadWirebondCoupled(design, 'CL3',
    options = dict(
        pos_x = '-4mm',
        pos_y = '-4mm',
        orientation = 90,
        lead_length = '30um')))
control_lwc.append(LaunchpadWirebondCoupled(design, 'CL4',
    options = dict(
        pos_x = '5mm',
        pos_y = '-2mm',
        orientation = 180,
        lead_length = '30um')))
gui.rebuild()
gui.autoscale()

# Readout resonators from each qubit's pad c to its launchpad.
readout_lines = []
asym_14 = 700
asym_23 = 700
# NOTE(review): re-assigning `options` here changes the defaults merged into
# every subsequent connect() call -- only the latest assignment is in effect.
options = Dict(
    lead=Dict(
        start_straight='330um',
        end_straight='0um'),
    fillet='99.99um')
readout_lines.append(connect('ol1', 'Q1', 'c', 'R1', 'tie', '8 mm', f'{asym_14}um'))
options = Dict(
    lead=Dict(
        start_straight='430um',
        end_straight='0um'),
    fillet='99.99um')
readout_lines.append(connect('ol2', 'Q2', 'c', 'R2', 'tie', '8 mm', f'{asym_23}um'))
readout_lines.append(connect('ol3', 'Q3', 'c', 'R3', 'tie', '8 mm', f'{asym_23}um'))
readout_lines.append(connect('ol4', 'Q4', 'c', 'R4', 'tie', '8 mm', f'{asym_14}um'))
gui.rebuild()
gui.autoscale()

from qiskit_metal.qlibrary.tlines.anchored_path import RouteAnchors
from collections import OrderedDict
import numpy as np
control_lines = []
def connectRouteAnchor(name: str,
                       component1: str, pin1: str, component2: str, pin2: str,
                       anchor_points: OrderedDict) -> RouteAnchors:
    """Route a line between two pins through fixed anchor coordinates.

    Uses the module-level `fillet` and fixed lead lengths; returns the
    created RouteAnchors component.
    """
    routing_options = dict(
        pin_inputs=dict(
            start_pin=dict(component=component1, pin=pin1),
            end_pin=dict(component=component2, pin=pin2),
        ),
        anchors=anchor_points,
        lead=dict(
            start_straight='200um',
            end_straight='225um',
        ),
        fillet=fillet,
    )
    return RouteAnchors(design, name, routing_options)
# Charge-line routing anchors (mm coordinates) for each qubit's control line.
anchors1c = OrderedDict()
anchors1c[0] = np.array([-4, -1.42])
anchors1c[1] = np.array([-4, 2])
control_lines.append(connectRouteAnchor('line_cl1', 'Q1', 'Charge_Line', 'CL1', 'tie', anchors1c))
anchors2c = OrderedDict()
anchors2c[0] = np.array([0.08, 3.25])
anchors2c[1] = np.array([4, 3.25])
control_lines.append(connectRouteAnchor('line_cl2', 'Q2', 'Charge_Line', 'CL2', 'tie', anchors2c))
anchors3c = OrderedDict()
anchors3c[0] = np.array([-0.08, -3.25])
anchors3c[1] = np.array([-4, -3.25])
control_lines.append(connectRouteAnchor('line_cl3', 'Q3', 'Charge_Line', 'CL3', 'tie', anchors3c))
anchors4c = OrderedDict()
anchors4c[0] = np.array([4, 1.42])
anchors4c[1] = np.array([4, -2])
control_lines.append(connectRouteAnchor('line_cl4', 'Q4', 'Charge_Line', 'CL4', 'tie', anchors4c))
gui.rebuild()
gui.autoscale()
gui.autoscale()
gui.screenshot(name="full_design")

import numpy as np
from scipy.constants import c, h, pi, hbar, e
from qiskit_metal.analyses.em.cpw_calculations import guided_wavelength
# constants:
phi0 = h/(2*e)          # magnetic flux quantum
varphi0 = phi0/(2*pi)   # reduced flux quantum
# project target parameters
f_qList = np.around(np.linspace(5.25, 5.75, 4),2) # GHz
f_rList = f_qList + 1.8 # GHz
L_JJList = np.around(varphi0**2/((f_qList*1e9+300e6)**2/(8*300e6))/h*1e9, 2) # nH
# initial CPW readout lengths
def find_resonator_length(frequency, line_width, line_gap, N):
    """Estimate the CPW length for a target resonance frequency.

    frequency in GHz; line_width / line_gap in um;
    N = 2 for a lambda/2 resonator, 4 for lambda/4.
    Returns the length as a string with an " mm" suffix.
    """
    lambda_g, _, _ = guided_wavelength(frequency * 10**9, line_width * 10**-6,
                                       line_gap * 10**-6, 750 * 10**-6, 200 * 10**-9)
    return str(lambda_g / N * 10**3) + " mm"
find_resonator_length(f_rList, 10, 6, 2)
find_resonator_length(np.around(np.linspace(8, 9.2, 4), 2), 10, 6, 2)

# Per-qubit pad geometry tweaks and resonator/coupler length targets.
transmons[0].options.pad_gap = '40um'
transmons[0].options.pad_width = '550um' # 405
transmons[0].options.pad_height = '120um'
transmons[1].options.pad_gap = '40um'
transmons[1].options.pad_width = '500um' # 405
transmons[1].options.pad_height = '120um'
transmons[2].options.pad_gap = '40um'
transmons[2].options.pad_width = '460um' # 405
transmons[2].options.pad_height = '120um'
transmons[3].options.pad_gap = '40um'
transmons[3].options.pad_width = '440um' # 405
transmons[3].options.pad_height = '120um'
readout_lines[0].options.total_length = '8.63mm'
readout_lines[1].options.total_length = '8.42mm'
readout_lines[2].options.total_length = '8.24mm'
readout_lines[3].options.total_length = '8.06mm'
cpw[0].options.total_length = '7.6mm'
cpw[1].options.total_length = '7.2mm'
cpw[2].options.total_length = '6.9mm'
cpw[3].options.total_length = '6.6mm'
gui.rebuild()
gui.autoscale()

# Expose each junction's inductance/capacitance as named HFSS variables.
qcomps = design.components # short handle (alias)
qcomps['Q1'].options['hfss_inductance'] = 'Lj1'
qcomps['Q1'].options['hfss_capacitance'] = 'Cj1'
qcomps['Q2'].options['hfss_inductance'] = 'Lj2'
qcomps['Q2'].options['hfss_capacitance'] = 'Cj2'
qcomps['Q3'].options['hfss_inductance'] = 'Lj3'
qcomps['Q3'].options['hfss_capacitance'] = 'Cj3'
qcomps['Q4'].options['hfss_inductance'] = 'Lj4'
qcomps['Q4'].options['hfss_capacitance'] = 'Cj4'

# Full-chip eigenmode simulation + energy-participation-ratio analysis.
from qiskit_metal.analyses.quantization import EPRanalysis
eig_qb = EPRanalysis(design, "hfss")
eig_qb.sim.setup.n_modes = 12
eig_qb.sim.setup.max_passes = 12
eig_qb.sim.setup.vars = Dict(Lj1= '10 nH', Cj1= '2 fF',
                             Lj2= '10 nH', Cj2= '2 fF',
                             Lj3= '10 nH', Cj3= '2 fF',
                             Lj4= '10 nH', Cj4= '2 fF')
eig_qb.sim.run(name="4Qubits", components=[])
eig_qb.sim.plot_convergences()
# Replace the default junction entry with one entry per qubit junction.
del eig_qb.setup.junctions['jj']
eig_qb.setup.junctions.jj1 = Dict(rect='JJ_rect_Lj_Q1_rect_jj', line='JJ_Lj_Q1_rect_jj_',
                                  Lj_variable='Lj1', Cj_variable='Cj1')
eig_qb.setup.junctions.jj2 = Dict(rect='JJ_rect_Lj_Q2_rect_jj', line='JJ_Lj_Q2_rect_jj_',
                                  Lj_variable='Lj2', Cj_variable='Cj2')
eig_qb.setup.junctions.jj3 = Dict(rect='JJ_rect_Lj_Q3_rect_jj', line='JJ_Lj_Q3_rect_jj_',
                                  Lj_variable='Lj3', Cj_variable='Cj3')
eig_qb.setup.junctions.jj4 = Dict(rect='JJ_rect_Lj_Q4_rect_jj', line='JJ_Lj_Q4_rect_jj_',
                                  Lj_variable='Lj4', Cj_variable='Cj4')
eig_qb.setup.sweep_variable = 'Lj1'
eig_qb.setup
eig_qb.run_epr()
eig_qb.sim.plot_fields('main', eigenmode=8)
#eig_2qb.sim.save_screenshot("eigen1.png")

# Per-qubit 2-mode eigenmode runs (each re-applies its geometry first).
transmons[0].options.pad_gap = '40um'
transmons[0].options.pad_width = '550um' # 405
transmons[0].options.pad_height = '120um'
gui.rebuild()
eig_qb = EPRanalysis(design, "hfss")
eig_qb.sim.setup.n_modes = 2
eig_qb.sim.run(name="Qbit", components=['Q1'], open_terminations=[], box_plus_buffer = False)
eig_qb.sim.plot_convergences()
transmons[1].options.pad_gap = '40um'
transmons[1].options.pad_width = '500um' # 405
transmons[1].options.pad_height = '120um'
gui.rebuild()
eig_qb = EPRanalysis(design, "hfss")
eig_qb.sim.setup.n_modes = 2
eig_qb.sim.run(name="Qbit2", components=['Q2'], open_terminations=[], box_plus_buffer = False)
eig_qb.sim.plot_convergences()
transmons[2].options.pad_gap = '40um'
transmons[2].options.pad_width = '460um' # 405
transmons[2].options.pad_height = '120um'
gui.rebuild()
eig_qb = EPRanalysis(design, "hfss")
eig_qb.sim.setup.n_modes = 2
eig_qb.sim.run(name="Qbit3", components=['Q3'], open_terminations=[], box_plus_buffer = False)
eig_qb.sim.plot_convergences()
transmons[3].options.pad_gap = '40um'
transmons[3].options.pad_width = '440um' # 405
transmons[3].options.pad_height = '120um'
gui.rebuild()
eig_qb = EPRanalysis(design, "hfss")
eig_qb.sim.setup.n_modes = 2
eig_qb.sim.run(name="Qbit4", components=['Q4'], open_terminations=[], box_plus_buffer = False)
eig_qb.sim.plot_convergences()

# Per-readout-resonator eigenmode runs (launchpad + readout line pairs).
from qiskit_metal.analyses.quantization import EPRanalysis
eig_rd = EPRanalysis(design, "hfss")
readout_lines[0].options.total_length = '11.7mm'
gui.rebuild()
eig_rd.sim.setup.n_modes = 2
eig_rd.sim.setup.max_passes = 10
eig_rd.sim.run(name="Readout",
               components=['R1','ol1'])
eig_rd.sim.plot_convergences()
from qiskit_metal.analyses.quantization import EPRanalysis
eig_rd = EPRanalysis(design, "hfss")
readout_lines[1].options.total_length = '11.2mm'
gui.rebuild()
eig_rd.sim.setup.n_modes = 2
eig_rd.sim.setup.max_passes = 10
eig_rd.sim.run(name="Readout",
               components=['R2','ol2'])
eig_rd.sim.plot_convergences()
from qiskit_metal.analyses.quantization import EPRanalysis
eig_rd = EPRanalysis(design, "hfss")
readout_lines[2].options.total_length = '10.6mm'
gui.rebuild()
eig_rd.sim.setup.n_modes = 2
eig_rd.sim.setup.max_passes = 10
eig_rd.sim.run(name="Readout",
               components=['R3','ol3'])
eig_rd.sim.plot_convergences()
from qiskit_metal.analyses.quantization import EPRanalysis
eig_rd = EPRanalysis(design, "hfss")
readout_lines[3].options.total_length = '10mm'
gui.rebuild()
eig_rd.sim.setup.n_modes = 2
eig_rd.sim.setup.max_passes = 10
eig_rd.sim.run(name="Readout",
               components=['R4','ol4'])
eig_rd.sim.plot_convergences()

# Per-bus-coupler eigenmode runs.
from qiskit_metal.analyses.quantization import EPRanalysis
eig_c = EPRanalysis(design, "hfss")
cpw[0].options.total_length = '10mm'
gui.rebuild()
eig_c.sim.setup.n_modes = 2
eig_c.sim.setup.passes = 10
eig_c.sim.run(name="CPW1",
              components=['cpw1'])
eig_c.sim.plot_convergences()
from qiskit_metal.analyses.quantization import EPRanalysis
eig_c = EPRanalysis(design, "hfss")
cpw[1].options.total_length = '9.6mm'
gui.rebuild()
eig_c.sim.setup.n_modes = 2
eig_c.sim.setup.passes = 10
eig_c.sim.run(name="CPW2",
              components=['cpw3'])
eig_c.sim.plot_convergences()
from qiskit_metal.analyses.quantization import EPRanalysis
eig_c = EPRanalysis(design, "hfss")
cpw[2].options.total_length = '9.3mm'
gui.rebuild()
eig_c.sim.setup.n_modes = 2
eig_c.sim.setup.passes = 10
eig_c.sim.run(name="CPW4",
              components=['cpw4'])
eig_c.sim.plot_convergences()
from qiskit_metal.analyses.quantization import EPRanalysis
eig_c = EPRanalysis(design, "hfss")
cpw[3].options.total_length = '9mm'
gui.rebuild()
eig_c.sim.setup.n_modes = 2
eig_c.sim.setup.passes = 10
eig_c.sim.run(name="CPW5",
              components=['cpw5'])
eig_c.sim.plot_convergences()
|
https://github.com/shesha-raghunathan/DATE2019-qiskit-tutorial
|
shesha-raghunathan
|
#Assign these values as per your requirements.
# NOTE(review): a `global` statement at module top level is a no-op -- it only
# has effect inside a function body; these names are module globals anyway.
global min_qubits,max_qubits,skip_qubits,max_circuits,num_shots,Noise_Inclusion
min_qubits=4
max_qubits=15 #reference files are upto 12 Qubits only
skip_qubits=2
max_circuits=3
num_shots=4092
gate_counts_plots = True
Noise_Inclusion = False
saveplots = False
Memory_utilization_plot = True
Type_of_Simulator = "built_in" #Inputs are "built_in" or "FAKE" or "FAKEV2"
backend_name = "FakeGuadalupeV2" #Can refer to the README files for the available backends
#Change your Specification of Simulator in Declaring Backend Section
#By Default : built_in -> qasm_simulator and FAKE -> FakeSantiago() and FAKEV2 -> FakeSantiagoV2()
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, transpile, execute
from qiskit.opflow import PauliTrotterEvolution, Suzuki
from qiskit.opflow.primitive_ops import PauliSumOp
import time,os,json
import matplotlib.pyplot as plt
# Import from Qiskit Aer noise module
from qiskit_aer.noise import (NoiseModel, QuantumError, ReadoutError,pauli_error, depolarizing_error, thermal_relaxation_error,reset_error)
# Benchmark Name
benchmark_name = "VQE Simulation"
# Selection of basis gate set for transpilation
# Note: selector 1 is a hardware agnostic gate set
basis_selector = 1
basis_gates_array = [
    [],
    ['rx', 'ry', 'rz', 'cx'], # a common basis set, default
    ['cx', 'rz', 'sx', 'x'], # IBM default basis set
    ['rx', 'ry', 'rxx'], # IonQ default basis set
    ['h', 'p', 'cx'], # another common basis set
    ['u', 'cx'] # general unitaries basis gates
]
# Fix the RNG seed so repeated benchmark runs are reproducible.
np.random.seed(0)
def get_QV(backend):
    """Return the backend's quantum volume from its configuration JSON.

    Args:
        backend: fake-backend instance exposing `dirname` and
            `conf_filename` attributes pointing at its config file.

    Returns:
        The `quantum_volume` value from the JSON, or None when the key
        is absent.
    """
    # Assuming backend.conf_filename is the filename and backend.dirname is the directory path
    conf_filename = backend.dirname + "/" + backend.conf_filename
    # Use a context manager so the file is always closed; the redundant
    # function-local `import json` was dropped (json is imported at module level).
    with open(conf_filename, 'r') as file:
        data = json.load(file)
    # Extract the quantum_volume parameter
    return data.get('quantum_volume', None)
def checkbackend(backend_name,Type_of_Simulator):
    """Validate the requested backend and return something usable.

    For "built_in" simulators the backend name string is returned
    (falling back to "qasm_simulator"); for "FAKE"/"FAKEV2" an
    instantiated fake backend is returned (falling back to
    FakeSantiago / FakeSantiagoV2). Any other simulator type yields None.
    """
    if Type_of_Simulator == "built_in":
        available_backends = [entry.name for entry in Aer.backends()]
        if backend_name in available_backends:
            return backend_name
        print(f"incorrect backend name or backend not available. Using qasm_simulator by default !!!!")
        print(f"available backends are : {available_backends}")
        return "qasm_simulator"
    if Type_of_Simulator in ("FAKE", "FAKEV2"):
        import qiskit.providers.fake_provider as fake_backends
        if hasattr(fake_backends, backend_name):
            print(f"Backend {backend_name} is available for type {Type_of_Simulator}.")
            return getattr(fake_backends, backend_name)()
        print(f"Backend {backend_name} is not available or incorrect for type {Type_of_Simulator}. Executing with FakeSantiago!!!")
        fallback_name = "FakeSantiagoV2" if Type_of_Simulator == "FAKEV2" else "FakeSantiago"
        return getattr(fake_backends, fallback_name)()
# Instantiate the chosen backend and derive plotting metadata from it.
if Type_of_Simulator == "built_in":
    platform = checkbackend(backend_name, Type_of_Simulator)
    # By default using "Qasm Simulator"
    backend = Aer.get_backend(platform)
    QV_ = None
    print(f"{platform} device is capable of running {backend.num_qubits}")
    print(f"backend version is {backend.backend_version}")
elif Type_of_Simulator == "FAKE":
    basis_selector = 0
    backend = checkbackend(backend_name, Type_of_Simulator)
    QV_ = get_QV(backend)
    # Platform string (provider name + version) is used for plot labels.
    platform = backend.properties().backend_name + "-" + backend.properties().backend_version
    max_qubits = backend.configuration().n_qubits
    print(f"{platform} device is capable of running {backend.configuration().n_qubits}")
    print(f"{platform} has QV={QV_}")
    if max_qubits > 30:
        print(f"Device is capable with max_qubits = {max_qubits}")
        # BUG FIX: original wrote `max_qubit = 30` (typo), so the cap on the
        # qubit sweep was never applied.
        max_qubits = 30
    print(f"Using fake backend {platform} with max_qubits {max_qubits}")
elif Type_of_Simulator == "FAKEV2":
    basis_selector = 0
    # V2 fake backends carry a "V2" suffix in their class names.
    if "V2" not in backend_name:
        backend_name = backend_name + "V2"
    backend = checkbackend(backend_name, Type_of_Simulator)
    QV_ = get_QV(backend)
    platform = backend.name + "-" + backend.backend_version
    max_qubits = backend.num_qubits
    print(f"{platform} device is capable of running {backend.num_qubits}")
    print(f"{platform} has QV={QV_}")
    if max_qubits > 30:
        print(f"Device is capable with max_qubits = {max_qubits}")
        # BUG FIX: same `max_qubit` -> `max_qubits` typo as above.
        max_qubits = 30
    print(f"Using fake backend {platform} with max_qubits {max_qubits}")
else:
    print("Enter valid Simulator.....")
# saved circuits for display
QC_ = None  # last small expectation-measurement circuit (set in ExpectationCircuit)
Hf_ = None  # last small Hartree-Fock init circuit (set in HartreeFock)
CO_ = None  # last small cluster-operator circuit (set in ClusterOperatorCircuit)
################### Circuit Definition #######################################
# Construct a Qiskit circuit for VQE Energy evaluation with UCCSD ansatz
# param: n_spin_orbs - The number of spin orbitals.
# return: return a Qiskit circuit for this VQE ansatz
def VQEEnergy(n_spin_orbs, na, nb, circuit_id=0, method=1):
    """Build the UCCSD-ansatz VQE circuit(s) for energy evaluation.

    n_spin_orbs -- number of spin orbitals (= number of qubits)
    na, nb      -- number of occupied alpha / beta orbitals
    circuit_id  -- selects which pre-computed ansatz file to load
    method      -- 1: return a single circuit measuring only the last
                   Hamiltonian term; otherwise return a list of measured
                   circuits (one per non-diagonal term, plus one for all
                   diagonal terms) and set the module global `normalization`.
    """
    # number of alpha spin orbitals
    norb_a = int(n_spin_orbs / 2)
    # construct the Hamiltonian
    qubit_op = ReadHamiltonian(n_spin_orbs)
    # allocate qubits
    num_qubits = n_spin_orbs
    qr = QuantumRegister(num_qubits)
    qc = QuantumCircuit(qr, name=f"vqe-ansatz({method})-{num_qubits}-{circuit_id}")
    # initialize the HF state
    Hf = HartreeFock(num_qubits, na, nb)
    qc.append(Hf, qr)
    # form the list of single and double excitations
    excitationList = []
    # single excitations within the alpha block
    for occ_a in range(na):
        for vir_a in range(na, norb_a):
            excitationList.append((occ_a, vir_a))
    # single excitations within the beta block
    for occ_b in range(norb_a, norb_a+nb):
        for vir_b in range(norb_a+nb, n_spin_orbs):
            excitationList.append((occ_b, vir_b))
    # double excitations (one alpha + one beta)
    for occ_a in range(na):
        for vir_a in range(na, norb_a):
            for occ_b in range(norb_a, norb_a+nb):
                for vir_b in range(norb_a+nb, n_spin_orbs):
                    excitationList.append((occ_a, vir_a, occ_b, vir_b))
    # get cluster operators in Paulis
    pauli_list = readPauliExcitation(n_spin_orbs, circuit_id)
    # loop over the Pauli operators
    for index, PauliOp in enumerate(pauli_list):
        # get circuit for exp(-iP)
        cluster_qc = ClusterOperatorCircuit(PauliOp, excitationList[index])
        # add to ansatz
        qc.append(cluster_qc, [i for i in range(cluster_qc.num_qubits)])
    # method 1, only compute the last term in the Hamiltonian
    if method == 1:
        # last term in Hamiltonian
        qc_with_mea, is_diag = ExpectationCircuit(qc, qubit_op[1], num_qubits)
        # return the circuit
        return qc_with_mea
    # now we need to add the measurement parts to the circuit
    # circuit list
    qc_list = []
    diag = []
    off_diag = []
    global normalization
    normalization = 0.0
    # add the first non-identity term
    identity_qc = qc.copy()
    identity_qc.measure_all()
    qc_list.append(identity_qc) # add to circuit list
    diag.append(qubit_op[1])
    normalization += abs(qubit_op[1].coeffs[0]) # add to normalization factor
    diag_coeff = abs(qubit_op[1].coeffs[0]) # add to coefficients of diagonal terms
    # loop over rest of terms
    for index, p in enumerate(qubit_op[2:]):
        # get the circuit with expectation measurements
        qc_with_mea, is_diag = ExpectationCircuit(qc, p, num_qubits)
        # accumulate normalization
        normalization += abs(p.coeffs[0])
        # add to circuit list if non-diagonal
        if not is_diag:
            qc_list.append(qc_with_mea)
        else:
            # diagonal terms share the single measured circuit; only
            # their coefficients are accumulated
            diag_coeff += abs(p.coeffs[0])
        # diagonal term
        if is_diag:
            diag.append(p)
        # off-diagonal term
        else:
            off_diag.append(p)
    # modify the name of diagonal circuit
    qc_list[0].name = qubit_op[1].primitive.to_list()[0][0] + " " + str(np.real(diag_coeff))
    normalization /= len(qc_list)
    return qc_list
# Function that constructs the circuit for a given cluster operator
def ClusterOperatorCircuit(pauli_op, excitationIndex):
    """Build the Trotterized circuit implementing exp(-i * pauli_op).

    pauli_op        -- Pauli-sum operator for one excitation group
    excitationIndex -- the excitation tuple, used only to name the circuit
    """
    # compute exp(-iP)
    exp_ip = pauli_op.exp_i()
    # first-order Suzuki-Trotter approximation, single repetition
    qc_op = PauliTrotterEvolution(trotter_mode=Suzuki(order=1, reps=1)).convert(exp_ip)
    # convert to circuit
    qc = qc_op.to_circuit()
    qc.name = f'Cluster Op {excitationIndex}'
    # save a small copy for display purposes
    # (fix: compare to None with `is`, not `==`)
    global CO_
    if CO_ is None or qc.num_qubits <= 4:
        if qc.num_qubits < 7: CO_ = qc
    # return this circuit
    return qc
# Function that adds expectation measurements to the raw circuits
def ExpectationCircuit(qc, pauli, nqubit, method=2):
    """Append basis rotations and measurements for a single Pauli term.

    qc     -- the unmeasured ansatz circuit (copied, not modified)
    pauli  -- single-term Pauli operator (provides primitive and coeffs)
    nqubit -- total number of qubits
    method -- unused; kept for interface compatibility

    Returns (measured_circuit, is_diag); is_diag is True when the Pauli
    string contains no X or Y factors.
    """
    # copy the unrotated circuit
    raw_qc = qc.copy()
    # whether this term is diagonal in the computational basis
    is_diag = True
    # primitive Pauli string
    PauliString = pauli.primitive.to_list()[0][0]
    # coefficient
    coeff = pauli.coeffs[0]
    # basis rotation; string index 0 maps to the highest-numbered qubit
    for i, p in enumerate(PauliString):
        target_qubit = nqubit - i - 1
        if (p == "X"):
            is_diag = False
            raw_qc.h(target_qubit)
        elif (p == "Y"):
            raw_qc.sdg(target_qubit)
            raw_qc.h(target_qubit)
            is_diag = False
    # perform measurements
    raw_qc.measure_all()
    # name encodes the Pauli string and its (real) coefficient
    raw_qc.name = PauliString + " " + str(np.real(coeff))
    # save a small copy for display purposes
    # (fix: compare to None with `is`, not `==`)
    global QC_
    if QC_ is None or nqubit <= 4:
        if nqubit < 7: QC_ = raw_qc
    return raw_qc, is_diag
# Function that implements the Hartree-Fock state
def HartreeFock(norb, na, nb):
    """Prepare the Hartree-Fock reference state.

    norb -- total number of spin orbitals (qubits)
    na   -- occupied alpha orbitals: X on qubits 0 .. na-1
    nb   -- occupied beta orbitals: X on qubits norb/2 .. norb/2+nb-1
    """
    # initialize the quantum circuit
    qc = QuantumCircuit(norb, name="Hf")
    # alpha electrons occupy the first half of the register
    for ia in range(na):
        qc.x(ia)
    # beta electrons occupy the second half of the register
    for ib in range(nb):
        qc.x(ib+int(norb/2))
    # Save smaller circuit for display purposes
    # (fix: compare to None with `is`, not `==`)
    global Hf_
    if Hf_ is None or norb <= 4:
        if norb < 7: Hf_ = qc
    # return the circuit
    return qc
################ Helper Functions
# Load pre-computed excitation coefficients and group them into Pauli operators
def readPauliExcitation(norb, circuit_id=0):
    """Read the ansatz file for (norb, circuit_id) and return a list of
    PauliSumOp terms, grouping consecutive excitations whose coefficient
    magnitudes agree to within 1e-4.
    """
    filename = os.path.join(f'ansatzes/{norb}_qubit_{circuit_id}.txt')
    with open(filename) as fh:
        ansatz_dict = json.loads(fh.read())
    pauli_list = []
    cur_coeff = 1e5   # sentinel > 1e4 forces the first entry to open a group
    cur_list = []
    for ext in ansatz_dict:
        coeff = ansatz_dict[ext]
        if cur_coeff > 1e4:
            # first entry: open the initial group
            cur_coeff = coeff
            cur_list = [(ext, coeff)]
        elif abs(abs(coeff) - abs(cur_coeff)) > 1e-4:
            # coefficient magnitude changed: close the group, start a new one
            pauli_list.append(PauliSumOp.from_list(cur_list))
            cur_coeff = coeff
            cur_list = [(ext, coeff)]
        else:
            # same magnitude: extend the current group
            cur_list.append((ext, coeff))
    # flush the final group
    pauli_list.append(PauliSumOp.from_list(cur_list))
    return pauli_list
# Get the Hamiltonian by reading in pre-computed file
def ReadHamiltonian(nqubit):
    """Load the `nqubit`-qubit Hamiltonian from disk as a PauliSumOp."""
    filename = os.path.join(f'Hamiltonians/{nqubit}_qubit.txt')
    with open(filename) as fh:
        ham_dict = json.loads(fh.read())
    # build the operator directly from (pauli-string, coefficient) pairs
    terms = [(pauli, ham_dict[pauli]) for pauli in ham_dict]
    return PauliSumOp.from_list(terms)
# Create an empty noise model
noise_parameters = NoiseModel()
if Type_of_Simulator == "built_in":
    # Add depolarizing error to all single qubit gates with error rate 0.05% and to all two qubit gates with error rate 0.5%
    depol_one_qb_error = 0.05
    depol_two_qb_error = 0.005
    noise_parameters.add_all_qubit_quantum_error(depolarizing_error(depol_one_qb_error, 1), ['rx', 'ry', 'rz'])
    noise_parameters.add_all_qubit_quantum_error(depolarizing_error(depol_two_qb_error, 2), ['cx'])
    # Add amplitude damping error to all single qubit gates with error rate 0.0% and to all two qubit gates with error rate 0.0%
    # NOTE(review): despite the comment this uses depolarizing_error with rate
    # 0.0, so it is currently a no-op -- confirm whether amplitude_damping_error
    # was intended before changing.
    amp_damp_one_qb_error = 0.0
    amp_damp_two_qb_error = 0.0
    noise_parameters.add_all_qubit_quantum_error(depolarizing_error(amp_damp_one_qb_error, 1), ['rx', 'ry', 'rz'])
    noise_parameters.add_all_qubit_quantum_error(depolarizing_error(amp_damp_two_qb_error, 2), ['cx'])
    # Add reset noise to all single qubit resets
    reset_to_zero_error = 0.005
    reset_to_one_error = 0.005
    noise_parameters.add_all_qubit_quantum_error(reset_error(reset_to_zero_error, reset_to_one_error),["reset"])
    # Add readout error
    p0given1_error = 0.000
    p1given0_error = 0.000
    error_meas = ReadoutError([[1 - p1given0_error, p1given0_error], [p0given1_error, 1 - p0given1_error]])
    noise_parameters.add_all_qubit_readout_error(error_meas)
    #print(noise_parameters)
elif Type_of_Simulator in ("FAKE", "FAKEV2"):
    # BUGFIX: original condition was `== "FAKE"or"FAKEV2"`, which parses as
    # `(... == "FAKE") or "FAKEV2"` and is always truthy; use a membership
    # test so only the fake-backend modes derive noise from the backend.
    noise_parameters = NoiseModel.from_backend(backend)
    #print(noise_parameters)
### Analysis methods to be expanded and eventually compiled into a separate analysis.py file
import math, functools
def hellinger_fidelity_with_expected(p, q):
    """Compute the Hellinger fidelity between a measured counts
    distribution `p` and the expected distribution `q`.

    References:
        `Hellinger Distance @ wikipedia <https://en.wikipedia.org/wiki/Hellinger_distance>`_
        Qiskit Hellinger Fidelity Function
    """
    p_total = sum(p.values())
    q_total = sum(q.values())
    if q_total == 0:
        print("ERROR: polarization_fidelity(), expected distribution is invalid, all counts equal to 0")
        return 0
    # normalize both distributions to probabilities
    p_probs = {key: count / p_total for key, count in p.items()}
    q_probs = {key: count / q_total for key, count in q.items()}
    # accumulate squared differences of sqrt-probabilities over the key union;
    # matched keys are removed from q_probs so the leftovers can be summed
    total = 0
    for key, prob in p_probs.items():
        if key in q_probs.keys():
            total += (np.sqrt(prob) - np.sqrt(q_probs[key]))**2
            del q_probs[key]
        else:
            total += prob
    total += sum(q_probs.values())
    # in some situations (error mitigation) this can go negative, use abs value
    if total < 0:
        print(f"WARNING: using absolute value in fidelity calculation")
        total = abs(total)
    hellinger_distance = np.sqrt(total)/np.sqrt(2)
    return (1 - hellinger_distance**2)**2
def polarization_fidelity(counts, correct_dist, thermal_dist=None):
    """
    Combine Hellinger fidelity with polarization rescaling.

    counts: the measurement outcomes after `num_shots` algorithm runs
    correct_dist: the distribution we expect to get for the algorithm running perfectly
    thermal_dist: optional depolarized-state distribution; when `None` it is
        generated as `uniform_dist` over the measured qubits

    Returns a dict with the rescaled 'fidelity' and the raw 'hf_fidelity'.
    Polarization from: `https://arxiv.org/abs/2008.11294v1`
    """
    num_measured_qubits = len(list(correct_dist.keys())[0])
    # left-pad measured bitstrings so widths match the reference keys
    counts = {key.zfill(num_measured_qubits): val for key, val in counts.items()}
    # hellinger fidelity between measured values and the correct distribution
    hf_fidelity = hellinger_fidelity_with_expected(counts, correct_dist)
    # to limit cpu and memory utilization, skip noise correction if more than 16 measured qubits
    if num_measured_qubits > 16:
        return { 'fidelity':hf_fidelity, 'hf_fidelity':hf_fidelity }
    # if not provided, generate thermal dist based on number of qubits
    if thermal_dist == None:
        thermal_dist = uniform_dist(num_measured_qubits)
    # the fidelity of a fully depolarized state sets the rescaling floor
    floor_fidelity = hellinger_fidelity_with_expected(thermal_dist, correct_dist)
    # rescale so that random guessing maps to 0 (polarization)
    fidelity = rescale_fidelity(hf_fidelity, floor_fidelity, 0)
    return { 'fidelity':fidelity, 'hf_fidelity':hf_fidelity }
## Uniform distribution function commonly used
def rescale_fidelity(fidelity, floor_fidelity, new_floor_fidelity):
    """
    Linearly rescale a fidelity so results are comparable across benchmarks.

    fidelity: raw fidelity to rescale
    floor_fidelity: threshold fidelity which is equivalent to random guessing
    new_floor_fidelity: what we rescale the floor_fidelity to

    Ex, with floor_fidelity = 0.25, new_floor_fidelity = 0.0:
        1 -> 1;  0.25 -> 0;  0.5 -> 0.3333;
    """
    # line through (1, 1) and (floor_fidelity, new_floor_fidelity)
    slope = (1 - new_floor_fidelity) / (1 - floor_fidelity)
    rescaled_fidelity = slope * (fidelity - 1) + 1
    # clamp to the valid range [0, 1]
    if rescaled_fidelity < 0:
        rescaled_fidelity = 0.0
    elif rescaled_fidelity > 1:
        rescaled_fidelity = 1.0
    return rescaled_fidelity
def uniform_dist(num_state_qubits):
    """Return the uniform distribution over all basis states of the
    given register width, keyed by zero-padded bitstrings."""
    n_states = 2**num_state_qubits
    prob = 1 / n_states
    return {format(i, f"0{num_state_qubits}b"): prob for i in range(n_states)}
from matplotlib.patches import Rectangle
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap, Normalize
from matplotlib.patches import Circle
############### Color Map functions
# Create a selection of colormaps from which to choose; default to custom_spectral
cmap_spectral = plt.get_cmap('Spectral')
cmap_greys = plt.get_cmap('Greys')
cmap_blues = plt.get_cmap('Blues')
cmap_custom_spectral = None  # built lazily by create_custom_spectral_cmap()
# the default colormap is the spectral map
cmap = cmap_spectral        # active colormap used by get_color()
cmap_orig = cmap_spectral   # remembered original, for later restore
# current cmap normalization function (default None)
cmap_norm = None
# defaults controlling how low-fidelity colors fade toward grey
default_fade_low_fidelity_level = 0.16
default_fade_rate = 0.7
# Specify a normalization function here (default None)
def set_custom_cmap_norm(vmin, vmax):
    """Install (or clear) the Normalize mapping used when coloring cells."""
    global cmap_norm
    # a degenerate or full [0,1] range means "no normalization"
    if vmin == vmax or (vmin == 0.0 and vmax == 1.0):
        print("... setting cmap norm to None")
        cmap_norm = None
        return
    print(f"... setting cmap norm to [{vmin}, {vmax}]")
    cmap_norm = Normalize(vmin=vmin, vmax=vmax)
# Remake the custom spectral colormap with user settings
def set_custom_cmap_style(
        fade_low_fidelity_level=default_fade_low_fidelity_level,
        fade_rate=default_fade_rate):
    """Rebuild the custom spectral colormap and make it the active cmap."""
    global cmap, cmap_custom_spectral, cmap_orig
    new_cmap = create_custom_spectral_cmap(
        fade_low_fidelity_level=fade_low_fidelity_level,
        fade_rate=fade_rate)
    cmap_custom_spectral = new_cmap
    cmap = new_cmap
    cmap_orig = new_cmap
# Create the custom spectral colormap from the base spectral
def create_custom_spectral_cmap(
        fade_low_fidelity_level=default_fade_low_fidelity_level,
        fade_rate=default_fade_rate):
    """Return a ListedColormap: the base Spectral map with its lowest
    `fade_low_fidelity_level` fraction of colors faded toward grey.

    fade_low_fidelity_level -- fraction of the map (0..1) to fade
    fade_rate -- exponent shaping how quickly the fade recedes
    """
    # determine the breakpoint from the fade level
    # NOTE(review): `breakpoint` shadows the Python builtin of the same name
    num_colors = 100
    breakpoint = round(fade_low_fidelity_level * num_colors)
    # get color list for spectral map
    spectral_colors = [cmap_spectral(v/num_colors) for v in range(num_colors)]
    #print(fade_rate)
    # create a list of colors to replace those below the breakpoint
    # and fill with "faded" color entries (in reverse)
    low_colors = [0] * breakpoint
    #for i in reversed(range(breakpoint)):
    for i in range(breakpoint):
        # x is index of low colors, normalized 0 -> 1
        x = i / breakpoint
        # get color at this index
        bc = spectral_colors[i]
        r0 = bc[0]
        g0 = bc[1]
        b0 = bc[2]
        z0 = bc[3]
        r_delta = 0.92 - r0
        #print(f"{x} {bc} {r_delta}")
        # compute saturation and greyness ratio
        sat_ratio = 1 - x
        #grey_ratio = 1 - x
        ''' attempt at a reflective gradient
        if i >= breakpoint/2:
            xf = 2*(x - 0.5)
            yf = pow(xf, 1/fade_rate)/2
            grey_ratio = 1 - (yf + 0.5)
        else:
            xf = 2*(0.5 - x)
            yf = pow(xf, 1/fade_rate)/2
            grey_ratio = 1 - (0.5 - yf)
        '''
        grey_ratio = 1 - math.pow(x, 1/fade_rate)
        #print(f"  {xf} {yf} ")
        #print(f"  {sat_ratio} {grey_ratio}")
        # push red up toward 0.92, then pull green/blue toward red (greying)
        r = r0 + r_delta * sat_ratio
        g_delta = r - g0
        b_delta = r - b0
        g = g0 + g_delta * grey_ratio
        b = b0 + b_delta * grey_ratio
        #print(f"{r} {g} {b}\n")
        low_colors[i] = (r,g,b,z0)
    #print(low_colors)
    # combine the faded low colors with the regular spectral cmap to make a custom version
    cmap_custom_spectral = ListedColormap(low_colors + spectral_colors[breakpoint:])
    #spectral_colors = [cmap_custom_spectral(v/10) for v in range(10)]
    #for i in range(10): print(spectral_colors[i])
    #print("")
    return cmap_custom_spectral
# Make the custom spectral color map the default on module init
# (rebinds the module-level `cmap` / `cmap_orig` globals)
set_custom_cmap_style()
# Arrange the stored annotations optimally and add to plot
def anno_volumetric_data(ax, depth_base=2, label='Depth',
            labelpos=(0.2, 0.7), labelrot=0, type=1, fill=True):
    """Sort the accumulated annotation points, nudge overlapping labels
    apart vertically, and draw them on `ax` with arrows.

    Reads/rewrites the module globals filled by plot_volumetric_data;
    the `label` parameter is overwritten inside the loop.
    """
    # sort all arrays by the x point of the text (anno_offs)
    global x_anno_offs, y_anno_offs, anno_labels, x_annos, y_annos
    all_annos = sorted(zip(x_anno_offs, y_anno_offs, anno_labels, x_annos, y_annos))
    x_anno_offs = [a for a,b,c,d,e in all_annos]
    y_anno_offs = [b for a,b,c,d,e in all_annos]
    anno_labels = [c for a,b,c,d,e in all_annos]
    x_annos = [d for a,b,c,d,e in all_annos]
    y_annos = [e for a,b,c,d,e in all_annos]
    #print(f"{x_anno_offs}")
    #print(f"{y_anno_offs}")
    #print(f"{anno_labels}")
    for i in range(len(anno_labels)):
        x_anno = x_annos[i]
        y_anno = y_annos[i]
        x_anno_off = x_anno_offs[i]
        y_anno_off = y_anno_offs[i]
        label = anno_labels[i]
        if i > 0:
            # if this label is too close to the previous one, shift it down
            x_delta = abs(x_anno_off - x_anno_offs[i - 1])
            y_delta = abs(y_anno_off - y_anno_offs[i - 1])
            if y_delta < 0.7 and x_delta < 2:
                y_anno_off = y_anno_offs[i] = y_anno_offs[i - 1] - 0.6
                #x_anno_off = x_anno_offs[i] = x_anno_offs[i - 1] + 0.1
        ax.annotate(label,
            xy=(x_anno+0.0, y_anno+0.1),
            arrowprops=dict(facecolor='black', shrink=0.0,
                width=0.5, headwidth=4, headlength=5, edgecolor=(0.8,0.8,0.8)),
            xytext=(x_anno_off + labelpos[0], y_anno_off + labelpos[1]),
            rotation=labelrot,
            horizontalalignment='left', verticalalignment='baseline',
            color=(0.2,0.2,0.2),
            clip_on=True)
    # NOTE(review): `saveplots` is a global defined elsewhere in this file;
    # saving here couples annotation drawing to file output -- confirm intent.
    if saveplots == True:
        plt.savefig("VolumetricPlotSample.jpg")
# Plot one group of data for volumetric presentation
def plot_volumetric_data(ax, w_data, d_data, f_data, depth_base=2, label='Depth',
        labelpos=(0.2, 0.7), labelrot=0, type=1, fill=True, w_max=18, do_label=False, do_border=True,
        x_size=1.0, y_size=1.0, zorder=1, offset_flag=False,
        max_depth=0, suppress_low_fidelity=False):
    """Draw one group of (width, depth, fidelity) cells on the volumetric
    axes and record an annotation anchor point for the group label.

    w_data/d_data/f_data -- parallel arrays of circuit width, depth, fidelity
    Appends to the module-level annotation lists (x_annos, y_annos, ...).
    """
    # since data may come back out of order, save point at max y for annotation
    i_anno = 0
    x_anno = 0
    y_anno = 0
    # plot data rectangles
    # NOTE(review): low_fidelity_count starts True and is only ever set True
    # again below, so the suppress_low_fidelity branch always breaks on the
    # first low cell -- confirm whether False was intended.
    low_fidelity_count = True
    last_y = -1
    k = 0
    # determine y-axis dimension for one pixel to use for offset of bars that start at 0
    (_, dy) = get_pixel_dims(ax)
    # do this loop in reverse to handle the case where earlier cells are overlapped by later cells
    for i in reversed(range(len(d_data))):
        x = depth_index(d_data[i], depth_base)
        y = float(w_data[i])
        f = f_data[i]
        # each time we star a new row, reset the offset counter
        # DEVNOTE: this is highly specialized for the QA area plots, where there are 8 bars
        # that represent time starting from 0 secs. We offset by one pixel each and center the group
        if y != last_y:
            last_y = y;
            k = 3 # hardcoded for 8 cells, offset by 3
        #print(f"{i = } {x = } {y = }")
        if max_depth > 0 and d_data[i] > max_depth:
            #print(f"... excessive depth (2), skipped; w={y} d={d_data[i]}")
            break;
        # reject cells with low fidelity
        # NOTE(review): suppress_low_fidelity_level is a module global defined
        # elsewhere in this file.
        if suppress_low_fidelity and f < suppress_low_fidelity_level:
            if low_fidelity_count: break
            else: low_fidelity_count = True
        # the only time this is False is when doing merged gradation plots
        if do_border == True:
            # this case is for an array of x_sizes, i.e. each box has different width
            if isinstance(x_size, list):
                # draw each of the cells, with no offset
                if not offset_flag:
                    ax.add_patch(box_at(x, y, f, type=type, fill=fill, x_size=x_size[i], y_size=y_size, zorder=zorder))
                # use an offset for y value, AND account for x and width to draw starting at 0
                else:
                    ax.add_patch(box_at((x/2 + x_size[i]/4), y + k*dy, f, type=type, fill=fill, x_size=x+ x_size[i]/2, y_size=y_size, zorder=zorder))
            # this case is for only a single cell
            else:
                ax.add_patch(box_at(x, y, f, type=type, fill=fill, x_size=x_size, y_size=y_size))
        # save the annotation point with the largest y value
        if y >= y_anno:
            x_anno = x
            y_anno = y
            i_anno = i
        # move the next bar down (if using offset)
        k -= 1
    # if no data rectangles plotted, no need for a label
    if x_anno == 0 or y_anno == 0:
        return
    x_annos.append(x_anno)
    y_annos.append(y_anno)
    anno_dist = math.sqrt( (y_anno - 1)**2 + (x_anno - 1)**2 )
    # adjust radius of annotation circle based on maximum width of apps
    anno_max = 10
    if w_max > 10:
        anno_max = 14
    if w_max > 14:
        anno_max = 18
    scale = anno_max / anno_dist
    # offset of text from end of arrow
    if scale > 1:
        x_anno_off = scale * x_anno - x_anno - 0.5
        y_anno_off = scale * y_anno - y_anno
    else:
        x_anno_off = 0.7
        y_anno_off = 0.5
    x_anno_off += x_anno
    y_anno_off += y_anno
    # print(f"... {xx} {yy} {anno_dist}")
    x_anno_offs.append(x_anno_off)
    y_anno_offs.append(y_anno_off)
    anno_labels.append(label)
    if do_label:
        ax.annotate(label, xy=(x_anno+labelpos[0], y_anno+labelpos[1]), rotation=labelrot,
            horizontalalignment='left', verticalalignment='bottom', color=(0.2,0.2,0.2))
# module-level accumulators for annotation placement
# (filled by plot_volumetric_data, consumed by anno_volumetric_data,
# reset by vplot_anno_init)
x_annos = []
y_annos = []
x_anno_offs = []
y_anno_offs = []
anno_labels = []
# init arrays to hold annotation points for label spreading
def vplot_anno_init ():
    """Reset the module-level annotation accumulators to empty lists."""
    global x_annos, y_annos, x_anno_offs, y_anno_offs, anno_labels
    x_annos, y_annos = [], []
    x_anno_offs, y_anno_offs = [], []
    anno_labels = []
# Number of ticks on volumetric depth axis
max_depth_log = 22
# average transpile factor between base QV depth and our depth based on results from QV notebook
# (used by plot_volumetric_background to size the QV rectangle)
QV_transpile_factor = 12.7
# format a number using K,M,B,T for large numbers, optionally rounding to 'digits' decimal places if num > 1
# (sign handling may be incorrect)
def format_number(num, digits=0):
    """Format `num` at 3 significant figures with a K/M/B/T suffix."""
    if isinstance(num, str): num = float(num)
    # reduce the magnitude to 3 significant figures
    num = float('{:.3g}'.format(abs(num)))
    suffix = ''
    thresholds = {'T': 1000000000000, 'B': 1000000000, 'M': 1000000, 'K': 1000, '': 1}
    for label, scale in thresholds.items():
        scaled = num / scale
        if scaled >= 1:
            num = round(scaled, digits)
            suffix = label
            break
    text = f"{str(num)}"
    # strip a trailing ".0"-style remainder
    if '.' in text:
        text = text.rstrip('0').rstrip('.')
    return f"{text}{suffix}"
# Return the color associated with the specific value, using color map norm
def get_color(value):
    """Map a fidelity value to an RGBA color via the active colormap."""
    # apply the installed normalization function, if any
    if cmap_norm:
        value = float(cmap_norm(value))
    # compress the usable range slightly, depending on the active map
    if cmap == cmap_spectral:
        scaled = 0.05 + value*0.9
    elif cmap == cmap_blues:
        scaled = 0.00 + value*1.0
    else:
        scaled = 0.0 + value*0.95
    return cmap(scaled)
# Return the x and y equivalent to a single pixel for the given plot axis
def get_pixel_dims(ax):
    """Return (dx, dy): the data-space extent of one display pixel on `ax`."""
    # transform unit vectors through the data transform -> pixels per data unit
    deltas = ax.transData.transform([(0,1),(1,0)]) - ax.transData.transform((0,0))
    pixels_per_x = deltas[1][0]
    pixels_per_y = deltas[0][1]
    # invert to get data units per pixel
    return (1 / pixels_per_x, 1 / pixels_per_y)
############### Helper functions
# return the base index for a circuit depth value
# take the log in the depth base, and add 1
def depth_index(d, depth_base):
    """Map a depth value onto the volumetric plot's log-scale x axis."""
    # a degenerate base means the axis is linear
    if depth_base <= 1:
        return d
    # log(0) is undefined; place zero depth at the origin
    return math.log(d, depth_base) + 1 if d != 0 else 0
# draw a box at x,y with various attributes
def box_at(x, y, value, type=1, fill=True, x_size=1.0, y_size=1.0, alpha=1.0, zorder=1):
    """Return a Rectangle centered at (x, y), colored by fidelity `value`."""
    # clamp fidelity into [0, 1] before color lookup
    clamped = max(0.0, min(value, 1.0))
    face_color = get_color(clamped)
    edge_color = (0.5,0.5,0.5)
    return Rectangle((x - (x_size/2), y - (y_size/2)), x_size, y_size,
        alpha=alpha,
        edgecolor = edge_color,
        facecolor = face_color,
        fill=fill,
        lw=0.5*y_size,
        zorder=zorder)
# draw a circle at x,y with various attributes
def circle_at(x, y, value, type=1, fill=True):
    """Return a unit-diameter Circle at (x, y), colored by fidelity `value`."""
    diameter = 1.0
    clamped = max(0.0, min(value, 1.0))
    face_color = get_color(clamped)
    edge_color = (0.5,0.5,0.5)
    return Circle((x, y), diameter/2,
        alpha = 0.7, # DEVNOTE: changed to 0.7 from 0.5, to handle only one cell
        edgecolor = edge_color,
        facecolor = face_color,
        fill=fill,
        lw=0.5)
def box4_at(x, y, value, type=1, fill=True, alpha=1.0):
    """Return a quarter-width Rectangle at (x, y); edge drawn in the
    face color (the grey edge in the original was overridden)."""
    size = 1.0
    clamped = max(0.0, min(value, 1.0))
    face_color = get_color(clamped)
    edge_color = face_color
    return Rectangle((x - size/8, y - size/2), size/4, size,
        alpha=alpha,
        edgecolor = edge_color,
        facecolor = face_color,
        fill=fill,
        lw=0.1)
# Draw a Quantum Volume rectangle with specified width and depth, and grey-scale value
def qv_box_at(x, y, qv_width, qv_depth, value, depth_base):
    """Return a grey Rectangle covering the QV region of the plot."""
    #print(f"{qv_width} {qv_depth} {depth_index(qv_depth, depth_base)}")
    grey = (value,value,value)
    return Rectangle((x - 0.5, y - 0.5), depth_index(qv_depth, depth_base), qv_width,
        edgecolor = grey,
        facecolor = grey,
        fill=True,
        lw=1)
def bkg_box_at(x, y, value=0.9):
    """Return a small grey background cell centered at (x, y)."""
    size = 0.6
    grey = (value,value,value)
    return Rectangle((x - size/2, y - size/2), size, size,
        edgecolor = (.75,.75,.75),
        facecolor = grey,
        fill=True,
        lw=0.5)
def bkg_empty_box_at(x, y):
    """Return a small white (empty) background cell centered at (x, y)."""
    size = 0.6
    return Rectangle((x - size/2, y - size/2), size, size,
        edgecolor = (.75,.75,.75),
        facecolor = (1.0,1.0,1.0),
        fill=True,
        lw=0.5)
# Plot the background for the volumetric analysis
def plot_volumetric_background(max_qubits=11, QV=32, depth_base=2, suptitle=None, avail_qubits=0, colorbar_label="Avg Result Fidelity"):
    """Create the volumetric-positioning figure: log-depth x axis, width
    y axis, grey QV rectangle, background cell grid and a colorbar.

    QV semantics: 0 = draw no QV background/label; negative = use abs(QV)
    and mark the label as an estimate. Returns the matplotlib axes.
    """
    if suptitle == None:
        suptitle = f"Volumetric Positioning\nCircuit Dimensions and Fidelity Overlaid on Quantum Volume = {QV}"
    QV0 = QV
    qv_estimate = False
    est_str = ""
    if QV == 0:                 # QV = 0 indicates "do not draw QV background or label"
        QV = 2048
    elif QV < 0:                # QV < 0 indicates "add est. to label"
        QV = -QV
        qv_estimate = True
        est_str = " (est.)"
    if avail_qubits > 0 and max_qubits > avail_qubits:
        max_qubits = avail_qubits
    # choose the y-axis extent from the number of qubits
    max_width = 13
    if max_qubits > 11: max_width = 18
    if max_qubits > 14: max_width = 20
    if max_qubits > 16: max_width = 24
    if max_qubits > 24: max_width = 33
    #print(f"... {avail_qubits} {max_qubits} {max_width}")
    plot_width = 6.8
    plot_height = 0.5 + plot_width * (max_width / max_depth_log)
    #print(f"... {plot_width} {plot_height}")
    # define matplotlib figure and axis; use constrained layout to fit colorbar to right
    fig, ax = plt.subplots(figsize=(plot_width, plot_height), constrained_layout=True)
    plt.suptitle(suptitle)
    plt.xlim(0, max_depth_log)
    plt.ylim(0, max_width)
    # circuit depth axis (x axis): tick v shows depth depth_base**(v-1)
    xbasis = [x for x in range(1,max_depth_log)]
    xround = [depth_base**(x-1) for x in xbasis]
    xlabels = [format_number(x) for x in xround]
    ax.set_xlabel('Circuit Depth')
    ax.set_xticks(xbasis)
    plt.xticks(xbasis, xlabels, color='black', rotation=45, ha='right', va='top', rotation_mode="anchor")
    # other label options
    #plt.xticks(xbasis, xlabels, color='black', rotation=-60, ha='left')
    #plt.xticks(xbasis, xlabels, color='black', rotation=-45, ha='left', va='center', rotation_mode="anchor")
    # circuit width axis (y axis)
    ybasis = [y for y in range(1,max_width)]
    yround = [1,2,3,4,5,6,7,8,10,12,15]     # not used now
    ylabels = [str(y) for y in yround]      # not used now
    #ax.set_ylabel('Circuit Width (Number of Qubits)')
    ax.set_ylabel('Circuit Width')
    ax.set_yticks(ybasis)
    #create simple line plot (not used right now)
    #ax.plot([0, 10],[0, 10])
    # QV rectangle dimensions: log2(QV) wide, transpile-scaled deep
    log2QV = math.log2(QV)
    QV_width = log2QV
    QV_depth = log2QV * QV_transpile_factor
    # show a quantum volume rectangle of QV = 64 e.g. (6 x 6)
    if QV0 != 0:
        ax.add_patch(qv_box_at(1, 1, QV_width, QV_depth, 0.87, depth_base))
    else:
        ax.add_patch(qv_box_at(1, 1, QV_width, QV_depth, 0.91, depth_base))
    # the untranspiled version is commented out - we do not show this by default
    # also show a quantum volume rectangle un-transpiled
    # ax.add_patch(qv_box_at(1, 1, QV_width, QV_width, 0.80, depth_base))
    # show 2D array of volumetric cells based on this QV_transpiled
    # DEVNOTE: we use +1 only to make the visuals work; s/b without
    # Also, the second arg of the min( below seems incorrect, needs correction
    maxprod = (QV_width + 1) * (QV_depth + 1)
    for w in range(1, min(max_width, round(QV) + 1)):
        # don't show VB squares if width greater than known available qubits
        if avail_qubits != 0 and w > avail_qubits:
            continue
        i_success = 0
        for d in xround:
            # polarization factor for low circuit widths
            maxtest = maxprod / ( 1 - 1 / (2**w) )
            # if circuit would fail here, don't draw box
            if d > maxtest: continue
            if w * d > maxtest: continue
            # guess for how to capture how hardware decays with width, not entirely correct
            # # reduce maxtext by a factor of number of qubits > QV_width
            # # just an approximation to account for qubit distances
            # if w > QV_width:
            #     over = w - QV_width
            #     maxtest = maxtest / (1 + (over/QV_width))
            # draw a box at this width and depth
            id = depth_index(d, depth_base)
            # show vb rectangles; if not showing QV, make all hollow (or less dark)
            if QV0 == 0:
                #ax.add_patch(bkg_empty_box_at(id, w))
                ax.add_patch(bkg_box_at(id, w, 0.95))
            else:
                ax.add_patch(bkg_box_at(id, w, 0.9))
            # save index of last successful depth
            i_success += 1
        # plot empty rectangle after others
        # NOTE(review): if every depth in xround succeeds, xround[i_success]
        # would raise IndexError -- confirm this cannot occur in practice.
        d = xround[i_success]
        id = depth_index(d, depth_base)
        ax.add_patch(bkg_empty_box_at(id, w))
    # Add annotation showing quantum volume
    if QV0 != 0:
        t = ax.text(max_depth_log - 2.0, 1.5, f"QV{est_str}={QV}", size=12,
                horizontalalignment='right', verticalalignment='center', color=(0.2,0.2,0.2),
                bbox=dict(boxstyle="square,pad=0.3", fc=(.9,.9,.9), ec="grey", lw=1))
    # add colorbar to right of plot
    plt.colorbar(cm.ScalarMappable(cmap=cmap), cax=None, ax=ax,
            shrink=0.6, label=colorbar_label, panchor=(0.0, 0.7))
    return ax
# Function to calculate circuit depth
def calculate_circuit_depth(qc):
    """Return the depth of the given quantum circuit."""
    return qc.depth()
def calculate_transpiled_depth(qc, basis_selector):
    """Transpile `qc` and return (depth, transpiled_circuit).

    basis_selector 0 transpiles against the module-level `backend`;
    otherwise against the chosen entry of basis_gates_array.
    """
    if basis_selector == 0:
        # use the backend's own basis/coupling map
        transpiled = transpile(qc, backend)
    else:
        transpiled = transpile(qc, basis_gates=basis_gates_array[basis_selector],
                               seed_transpiler=0)
    return transpiled.depth(), transpiled
def plot_fidelity_data(fidelity_data, Hf_fidelity_data, title):
    """Average the (polarization) fidelity and Hellinger fidelity per
    qubit count and return the two lists of means, ordered by the keys
    of `fidelity_data`. `title` is accepted for interface compatibility.
    """
    qubit_counts = list(fidelity_data.keys())
    avg_fidelity_means = [np.average(fidelity_data[n]) for n in qubit_counts]
    avg_Hf_fidelity_means = [np.mean(Hf_fidelity_data[n]) for n in qubit_counts]
    return avg_fidelity_means, avg_Hf_fidelity_means
# names of QuantumCircuit methods corresponding to standard gates;
# filled once by list_of_standardgates()
list_of_gates = []
def list_of_standardgates():
    """Populate the module-level `list_of_gates` with QuantumCircuit
    method names for all standard-library gates (plus measure/barrier).

    Heuristic: a gate class maps to the circuit method whose name equals
    the lower-cased class name with the 'Gate' suffix removed.
    """
    import qiskit.circuit.library as lib
    from qiskit.circuit import Gate
    import inspect
    # List all the attributes of the library module
    gate_list = dir(lib)
    # Filter out non-gate classes (like functions, variables, etc.)
    gates = [gate for gate in gate_list if isinstance(getattr(lib, gate), type) and issubclass(getattr(lib, gate), Gate)]
    # Get method names from QuantumCircuit
    circuit_methods = inspect.getmembers(QuantumCircuit, inspect.isfunction)
    method_names = [name for name, _ in circuit_methods]
    # Map gate class names to method names
    gate_to_method = {}
    for gate in gates:
        gate_class = getattr(lib, gate)
        class_name = gate_class.__name__.replace('Gate', '').lower()  # Normalize class name
        for method in method_names:
            # the 'cr' -> 'c-r' variant covers controlled-rotation naming
            if method == class_name or method == class_name.replace('cr', 'c-r'):
                gate_to_method[gate] = method
                break
    # Add common operations that are not strictly gates
    additional_operations = {
        'Measure': 'measure',
        'Barrier': 'barrier',
    }
    gate_to_method.update(additional_operations)
    for k,v in gate_to_method.items():
        list_of_gates.append(v)
def update_counts(gates, custom_gates):
    """Merge two gate-count dictionaries, summing counts of shared keys.
    Neither input is modified; a new dict is returned."""
    merged = dict(gates)
    for name, count in custom_gates.items():
        merged[name] = merged.get(name, 0) + count
    return merged
def get_gate_counts(gates, custom_gate_defs):
    """Expand custom-gate entries in `gates` into their constituent
    standard-gate counts using `custom_gate_defs`
    (custom gate name -> its operation counts)."""
    result = gates.copy()
    for gate, count in gates.items():
        if gate not in custom_gate_defs:
            continue
        sub_ops = custom_gate_defs[gate]
        # fold the custom gate's operations in once per occurrence
        for _ in range(count):
            result = update_counts(result, sub_ops)
        # the custom gate has been expanded; drop its own entry
        del result[gate]
    return result
# scratch state shared by count_operations / circuit_traverser below
dict_of_qc = dict()         # circuit name -> raw count_ops() dict
custom_gates_defs = dict()  # custom gate name -> expanded operation counts
# Function to count operations recursively
def count_operations(qc):
    """Return the fully-expanded operation counts for `qc`, recursing into
    any custom (non-standard) gates via circuit_traverser.

    Mutates and clears the module-level dict_of_qc / custom_gates_defs.
    """
    dict_of_qc.clear()
    circuit_traverser(qc)
    operations = dict()
    # counts for the top-level circuit itself
    operations = dict_of_qc[qc.name]
    del dict_of_qc[qc.name]
    # print("operations :",operations)
    # print("dict_of_qc :",dict_of_qc)
    for keys in operations.keys():
        if keys not in list_of_gates:
            # collect definitions of custom gates referenced at top level
            for k,v in dict_of_qc.items():
                if k in operations.keys():
                    custom_gates_defs[k] = v
            operations=get_gate_counts(operations,custom_gates_defs)
            custom_gates_defs.clear()
    return operations
def circuit_traverser(qc):
    """Recursively record count_ops() for `qc` and for the definition of
    every non-standard instruction it contains (into dict_of_qc)."""
    dict_of_qc[qc.name] = dict(qc.count_ops())
    for instruction in qc.data:
        if str(instruction.operation.name) not in list_of_gates:
            # descend into the custom gate's defining sub-circuit
            circuit_traverser(instruction.operation.definition)
def get_memory():
    """Return this process's peak resident set size in MB.

    Note: `ru_maxrss` is reported in kilobytes on Linux (the unit differs
    on macOS, where it is bytes).
    """
    import resource
    peak_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return peak_kb / 1024  # in MB
def analyzer(qc, references, num_qubits):
    """Look up the precomputed reference distribution for a circuit.

    The circuit name is "<pauli_string> <coefficient>" (two tokens) or
    "<pauli_string> <coefficient> <circuit_id>" (three tokens); the latter
    is keyed in `references` as "Qubits - {num_qubits} - {circuit_id}".

    Returns (correct_dist, total_name).
    """
    total_name = qc.name
    name_parts = total_name.split()
    pauli_string = name_parts[0]
    if len(name_parts) == 2:
        correct_dist = references[pauli_string]
    else:
        circuit_id = int(name_parts[2])
        correct_dist = references[f"Qubits - {num_qubits} - {circuit_id}"]
    return correct_dist, total_name
# Max qubits must be 12 since the referenced files only go to 12 qubits
MAX_QUBITS = 12
# Benchmark method selector used by run(): 1 builds one circuit per
# (qubit count, circuit_id); 2 builds all circuits in one call.
method = 1
def run(min_qubits=min_qubits, max_qubits=max_qubits, skip_qubits=2,
        max_circuits=max_circuits, num_shots=num_shots):
    """Execute the VQE benchmark over a range of qubit counts and collect metrics.

    For each qubit count (stepping by `skip_qubits`) this builds the VQE
    ansatz circuit(s), executes them on the configured backend, and records:
    creation/elapsed/quantum times, algorithmic and transpiled depths and
    gate counts, fidelity against precomputed reference distributions, and
    (optionally) peak memory usage.

    Relies on module-level configuration (method, backend, platform,
    gate_counts_plots, Memory_utilization_plot, Type_of_Simulator, ...) and
    publishes the validated qubit range via globals for the plotting code.

    Returns a tuple of the accumulated metric lists/dicts (see the return
    statement), or None if max_qubits is too small.
    """
    creation_times = []
    elapsed_times = []
    quantum_times = []
    circuit_depths = []
    transpiled_depths = []
    fidelity_data = {}
    Hf_fidelity_data = {}
    numckts = []
    mem_usage = []
    algorithmic_1Q_gate_counts = []
    algorithmic_2Q_gate_counts = []
    transpiled_1Q_gate_counts = []
    transpiled_2Q_gate_counts = []
    print(f"{benchmark_name} Benchmark Program - {platform}")
    # defining all the standard gates supported by qiskit in a list
    if gate_counts_plots == True:
        list_of_standardgates()
    max_qubits = max(max_qubits, min_qubits)  # max must be >= min
    # validate parameters (smallest circuit is 4 qubits and largest is 10 qubits)
    max_qubits = min(max_qubits, MAX_QUBITS)
    min_qubits = min(max(4, min_qubits), max_qubits)
    if min_qubits % 2 == 1: min_qubits += 1  # min_qubits must be even
    skip_qubits = max(1, skip_qubits)
    if method == 2: max_circuits = 1
    if max_qubits < 4:
        print(f"Max number of qubits {max_qubits} is too low to run method {method} of VQE algorithm")
        return
    # publish the validated range/counts for the plotting code below
    global max_ckts
    max_ckts = max_circuits
    global min_qbits, max_qbits, skp_qubits
    min_qbits = min_qubits
    max_qbits = max_qubits
    skp_qubits = skip_qubits
    print(f"min, max qubits = {min_qubits} {max_qubits}")
    # Execute Benchmark Program N times for multiple circuit sizes
    for input_size in range(min_qubits, max_qubits + 1, skip_qubits):
        # reset random seed
        np.random.seed(0)
        # determine the number of circuits to execute for this group
        num_circuits = min(3, max_circuits)
        num_qubits = input_size
        fidelity_data[num_qubits] = []
        Hf_fidelity_data[num_qubits] = []
        # decides number of electrons
        na = int(num_qubits/4)
        nb = int(num_qubits/4)
        # random seed
        np.random.seed(0)
        numckts.append(num_circuits)
        # create the circuit for given qubit size and simulation parameters, store time metric
        ts = time.time()
        # circuit list
        qc_list = []
        # Method 1 (default)
        if method == 1:
            # loop over circuits
            for circuit_id in range(num_circuits):
                # construct circuit
                qc_single = VQEEnergy(num_qubits, na, nb, circuit_id, method)
                qc_single.name = qc_single.name + " " + str(circuit_id)
                # add to list
                qc_list.append(qc_single)
        # method 2
        elif method == 2:
            # construct all circuits
            qc_list = VQEEnergy(num_qubits, na, nb, 0, method)
            print(qc_list)
        print(f"************\nExecuting [{len(qc_list)}] circuits with num_qubits = {num_qubits}")
        for qc in qc_list:
            print("*********************************************")
            # get circuit id
            if method == 1:
                circuit_id = qc.name.split()[2]
            else:
                circuit_id = qc.name.split()[0]
            # creation time
            creation_time = time.time() - ts
            creation_times.append(creation_time)
            print(f"creation time = {creation_time*1000} ms")
            # Calculate gate count for the algorithmic circuit (excluding barriers and measurements)
            if gate_counts_plots == True:
                operations = count_operations(qc)
                n1q = 0; n2q = 0
                if operations != None:
                    for key, value in operations.items():
                        if key == "measure": continue
                        if key == "barrier": continue
                        # heuristic: controlled / multi-controlled names count as 2Q+
                        if key.startswith("c") or key.startswith("mc"):
                            n2q += value
                        else:
                            n1q += value
                print("operations: ", operations)
                algorithmic_1Q_gate_counts.append(n1q)
                algorithmic_2Q_gate_counts.append(n2q)
            # collapse the sub-circuit levels used in this benchmark (for qiskit)
            qc = qc.decompose()
            # Calculate circuit depth
            depth = calculate_circuit_depth(qc)
            circuit_depths.append(depth)
            # Calculate transpiled circuit depth
            transpiled_depth, qc = calculate_transpiled_depth(qc, basis_selector)
            transpiled_depths.append(transpiled_depth)
            print(f"Algorithmic Depth = {depth} and Normalized Depth = {transpiled_depth}")
            if gate_counts_plots == True:
                # Calculate gate count for the transpiled circuit (excluding barriers and measurements)
                tr_ops = qc.count_ops()
                tr_n1q = 0; tr_n2q = 0
                if tr_ops != None:
                    for key, value in tr_ops.items():
                        if key == "measure": continue
                        if key == "barrier": continue
                        if key.startswith("c"): tr_n2q += value
                        else: tr_n1q += value
                transpiled_1Q_gate_counts.append(tr_n1q)
                transpiled_2Q_gate_counts.append(tr_n2q)
                print(f"Algorithmic 1Q gates = {n1q} ,Algorithmic 2Q gates = {n2q}")
                print(f"Normalized 1Q gates = {tr_n1q} ,Normalized 2Q gates = {tr_n2q}")
            # execution
            if Type_of_Simulator == "built_in":
                # To check if Noise is required
                if Noise_Inclusion == True:
                    # NOTE(review): noise_parameters must be defined elsewhere
                    # in the file when Noise_Inclusion is enabled.
                    noise_model = noise_parameters
                else:
                    noise_model = None
                ts = time.time()
                job = execute(qc, backend, shots=num_shots, noise_model=noise_model)
            elif Type_of_Simulator == "FAKE" or Type_of_Simulator == "FAKEV2":
                ts = time.time()
                job = backend.run(qc, shots=num_shots, noise_model=noise_parameters)
            # retrieving the result
            result = job.result()
            # calculating elapsed time
            elapsed_time = time.time() - ts
            elapsed_times.append(elapsed_time)
            # Calculate quantum processing time
            quantum_time = result.time_taken
            quantum_times.append(quantum_time)
            print(f"Elapsed time = {elapsed_time*1000} ms and Quantum Time = {quantum_time*1000} ms")
            # counts in result object
            counts = result.get_counts()
            # load pre-computed data
            if len(qc.name.split()) == 2:
                filename = os.path.join(f'_common/precalculated_data_{num_qubits}_qubit.json')
                with open(filename) as f:
                    references = json.load(f)
            else:
                filename = os.path.join(f'_common/precalculated_data_{num_qubits}_qubit_method2.json')
                with open(filename) as f:
                    references = json.load(f)
            # Correct distribution to compare with counts
            correct_dist, total_name = analyzer(qc, references, num_qubits)
            # fidelity calculation comparision of counts and correct_dist
            fidelity_dict = polarization_fidelity(counts, correct_dist)
            print(fidelity_dict)
            # modify fidelity based on the coefficient
            # BUG FIX: the original did `fidelity_dict *= ...`, which raises
            # TypeError because polarization_fidelity returns a dict; scale
            # its numeric entries instead.
            if (len(total_name.split()) == 2):
                scale = abs(float(total_name.split()[1])) / normalization
                fidelity_dict['fidelity'] *= scale
                fidelity_dict['hf_fidelity'] *= scale
            fidelity_data[num_qubits].append(fidelity_dict['fidelity'])
            Hf_fidelity_data[num_qubits].append(fidelity_dict['hf_fidelity'])
            # maximum memory utilization (if required)
            if Memory_utilization_plot == True:
                max_mem = get_memory()
                print(f"Maximum Memory Utilized: {max_mem} MB")
                mem_usage.append(max_mem)
            print("*********************************************")
    ##########
    # print a sample circuit
    print("Sample Circuit:"); print(QC_ if QC_ != None else " ... too large!")
    print("\nHartree Fock Generator 'Hf' ="); print(Hf_ if Hf_ != None else " ... too large!")
    print("\nCluster Operator Example 'Cluster Op' ="); print(CO_ if CO_ != None else " ... too large!")
    return (creation_times, elapsed_times, quantum_times, circuit_depths, transpiled_depths,
            fidelity_data, Hf_fidelity_data, numckts, algorithmic_1Q_gate_counts, algorithmic_2Q_gate_counts,
            transpiled_1Q_gate_counts, transpiled_2Q_gate_counts, mem_usage)
# Execute the benchmark program, accumulate metrics, and calculate circuit depths
(creation_times, elapsed_times, quantum_times, circuit_depths, transpiled_depths, fidelity_data, Hf_fidelity_data, numckts,
 algorithmic_1Q_gate_counts, algorithmic_2Q_gate_counts, transpiled_1Q_gate_counts, transpiled_2Q_gate_counts, mem_usage) = run()
# Define the range of qubits for the x-axis
num_qubits_range = range(min_qbits, max_qbits+1, skp_qubits)
print("num_qubits_range =", num_qubits_range)
# Calculate average creation time, elapsed time, quantum processing time, and circuit depth for each number of qubits
avg_creation_times = []
avg_elapsed_times = []
avg_quantum_times = []
avg_circuit_depths = []
avg_transpiled_depths = []
avg_1Q_algorithmic_gate_counts = []
avg_2Q_algorithmic_gate_counts = []
avg_1Q_Transpiled_gate_counts = []
avg_2Q_Transpiled_gate_counts = []
max_memory = []
# numckts holds the number of circuits per qubit-count group, so consecutive
# slices of length `num` pick out each group's metrics in order.
start = 0
for num in numckts:
    avg_creation_times.append(np.mean(creation_times[start:start+num]))
    avg_elapsed_times.append(np.mean(elapsed_times[start:start+num]))
    avg_quantum_times.append(np.mean(quantum_times[start:start+num]))
    avg_circuit_depths.append(np.mean(circuit_depths[start:start+num]))
    avg_transpiled_depths.append(np.mean(transpiled_depths[start:start+num]))
    if gate_counts_plots == True:
        avg_1Q_algorithmic_gate_counts.append(int(np.mean(algorithmic_1Q_gate_counts[start:start+num])))
        avg_2Q_algorithmic_gate_counts.append(int(np.mean(algorithmic_2Q_gate_counts[start:start+num])))
        avg_1Q_Transpiled_gate_counts.append(int(np.mean(transpiled_1Q_gate_counts[start:start+num])))
        avg_2Q_Transpiled_gate_counts.append(int(np.mean(transpiled_2Q_gate_counts[start:start+num])))
    # peak (not mean) memory over the group
    if Memory_utilization_plot == True:max_memory.append(np.max(mem_usage[start:start+num]))
    start += num
# Calculate the fidelity data
avg_f, avg_Hf = plot_fidelity_data(fidelity_data, Hf_fidelity_data, "Fidelity Comparison")
# Plot histograms for average creation time, average elapsed time, average quantum processing time, and average circuit depth versus the number of qubits
# Add labels to the bars
def autolabel(rects, ax, str='{:.3f}', va='top', text_color="black"):
    """Annotate each bar with its height, centered vertically and rotated 90 deg.

    `str` is a format template applied to the bar height. (The parameter
    shadows the builtin, but its name is part of the call sites' keyword
    interface and is kept.)
    """
    for bar in rects:
        bar_height = bar.get_height()
        center_x = bar.get_x() + bar.get_width() / 2
        ax.annotate(str.format(bar_height),
                    xy=(center_x, bar_height / 2),
                    xytext=(0, 0),
                    textcoords="offset points",
                    ha='center', va=va, color=text_color, rotation=90)
bar_width = 0.3
# Determine the number of subplots and their arrangement.
# Axis numbering is kept stable across configurations: ax4/ax5 are the gate
# count plots, ax7 is the memory plot; they only exist when enabled.
if Memory_utilization_plot and gate_counts_plots:
    fig, (ax1, ax2, ax3, ax4, ax5, ax6, ax7) = plt.subplots(7, 1, figsize=(18, 30))
    # Plotting for both memory utilization and gate counts
    # ax1, ax2, ax3, ax4, ax5, ax6, ax7 are available
elif Memory_utilization_plot:
    fig, (ax1, ax2, ax3, ax6, ax7) = plt.subplots(5, 1, figsize=(18, 30))
    # Plotting for memory utilization only
    # ax1, ax2, ax3, ax6, ax7 are available
elif gate_counts_plots:
    fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1, figsize=(18, 30))
    # Plotting for gate counts only
    # ax1, ax2, ax3, ax4, ax5, ax6 are available
else:
    fig, (ax1, ax2, ax3, ax6) = plt.subplots(4, 1, figsize=(18, 30))
    # Default plotting
    # ax1, ax2, ax3, ax6 are available
fig.suptitle(f"General Benchmarks : {platform} - {benchmark_name}", fontsize=16)
for i in range(len(avg_creation_times)):  # converting seconds to milli seconds by multiplying 1000
    avg_creation_times[i] *= 1000
# ax1: average circuit creation time
ax1.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
x = ax1.bar(num_qubits_range, avg_creation_times, color='deepskyblue')
autolabel(ax1.patches, ax1)
ax1.set_xlabel('Number of Qubits')
ax1.set_ylabel('Average Creation Time (ms)')
ax1.set_title('Average Creation Time vs Number of Qubits', fontsize=14)
# ax2: elapsed vs quantum execution time, side by side
ax2.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
for i in range(len(avg_elapsed_times)):  # converting seconds to milli seconds by multiplying 1000
    avg_elapsed_times[i] *= 1000
for i in range(len(avg_quantum_times)):  # converting seconds to milli seconds by multiplying 1000
    avg_quantum_times[i] *= 1000
Elapsed = ax2.bar(np.array(num_qubits_range) - bar_width / 2, avg_elapsed_times, width=bar_width, color='cyan', label='Elapsed Time')
Quantum = ax2.bar(np.array(num_qubits_range) + bar_width / 2, avg_quantum_times, width=bar_width, color='deepskyblue', label='Quantum Time')
autolabel(Elapsed, ax2, str='{:.1f}')
autolabel(Quantum, ax2, str='{:.1f}')
ax2.set_xlabel('Number of Qubits')
ax2.set_ylabel('Average Time (ms)')
ax2.set_title('Average Time vs Number of Qubits')
ax2.legend()
# ax3: algorithmic vs transpiled ("normalized") circuit depth
ax3.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
Normalized = ax3.bar(np.array(num_qubits_range) - bar_width / 2, avg_transpiled_depths, color='cyan', label='Normalized Depth', width=bar_width)  # Adjust width here
Algorithmic = ax3.bar(np.array(num_qubits_range) + bar_width / 2, avg_circuit_depths, color='deepskyblue', label='Algorithmic Depth', width=bar_width)  # Adjust width here
autolabel(Normalized, ax3, str='{:.2f}')
autolabel(Algorithmic, ax3, str='{:.2f}')
ax3.set_xlabel('Number of Qubits')
ax3.set_ylabel('Average Circuit Depth')
ax3.set_title('Average Circuit Depth vs Number of Qubits')
ax3.legend()
if gate_counts_plots == True:
    # ax4/ax5: 1-qubit and 2-qubit gate counts (algorithmic vs transpiled)
    ax4.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
    Normalized_1Q_counts = ax4.bar(np.array(num_qubits_range) - bar_width / 2, avg_1Q_Transpiled_gate_counts, color='cyan', label='Normalized Gate Counts', width=bar_width)  # Adjust width here
    Algorithmic_1Q_counts = ax4.bar(np.array(num_qubits_range) + bar_width / 2, avg_1Q_algorithmic_gate_counts, color='deepskyblue', label='Algorithmic Gate Counts', width=bar_width)  # Adjust width here
    autolabel(Normalized_1Q_counts, ax4, str='{}')
    autolabel(Algorithmic_1Q_counts, ax4, str='{}')
    ax4.set_xlabel('Number of Qubits')
    ax4.set_ylabel('Average 1-Qubit Gate Counts')
    ax4.set_title('Average 1-Qubit Gate Counts vs Number of Qubits')
    ax4.legend()
    ax5.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
    Normalized_2Q_counts = ax5.bar(np.array(num_qubits_range) - bar_width / 2, avg_2Q_Transpiled_gate_counts, color='cyan', label='Normalized Gate Counts', width=bar_width)  # Adjust width here
    Algorithmic_2Q_counts = ax5.bar(np.array(num_qubits_range) + bar_width / 2, avg_2Q_algorithmic_gate_counts, color='deepskyblue', label='Algorithmic Gate Counts', width=bar_width)  # Adjust width here
    autolabel(Normalized_2Q_counts, ax5, str='{}')
    autolabel(Algorithmic_2Q_counts, ax5, str='{}')
    ax5.set_xlabel('Number of Qubits')
    ax5.set_ylabel('Average 2-Qubit Gate Counts')
    ax5.set_title('Average 2-Qubit Gate Counts vs Number of Qubits')
    ax5.legend()
# ax6: Hellinger vs normalized fidelity
ax6.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
Hellinger = ax6.bar(np.array(num_qubits_range) - bar_width / 2, avg_Hf, width=bar_width, label='Hellinger Fidelity', color='cyan')  # Adjust width here
Normalized = ax6.bar(np.array(num_qubits_range) + bar_width / 2, avg_f, width=bar_width, label='Normalized Fidelity', color='deepskyblue')  # Adjust width here
autolabel(Hellinger, ax6, str='{:.2f}')
autolabel(Normalized, ax6, str='{:.2f}')
ax6.set_xlabel('Number of Qubits')
ax6.set_ylabel('Average Value')
ax6.set_title("Fidelity Comparison")
ax6.legend()
if Memory_utilization_plot == True:
    # ax7: peak resident memory per qubit count
    ax7.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
    x = ax7.bar(num_qubits_range, max_memory, color='turquoise', width=bar_width, label="Memory Utilizations")
    autolabel(ax7.patches, ax7)
    ax7.set_xlabel('Number of Qubits')
    ax7.set_ylabel('Maximum Memory Utilized (MB)')
    ax7.set_title('Memory Utilized vs Number of Qubits', fontsize=14)
plt.tight_layout(rect=[0, 0, 1, 0.96])
if saveplots == True:
    plt.savefig("ParameterPlotsSample.jpg")
plt.show()
# Quantum Volume Plot
Suptitle = f"Volumetric Positioning - {platform}"
appname = benchmark_name
# Fall back to QV=2048 when the backend did not report a quantum volume.
if QV_ == None:
    QV = 2048
else:
    QV = QV_
depth_base = 2
ax = plot_volumetric_background(max_qubits=max_qbits, QV=QV, depth_base=depth_base, suptitle=Suptitle, colorbar_label="Avg Result Fidelity")
w_data = num_qubits_range
# determine width for circuit
w_max = 0
for i in range(len(w_data)):
    y = float(w_data[i])
    w_max = max(w_max, y)
d_tr_data = avg_transpiled_depths
f_data = avg_f
plot_volumetric_data(ax, w_data, d_tr_data, f_data, depth_base, fill=True, label=appname, labelpos=(0.4, 0.6), labelrot=15, type=1, w_max=w_max)
anno_volumetric_data(ax, depth_base, label=appname, labelpos=(0.4, 0.6), labelrot=15, type=1, fill=False)
|
https://github.com/rickapocalypse/final_paper_qiskit_sat
|
rickapocalypse
|
from qiskit import *
import matplotlib.pyplot as plt
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library import PhaseOracle
from qiskit.tools.visualization import plot_histogram
from qiskit.tools.monitor import job_monitor
# Criando o circuito grover #
def reflection(n):
    """Grover diffusion operator (reflection about the uniform state) on n qubits."""
    qc = QuantumCircuit(n)
    all_qubits = list(range(0, n))
    qc.h(all_qubits)
    qc.x(all_qubits)
    # Multi-controlled Z realized as H . MCT . H on the last qubit
    qc.h(n - 1)
    qc.mct(list(range(n - 1)), n - 1)
    qc.h(n - 1)
    qc.x(all_qubits)
    qc.h(all_qubits)
    return qc
# Oracle gate: phase oracle built from a boolean SAT expression
n = 4
expr = '(a & ~b & ~c) | (~a & ~b & d) | ~(a | d & c) & (a|~d |~c) | (a & b & ~c)'
oracle = PhaseOracle(expr)
oracle.to_gate()  # NOTE(review): return value discarded; `oracle` is appended directly below
# Grover diffusion gate
g = reflection(n)
g.to_gate()  # NOTE(review): return value discarded here as well
g.name = 'G'
backend = Aer.get_backend('qasm_simulator')
# One Grover iteration: uniform superposition, oracle, diffusion, then measure all qubits
grover_circuit = QuantumCircuit(n, n)
grover_circuit.h(list(range(0, n)))
grover_circuit.append(oracle, list(range(0, n)))
grover_circuit.append(g, list(range(0, n)))
grover_circuit.measure(list(range(0, n)), list(range(0, n)))
grover_circuit.draw(output='mpl')
oracle.draw(output='mpl')
g.draw(output='mpl')
# Simulate and plot the measurement distribution
job = execute(grover_circuit, backend, shots = 1024)
result = job.result()
counts = result.get_counts()
plot_histogram(counts)
plt.show()
# Run the same circuit on IBM Quantum hardware and compare with the simulation
IBMQ.load_account()
host = IBMQ.get_provider('ibm-q')
quantum_computer = host.get_backend('ibmq_lima')
result_qcomputer = execute(grover_circuit, backend= quantum_computer)
job_monitor(result_qcomputer)
result_qcomputer = result_qcomputer.result()
plot_histogram([result_qcomputer.get_counts(grover_circuit), counts], legend=["Qcumputer","simulated"], bar_labels=True)
plt.ylabel("probabilidades")
plt.xlabel("PossΓveis CombinaΓ§Γ΅es")
plt.show()
|
https://github.com/1chooo/Quantum-Oracle
|
1chooo
|
from qiskit import QuantumCircuit, QuantumRegister
qrx = QuantumRegister(3, 'x')  # input register |x>
qry = QuantumRegister(1, 'y')  # output qubit |y>
qc = QuantumCircuit(qrx, qry)
qc.x(qry)  # flip the output qubit
qc.draw("mpl")
|
https://github.com/usamisaori/qLipschitz
|
usamisaori
|
from sklearn.datasets import load_iris
import pennylane as qml
import numpy as np
from pennylane.optimize import AdamOptimizer
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Training split: the first 30 samples of each of the first two Iris classes
# (rows 0-49 and 50-99 hold one class each in the sklearn ordering).
X, Y = load_iris(return_X_y=True)
X_train_0 = X[0:30]
X_train_1 = X[50:80]
X_train = np.vstack((X_train_0, X_train_1))
Y_train_0 = Y[0:30]
Y_train_1 = Y[50:80]
Y_train = np.vstack((Y_train_0, Y_train_1)).flatten()
# Test split: the remaining 20 samples of each of those two classes.
X, Y = load_iris(return_X_y=True)
X_test_0 = X[30:50]
X_test_1 = X[80:100]
X_test = np.vstack((X_test_0, X_test_1))
Y_test_0 = Y[30:50]
Y_test_1 = Y[80:100]
Y_test = np.vstack((Y_test_0, Y_test_1)).flatten()
# Min-max scale each feature to [0, 1] (train and test scaled independently).
X_train = (X_train - X_train.min(axis=0)) / (X_train.max(axis=0) - X_train.min(axis=0))
X_test = (X_test - X_test.min(axis=0)) / (X_test.max(axis=0) - X_test.min(axis=0))
# One qubit per feature; two variational rotation layers.
qubits_num = 4
layers_num = 2
dev = qml.device("default.qubit", wires=qubits_num)
class VQC:
    """Variational quantum classifier trained with Adam.

    Uses the module-level `cost`, `accuracy`, and `predict` functions and the
    (layers_num, qubits_num, 3) rotation-parameter layout.
    """

    def __init__(self):
        # 3 => U3(theta, phi, lambda) angles per qubit per layer
        self.params = (0.1 * np.random.randn(layers_num, qubits_num, 3))
        self.bestparams = self.params
        self.bestcost = 10
        self.opt = AdamOptimizer(0.125)
        self.weights = []
        self.costs = []
        self.accuracies = []

    def fit(self, X_train, Y_train, epoch=300):
        """Optimize the parameters on random mini-batches, tracking history."""
        batch_size = 20
        for step in range(epoch):
            # One optimizer step on a random mini-batch, then evaluate on
            # the full training set.
            chosen = np.random.randint(0, len(X_train), (batch_size,))
            batch_x = X_train[chosen]
            batch_y = Y_train[chosen]
            self.params = self.opt.step(lambda v: cost(v, batch_x, batch_y), self.params)
            current_cost = cost(self.params, X_train, Y_train)
            current_acc = accuracy(self.params, X_train, Y_train)
            # Keep the best parameters seen so far (by training cost).
            if current_cost < self.bestcost:
                self.bestcost = current_cost
                self.bestparams = self.params
            self.weights.append(self.params)
            self.costs.append(current_cost)
            self.accuracies.append(current_acc)
            print(
                "Turn: {:5d} | Cost: {:0.7f} | Accuracy: {:0.2f}% ".format(
                    step, current_cost, current_acc * 100
                ))

    def score(self, X_test, Y_test):
        """Print test-set accuracy of the best parameters found during fit()."""
        predictions = [predict(self.bestparams, data) for data in X_test]
        acc = accuracy(self.bestparams, X_test, Y_test)
        print("FINAL ACCURACY: {:0.2f}%".format(acc * 100))
@qml.qnode(dev)
def circuit(params, data):
    """Angle-encode `data`, apply two Rot layers with a fixed CZ pattern,
    and return per-qubit PauliZ expectations (+1 -> |0>, -1 -> |1>)."""
    angles = [np.pi * feature for feature in data]
    for wire in range(qubits_num):
        qml.RX(angles[wire], wires=wire)
        qml.Rot(*params[0, wire], wires=wire)
    # Fixed entangling pattern (order matters for reproducibility).
    for pair in ([1, 0], [1, 2], [2, 3], [0, 3]):
        qml.CZ(wires=pair)
    for wire in range(qubits_num):
        qml.Rot(*params[1, wire], wires=wire)
    return (qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)),
            qml.expval(qml.PauliZ(2)), qml.expval(qml.PauliZ(3)))
def cost(weights, datas, labels):
    """Mean squared distance between the first-qubit expectation and the
    class target (+1 for label 0, -1 otherwise)."""
    total = 0
    for i, data in enumerate(datas):
        # Only the first qubit's expectation is used for classification.
        p = circuit(weights, data)[0]
        total += (1 - p) ** 2 if labels[i] == 0 else (-1 - p) ** 2
    return total / len(datas)
import qiskit
import numpy as np
from qiskit import QuantumCircuit
from qiskit import Aer, execute
# Simulator backends for the qiskit re-implementation of the classifier.
unitary_backend = Aer.get_backend('unitary_simulator')  # NOTE(review): not used in the visible code
qasm_backend = Aer.get_backend('qasm_simulator')        # sampling backend used by predict()
def predict(params, data):
    """Run the trained 4-qubit ansatz on the qasm simulator and classify.

    Returns 1 when the majority of shots have the highest classical bit set
    (counts keys starting with '1'), else 0.
    """
    qcircuit = QuantumCircuit(4, 4)
    wires = qcircuit.qubits
    # Angle-encode the features.
    for idx, feature in enumerate(data):
        qcircuit.rx(feature * np.pi, wires[idx])
    for idx in range(qubits_num):
        qcircuit.u3(*params[0][idx], wires[idx])
    # Same fixed CZ entangling pattern as the PennyLane circuit.
    for ctrl, tgt in ((0, 1), (1, 2), (2, 3), (0, 3)):
        qcircuit.cz(wires[ctrl], wires[tgt])
    for idx in range(qubits_num):
        qcircuit.u3(*params[1][idx], wires[idx])
    # the measurement
    qcircuit.measure([0, 1, 2, 3], [0, 1, 2, 3])
    # job execution
    shots = 1000
    result_sim = execute(qcircuit, qasm_backend, shots=shots).result()
    counts = result_sim.get_counts(qcircuit)
    one_keys = ('1000', '1001', '1010', '1011', '1100', '1101', '1110', '1111')
    p1 = sum(counts.get(key, 0) for key in one_keys) / shots
    return 1 if p1 > 0.5 else 0
def accuracy(weights, datas, labels):
    """Fraction of samples whose prediction matches its label."""
    predictions = [predict(weights, data) for data in datas]
    hits = sum(1 for guess, truth in zip(predictions, labels) if guess == truth)
    return hits / len(predictions)
# Train briefly and evaluate the variational classifier.
vqc = VQC()
vqc.fit(X_train, Y_train, epoch=10)
vqc.score(X_test, Y_test)
vqc.bestparams  # notebook cell: display the best parameters found
def createCircuit(params, data):
    """Rebuild the classifier ansatz (without measurement) for inspection/drawing."""
    qcircuit = QuantumCircuit(4, 4)
    wires = qcircuit.qubits
    # Angle-encode the features.
    for idx, feature in enumerate(data):
        qcircuit.rx(feature * np.pi, wires[idx])
    for idx in range(qubits_num):
        qcircuit.u3(*params[0][idx], wires[idx])
    # Same fixed CZ entangling pattern as predict().
    for ctrl, tgt in ((0, 1), (1, 2), (2, 3), (0, 3)):
        qcircuit.cz(wires[ctrl], wires[tgt])
    for idx in range(qubits_num):
        qcircuit.u3(*params[1][idx], wires[idx])
    return qcircuit
# Rebuild the trained ansatz for the first training sample and draw it.
qcircuit = createCircuit(vqc.bestparams, X_train[0])
qcircuit.draw(output='mpl')
|
https://github.com/GabrielPontolillo/Quantum_Algorithm_Implementations
|
GabrielPontolillo
|
from qiskit import QuantumCircuit
def create_bell_pair():
    """Create the Bell pair used by the superdense-coding protocol.

    BUG FIX: the previous version applied CY here (it was explicitly marked
    "replaced cx gate"), which breaks the protocol — decode_message() undoes
    an H-then-CX preparation, so the pair must be created with CX.
    """
    qc = QuantumCircuit(2)
    qc.h(1)
    qc.cx(1, 0)
    return qc
def encode_message(qc, qubit, msg):
    """Encode a 2-bit classical message onto one half of a Bell pair.

    msg[1] selects an X gate, msg[0] selects a Z gate. Returns `qc`.

    Raises:
        ValueError: if msg is not exactly two characters drawn from '0'/'1'.

    BUG FIX: the original validity check compared the constant set {0, 1}
    with itself (`set([0,1]).issubset({0,1})`), so malformed message
    characters were never rejected; validate the characters of `msg` itself.
    """
    if len(msg) != 2 or not set(msg).issubset({"0", "1"}):
        raise ValueError(f"message '{msg}' is invalid")
    if msg[1] == "1":
        qc.x(qubit)
    if msg[0] == "1":
        qc.z(qubit)
    return qc
def decode_message(qc):
    """Rotate out of the Bell basis (CX then H) so both bits can be measured."""
    qc.cx(1, 0)
    qc.h(1)
    return qc
|
https://github.com/BOBO1997/osp_solutions
|
BOBO1997
|
import numpy as np
import matplotlib.pyplot as plt
import itertools
from pprint import pprint
# plt.rcParams.update({'font.size': 16}) # enlarge matplotlib fonts
import pickle
import time
import datetime
# Import qubit states Zero (|0>) and One (|1>), and Pauli operators (X, Y, Z)
from qiskit.opflow import Zero, One, I, X, Y, Z
from qiskit import QuantumCircuit, QuantumRegister, IBMQ, execute, transpile, Aer
from qiskit.tools.monitor import job_monitor
from qiskit.circuit import Parameter
from qiskit.transpiler.passes import RemoveBarriers
# Import QREM package
from qiskit.ignis.mitigation.measurement import complete_meas_cal, CompleteMeasFitter
from qiskit.ignis.mitigation import expectation_value
# Import mitiq for zne
import mitiq
# Import state tomography modules
from qiskit.ignis.verification.tomography import state_tomography_circuits
from qiskit.quantum_info import state_fidelity
import sys
import importlib
sys.path.append("./")
import circuit_utils, zne_utils, tomography_utils, sgs_algorithm
importlib.reload(circuit_utils)
importlib.reload(zne_utils)
importlib.reload(tomography_utils)
importlib.reload(sgs_algorithm)
from circuit_utils import *
from zne_utils import *
from tomography_utils import *
from sgs_algorithm import *
# Combine subcircuits into a single multiqubit gate representing a single trotter step
num_qubits = 3
# The final time of the state evolution
target_time = np.pi
# Parameterize variable t to be evaluated at t=pi later
dt = Parameter('t')
# Convert custom quantum circuit into a gate
trot_gate = trotter_gate(dt)
# initial layout: logical qubits (0, 1, 2) -> physical jakarta qubits (5, 3, 1)
initial_layout = [5,3,1]
# Number of trotter steps
num_steps = 100
print("trotter step: ", num_steps)
# Initialize quantum circuit for 3 qubits
qr = QuantumRegister(num_qubits, name="lq")
qc = QuantumCircuit(qr)
# Prepare initial state (remember we are only evolving 3 of the 7 qubits on jakarta qubits (q_5, q_3, q_1) corresponding to the state |110>)
make_initial_state(qc, "110") # DO NOT MODIFY (|q_5,q_3,q_1> = |110>)
subspace_encoder_init110(qc, targets=[0, 1, 2]) # encode
trotterize(qc, trot_gate, num_steps, targets=[1, 2]) # Simulate time evolution under H_heis3 Hamiltonian
subspace_decoder_init110(qc, targets=[0, 1, 2]) # decode
# Evaluate simulation at target_time (t=pi) meaning each trotter step evolves pi/trotter_steps in time
qc = qc.bind_parameters({dt: target_time / num_steps})
print("created qc")
# Generate state tomography circuits to evaluate fidelity of simulation
st_qcs = state_tomography_circuits(qc, [0, 1, 2][::-1]) #! state tomography requires === BIG ENDIAN ===
print("created st_qcs (length:", len(st_qcs), ")")
# remove barriers
st_qcs = [RemoveBarriers()(qc) for qc in st_qcs]
print("removed barriers from st_qcs")
# optimize circuit
t3_st_qcs = transpile(st_qcs, optimization_level=3, basis_gates=["sx", "cx", "rz"])
print("created t3_st_qcs (length:", len(t3_st_qcs), ")")
# zne wrapping (zero-noise extrapolation variants; see zne_utils for details)
zne_qcs = zne_wrapper(t3_st_qcs)
print("created zne_qcs (length:", len(zne_qcs), ")")
# map onto the physical layout without re-optimizing (optimization_level=0)
t3_zne_qcs = transpile(zne_qcs, optimization_level=0, basis_gates=["sx", "cx", "rz"], initial_layout=initial_layout)
print("created t3_zne_qcs (length:", len(t3_zne_qcs), ")")
t3_zne_qcs[-3].draw("mpl")
from qiskit.test.mock import FakeJakarta
# backend = FakeJakarta()
# backend = Aer.get_backend("qasm_simulator")
IBMQ.load_account()
# provider = IBMQ.get_provider(hub='ibm-q-utokyo', group='internal', project='hirashi-jst')
provider = IBMQ.get_provider(hub='ibm-q-community', group='ibmquantumawards', project='open-science-22')
print("provider:", provider)
backend = provider.get_backend("ibmq_jakarta")
# QREM: calibration circuits for measurement-error mitigation
shots = 1 << 13
qr = QuantumRegister(num_qubits)
meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal')
cal_job = execute(meas_calibs, backend=backend, shots=shots, optimization_level=3, initial_layout = initial_layout)
print('Job ID', cal_job.job_id())
shots = 1 << 13
reps = 8  # number of repeated tomography jobs submitted below
jobs = []
for _ in range(reps):
    # (original Japanese comment, translated): check each run — do the values actually change?
    job = execute(t3_zne_qcs, backend, shots=shots)
    print('Job ID', job.job_id())
    jobs.append(job)
dt_now = datetime.datetime.now()
import pickle
# Persist jobs, job ids, and device properties so results can be retrieved later.
with open("jobs_jakarta_100step_" + dt_now.strftime('%Y%m%d_%H%M%S') + "_.pkl", "wb") as f:
    pickle.dump({"jobs": jobs, "cal_job": cal_job}, f)
with open("job_ids_jakarta_100step_" + dt_now.strftime('%Y%m%d_%H%M%S') + "_.pkl", "wb") as f:
    pickle.dump({"job_ids": [job.job_id() for job in jobs], "cal_job_id": cal_job.job_id()}, f)
with open("properties_jakarta" + dt_now.strftime('%Y%m%d_%H%M%S') + "_.pkl", "wb") as f:
    pickle.dump(backend.properties(), f)
cal_results = cal_job.result()
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
target_state = (One^One^Zero).to_matrix() # DO NOT CHANGE!!!
fids = []
# For each job: apply measurement mitigation, ZNE-decode the expectation
# values, reconstruct a valid density matrix, and score it against the target.
for job in jobs:
    mit_results = meas_fitter.filter.apply(job.result())
    zne_expvals = zne_decoder(num_qubits, mit_results)
    rho = expvals_to_valid_rho(num_qubits, zne_expvals)
    fid = state_fidelity(rho, target_state)
    fids.append(fid)
print('state tomography fidelity = {:.4f} \u00B1 {:.4f}'.format(np.mean(fids), np.std(fids)))
|
https://github.com/abbarreto/qiskit4
|
abbarreto
|
# Assign these values as per your requirements.
# NOTE(review): `global` at module level is a no-op; kept to document intent.
global min_qubits,max_qubits,skip_qubits,max_circuits,num_shots,Noise_Inclusion
min_qubits=4
max_qubits=15 #reference files are upto 12 Qubits only
skip_qubits=2
max_circuits=3
num_shots=4092
gate_counts_plots = True         # also collect 1Q/2Q gate-count metrics
Noise_Inclusion = False          # apply a noise model on the built-in simulator
saveplots = False                # save the summary figure to disk
Memory_utilization_plot = True   # track peak resident memory per run
Type_of_Simulator = "built_in" #Inputs are "built_in" or "FAKE" or "FAKEV2"
backend_name = "FakeGuadalupeV2" #Can refer to the README files for the available backends
#Change your Specification of Simulator in Declaring Backend Section
#By Default : built_in -> qasm_simulator and FAKE -> FakeSantiago() and FAKEV2 -> FakeSantiagoV2()
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, transpile, execute
from qiskit.opflow import PauliTrotterEvolution, Suzuki
from qiskit.opflow.primitive_ops import PauliSumOp
import time,os,json
import matplotlib.pyplot as plt
# Import from Qiskit Aer noise module
from qiskit_aer.noise import (NoiseModel, QuantumError, ReadoutError,pauli_error, depolarizing_error, thermal_relaxation_error,reset_error)
# Benchmark Name
benchmark_name = "VQE Simulation"
# Selection of basis gate set for transpilation
# Note: selector 1 is a hardware agnostic gate set
basis_selector = 1
basis_gates_array = [
    [],
    ['rx', 'ry', 'rz', 'cx'], # a common basis set, default
    ['cx', 'rz', 'sx', 'x'], # IBM default basis set
    ['rx', 'ry', 'rxx'], # IonQ default basis set
    ['h', 'p', 'cx'], # another common basis set
    ['u', 'cx'] # general unitaries basis gates
]
np.random.seed(0)  # deterministic ansatz parameters across runs
def get_QV(backend):
    """Read the backend's quantum_volume from its JSON config file.

    The path is assembled from backend.dirname and backend.conf_filename.
    Returns None when the file has no 'quantum_volume' entry.
    """
    import json
    config_path = backend.dirname + "/" + backend.conf_filename
    with open(config_path, 'r') as config_file:
        config = json.load(config_file)
    return config.get('quantum_volume', None)
def checkbackend(backend_name, Type_of_Simulator):
    """Validate the requested backend for the chosen simulator type.

    Returns a backend-name string for "built_in" (falling back to
    qasm_simulator when unknown), or an instantiated fake-provider backend
    for "FAKE"/"FAKEV2" (falling back to FakeSantiago / FakeSantiagoV2).
    """
    if Type_of_Simulator == "built_in":
        available_backends = [candidate.name for candidate in Aer.backends()]
        if backend_name in available_backends:
            return backend_name
        print(f"incorrect backend name or backend not available. Using qasm_simulator by default !!!!")
        print(f"available backends are : {available_backends}")
        return "qasm_simulator"
    elif Type_of_Simulator == "FAKE" or Type_of_Simulator == "FAKEV2":
        import qiskit.providers.fake_provider as fake_backends
        if hasattr(fake_backends, backend_name):
            print(f"Backend {backend_name} is available for type {Type_of_Simulator}.")
            return getattr(fake_backends, backend_name)()
        print(f"Backend {backend_name} is not available or incorrect for type {Type_of_Simulator}. Executing with FakeSantiago!!!")
        fallback_name = "FakeSantiagoV2" if Type_of_Simulator == "FAKEV2" else "FakeSantiago"
        return getattr(fake_backends, fallback_name)()
# Instantiate the backend and derive platform metadata for the plots.
if Type_of_Simulator == "built_in":
    platform = checkbackend(backend_name,Type_of_Simulator)
    # By default using "Qasm Simulator"
    backend = Aer.get_backend(platform)
    QV_ = None  # quantum volume is not reported for the ideal simulator
    print(f"{platform} device is capable of running {backend.num_qubits}")
    print(f"backend version is {backend.backend_version}")
elif Type_of_Simulator == "FAKE":
    basis_selector = 0  # use the fake device's own basis gates
    backend = checkbackend(backend_name,Type_of_Simulator)
    QV_ = get_QV(backend)
    # Platform string (used in plot titles) from the device properties.
    platform = backend.properties().backend_name +"-"+ backend.properties().backend_version
    max_qubits = backend.configuration().n_qubits
    print(f"{platform} device is capable of running {backend.configuration().n_qubits}")
    print(f"{platform} has QV={QV_}")
    if max_qubits > 30:
        print(f"Device is capable with max_qubits = {max_qubits}")
        # BUG FIX: was `max_qubit = 30` (typo), so the 30-qubit cap was never applied.
        max_qubits = 30
    print(f"Using fake backend {platform} with max_qubits {max_qubits}")
elif Type_of_Simulator == "FAKEV2":
    basis_selector = 0  # use the fake device's own basis gates
    # V2 fake backends carry a "V2" suffix; add it if missing.
    if "V2" not in backend_name:
        backend_name = backend_name+"V2"
    backend = checkbackend(backend_name,Type_of_Simulator)
    QV_ = get_QV(backend)
    platform = backend.name +"-" +backend.backend_version
    max_qubits = backend.num_qubits
    print(f"{platform} device is capable of running {backend.num_qubits}")
    print(f"{platform} has QV={QV_}")
    if max_qubits > 30:
        print(f"Device is capable with max_qubits = {max_qubits}")
        # BUG FIX: was `max_qubit = 30` (typo), so the 30-qubit cap was never applied.
        max_qubits = 30
    print(f"Using fake backend {platform} with max_qubits {max_qubits}")
else:
    print("Enter valid Simulator.....")
# saved circuits for display
# (populated as a side effect by the circuit builders below; only small
# circuits are cached so that drawing them stays readable)
QC_ = None   # last measured ansatz circuit (set by ExpectationCircuit)
Hf_ = None   # last Hartree-Fock init circuit (set by HartreeFock)
CO_ = None   # last cluster-operator circuit (set by ClusterOperatorCircuit)
################### Circuit Definition #######################################
# Construct a Qiskit circuit for VQE Energy evaluation with UCCSD ansatz
# param: n_spin_orbs - The number of spin orbitals.
# return: return a Qiskit circuit for this VQE ansatz
def VQEEnergy(n_spin_orbs, na, nb, circuit_id=0, method=1):
    """Build VQE energy-evaluation circuit(s) with a UCCSD ansatz.

    n_spin_orbs: number of spin orbitals (= number of qubits)
    na, nb: number of occupied alpha / beta orbitals
    circuit_id: index of the pre-computed ansatz parameter file
    method: 1 -> return a single circuit measuring only the last Hamiltonian
            term; otherwise return the full list of measurement circuits.
    Side effect: sets the module-level `normalization` global.
    NOTE(review): assumes qubit_op[0] is the identity term (it is skipped and
    the loop starts at qubit_op[1]) -- confirm against the Hamiltonian files.
    """
    # number of alpha spin orbitals
    norb_a = int(n_spin_orbs / 2)
    # construct the Hamiltonian
    qubit_op = ReadHamiltonian(n_spin_orbs)
    # allocate qubits
    num_qubits = n_spin_orbs
    qr = QuantumRegister(num_qubits)
    qc = QuantumCircuit(qr, name=f"vqe-ansatz({method})-{num_qubits}-{circuit_id}")
    # initialize the HF state
    Hf = HartreeFock(num_qubits, na, nb)
    qc.append(Hf, qr)
    # form the list of single and double excitations
    excitationList = []
    # single excitations within the alpha block
    for occ_a in range(na):
        for vir_a in range(na, norb_a):
            excitationList.append((occ_a, vir_a))
    # single excitations within the beta block
    for occ_b in range(norb_a, norb_a+nb):
        for vir_b in range(norb_a+nb, n_spin_orbs):
            excitationList.append((occ_b, vir_b))
    # double (alpha+beta) excitations
    for occ_a in range(na):
        for vir_a in range(na, norb_a):
            for occ_b in range(norb_a, norb_a+nb):
                for vir_b in range(norb_a+nb, n_spin_orbs):
                    excitationList.append((occ_a, vir_a, occ_b, vir_b))
    # get cluster operators in Paulis
    pauli_list = readPauliExcitation(n_spin_orbs, circuit_id)
    # loop over the Pauli operators
    for index, PauliOp in enumerate(pauli_list):
        # get circuit for exp(-iP)
        cluster_qc = ClusterOperatorCircuit(PauliOp, excitationList[index])
        # add to ansatz
        qc.append(cluster_qc, [i for i in range(cluster_qc.num_qubits)])
    # method 1, only compute the last term in the Hamiltonian
    if method == 1:
        # last term in Hamiltonian
        qc_with_mea, is_diag = ExpectationCircuit(qc, qubit_op[1], num_qubits)
        # return the circuit
        return qc_with_mea
    # now we need to add the measurement parts to the circuit
    # circuit list
    qc_list = []
    diag = []
    off_diag = []
    global normalization
    normalization = 0.0
    # add the first non-identity term
    identity_qc = qc.copy()
    identity_qc.measure_all()
    qc_list.append(identity_qc) # add to circuit list
    diag.append(qubit_op[1])
    normalization += abs(qubit_op[1].coeffs[0]) # add to normalization factor
    diag_coeff = abs(qubit_op[1].coeffs[0]) # add to coefficients of diagonal terms
    # loop over rest of terms
    for index, p in enumerate(qubit_op[2:]):
        # get the circuit with expectation measurements
        qc_with_mea, is_diag = ExpectationCircuit(qc, p, num_qubits)
        # accumulate normalization
        normalization += abs(p.coeffs[0])
        # add to circuit list if non-diagonal
        if not is_diag:
            qc_list.append(qc_with_mea)
        else:
            # diagonal terms share the first (unrotated) circuit; fold in the coeff
            diag_coeff += abs(p.coeffs[0])
        # diagonal term
        if is_diag:
            diag.append(p)
        # off-diagonal term
        else:
            off_diag.append(p)
    # modify the name of diagonal circuit
    qc_list[0].name = qubit_op[1].primitive.to_list()[0][0] + " " + str(np.real(diag_coeff))
    normalization /= len(qc_list)
    return qc_list
# Function that constructs the circuit for a given cluster operator
def ClusterOperatorCircuit(pauli_op, excitationIndex):
    """Build the circuit implementing exp(-i * pauli_op) for one cluster operator.

    pauli_op: the excitation operator (PauliSumOp)
    excitationIndex: orbital index tuple, used only in the circuit name
    Side effect: caches a small instance in the module global CO_ for display.
    """
    # compute exp(-iP)
    exp_ip = pauli_op.exp_i()
    # first-order Suzuki-Trotter approximation, single repetition
    qc_op = PauliTrotterEvolution(trotter_mode=Suzuki(order=1, reps=1)).convert(exp_ip)
    # convert to circuit
    qc = qc_op.to_circuit()
    qc.name = f'Cluster Op {excitationIndex}'
    global CO_
    # idiomatic identity check (was `CO_ == None`)
    if CO_ is None or qc.num_qubits <= 4:
        if qc.num_qubits < 7:
            CO_ = qc
    # return this circuit
    return qc
# Function that adds expectation measurements to the raw circuits
def ExpectationCircuit(qc, pauli, nqubit, method=2):
    """Copy `qc` and append basis rotations + measurements for <pauli>.

    qc: the (unmeasured) ansatz circuit; it is copied, not modified
    pauli: a single-term PauliSumOp to measure
    nqubit: total qubit count, used to map string position -> qubit index
    Returns (measured circuit, is_diag); is_diag is True when the Pauli term
    contains no X or Y and therefore needs no basis change.
    Side effect: caches a small instance in the module global QC_ for display.
    """
    # copy the unrotated circuit
    raw_qc = qc.copy()
    # whether this term is diagonal in the computational basis
    is_diag = True
    # primitive Pauli string
    PauliString = pauli.primitive.to_list()[0][0]
    # coefficient
    coeff = pauli.coeffs[0]
    # basis rotation: Pauli strings are MSB-first, so character i acts on
    # qubit nqubit - i - 1
    for i, p in enumerate(PauliString):
        target_qubit = nqubit - i - 1
        if (p == "X"):
            is_diag = False
            raw_qc.h(target_qubit)
        elif (p == "Y"):
            raw_qc.sdg(target_qubit)
            raw_qc.h(target_qubit)
            is_diag = False
    # perform measurements
    raw_qc.measure_all()
    # name encodes the Pauli string and its (real) coefficient
    raw_qc.name = PauliString + " " + str(np.real(coeff))
    global QC_
    # idiomatic identity check (was `QC_ == None`)
    if QC_ is None or nqubit <= 4:
        if nqubit < 7:
            QC_ = raw_qc
    return raw_qc, is_diag
# Function that implements the Hartree-Fock state
def HartreeFock(norb, na, nb):
    """Build the Hartree-Fock reference state circuit.

    norb: total number of spin orbitals (qubits)
    na: occupied alpha orbitals -> X on qubits 0 .. na-1
    nb: occupied beta orbitals  -> X on qubits norb/2 .. norb/2+nb-1
    Side effect: caches a small instance in the module global Hf_ for display.
    """
    # initialize the quantum circuit
    qc = QuantumCircuit(norb, name="Hf")
    # alpha electrons occupy the first na orbitals
    for ia in range(na):
        qc.x(ia)
    # beta electrons occupy the first nb orbitals of the second half
    for ib in range(nb):
        qc.x(ib + int(norb / 2))
    global Hf_
    # idiomatic identity check (was `Hf_ == None`)
    if Hf_ is None or norb <= 4:
        if norb < 7:
            Hf_ = qc
    # return the circuit
    return qc
################ Helper Functions
# Function that converts a list of single and double excitation operators to Pauli operators
def readPauliExcitation(norb, circuit_id=0):
    """Load pre-computed cluster-operator coefficients and group them into PauliSumOps.

    Consecutive entries in the ansatz file whose coefficient magnitudes agree
    within 1e-4 belong to the same excitation operator and are grouped into a
    single PauliSumOp; a new group starts whenever the magnitude changes.
    """
    # load pre-computed data
    filename = os.path.join(f'ansatzes/{norb}_qubit_{circuit_id}.txt')
    with open(filename) as f:
        data = f.read()
    ansatz_dict = json.loads(data)
    # initialize Pauli list
    pauli_list = []
    # current coefficient; 1e5 is a sentinel meaning "no group started yet"
    cur_coeff = 1e5
    # current Pauli list
    cur_list = []
    # loop over excitations
    for ext in ansatz_dict:
        if cur_coeff > 1e4:
            # first entry: open the first group
            cur_coeff = ansatz_dict[ext]
            cur_list = [(ext, ansatz_dict[ext])]
        elif abs(abs(ansatz_dict[ext]) - abs(cur_coeff)) > 1e-4:
            # magnitude changed: close the current group and start a new one
            pauli_list.append(PauliSumOp.from_list(cur_list))
            cur_coeff = ansatz_dict[ext]
            cur_list = [(ext, ansatz_dict[ext])]
        else:
            # same magnitude: this term belongs to the current operator
            cur_list.append((ext, ansatz_dict[ext]))
    # add the last term
    pauli_list.append(PauliSumOp.from_list(cur_list))
    # return Pauli list
    return pauli_list
# Get the Hamiltonian by reading in pre-computed file
def ReadHamiltonian(nqubit):
    """Load the pre-computed `nqubit`-qubit Hamiltonian from file as a PauliSumOp."""
    # the file holds a JSON dict of {pauli_string: coefficient}
    filename = os.path.join(f'Hamiltonians/{nqubit}_qubit.txt')
    with open(filename) as f:
        ham_dict = json.loads(f.read())
    # build the Hamiltonian directly from the (pauli, coeff) pairs
    return PauliSumOp.from_list(list(ham_dict.items()))
# Create an empty noise model
noise_parameters = NoiseModel()
if Type_of_Simulator == "built_in":
    # Add depolarizing error to all single qubit gates with error rate 0.05% and to all two qubit gates with error rate 0.5%
    depol_one_qb_error = 0.05
    depol_two_qb_error = 0.005
    noise_parameters.add_all_qubit_quantum_error(depolarizing_error(depol_one_qb_error, 1), ['rx', 'ry', 'rz'])
    noise_parameters.add_all_qubit_quantum_error(depolarizing_error(depol_two_qb_error, 2), ['cx'])
    # Amplitude-damping placeholders (rates currently 0.0, i.e. disabled)
    amp_damp_one_qb_error = 0.0
    amp_damp_two_qb_error = 0.0
    noise_parameters.add_all_qubit_quantum_error(depolarizing_error(amp_damp_one_qb_error, 1), ['rx', 'ry', 'rz'])
    noise_parameters.add_all_qubit_quantum_error(depolarizing_error(amp_damp_two_qb_error, 2), ['cx'])
    # Add reset noise to all single qubit resets
    reset_to_zero_error = 0.005
    reset_to_one_error = 0.005
    noise_parameters.add_all_qubit_quantum_error(reset_error(reset_to_zero_error, reset_to_one_error), ["reset"])
    # Add readout error (rates currently 0.0, i.e. disabled)
    p0given1_error = 0.000
    p1given0_error = 0.000
    error_meas = ReadoutError([[1 - p1given0_error, p1given0_error], [p0given1_error, 1 - p0given1_error]])
    noise_parameters.add_all_qubit_readout_error(error_meas)
    #print(noise_parameters)
elif Type_of_Simulator in ("FAKE", "FAKEV2"):
    # BUG FIX: was `Type_of_Simulator == "FAKE"or"FAKEV2"`, which is always
    # truthy (the non-empty string "FAKEV2" is True); now a proper membership test
    noise_parameters = NoiseModel.from_backend(backend)
    #print(noise_parameters)
### Analysis methods to be expanded and eventually compiled into a separate analysis.py file
import math, functools
def hellinger_fidelity_with_expected(p, q):
    """Hellinger fidelity between a measured distribution and an expected one.

    p: result distribution, may be passed as a counts distribution
    q: the expected distribution to be compared against
    References:
        `Hellinger Distance @ wikipedia <https://en.wikipedia.org/wiki/Hellinger_distance>`_
        Qiskit Hellinger Fidelity Function
    """
    p_total = sum(p.values())
    q_total = sum(q.values())
    if q_total == 0:
        print("ERROR: polarization_fidelity(), expected distribution is invalid, all counts equal to 0")
        return 0
    # normalize both distributions to probabilities
    p_normed = {key: val / p_total for key, val in p.items()}
    q_normed = {key: val / q_total for key, val in q.items()}
    # accumulate the squared Hellinger distance term by term
    acc = 0
    for key, prob in p_normed.items():
        if key in q_normed:
            acc += (np.sqrt(prob) - np.sqrt(q_normed.pop(key))) ** 2
        else:
            acc += prob
    # keys present only in q contribute their full probability
    acc += sum(q_normed.values())
    # in some situations (error mitigation) this can go negative, use abs value
    if acc < 0:
        print("WARNING: using absolute value in fidelity calculation")
        acc = abs(acc)
    dist = np.sqrt(acc) / np.sqrt(2)
    return (1 - dist ** 2) ** 2
def polarization_fidelity(counts, correct_dist, thermal_dist=None):
    """
    Combines Hellinger fidelity and polarization rescaling into fidelity calculation
    used in every benchmark
    counts: the measurement outcomes after `num_shots` algorithm runs
    correct_dist: the distribution we expect to get for the algorithm running perfectly
    thermal_dist: optional distribution to pass in distribution from a uniform
                  superposition over all states. If `None`: generated as
                  `uniform_dist` with the same qubits as in `counts`
    returns both polarization fidelity and the hellinger fidelity
    Polarization from: `https://arxiv.org/abs/2008.11294v1`
    """
    # infer the measured width from the length of a reference bitstring key
    num_measured_qubits = len(list(correct_dist.keys())[0])
    #print(num_measured_qubits)
    # left-pad measured bitstrings so keys are comparable with correct_dist
    counts = {k.zfill(num_measured_qubits): v for k, v in counts.items()}
    # calculate hellinger fidelity between measured expectation values and correct distribution
    hf_fidelity = hellinger_fidelity_with_expected(counts,correct_dist)
    # to limit cpu and memory utilization, skip noise correction if more than 16 measured qubits
    if num_measured_qubits > 16:
        return { 'fidelity':hf_fidelity, 'hf_fidelity':hf_fidelity }
    # if not provided, generate thermal dist based on number of qubits
    if thermal_dist == None:
        thermal_dist = uniform_dist(num_measured_qubits)
    # set our fidelity rescaling value as the hellinger fidelity for a depolarized state
    floor_fidelity = hellinger_fidelity_with_expected(thermal_dist, correct_dist)
    # rescale fidelity result so uniform superposition (random guessing) returns fidelity
    # rescaled to 0 to provide a better measure of success of the algorithm (polarization)
    new_floor_fidelity = 0
    fidelity = rescale_fidelity(hf_fidelity, floor_fidelity, new_floor_fidelity)
    return { 'fidelity':fidelity, 'hf_fidelity':hf_fidelity }
## Uniform distribution function commonly used
def rescale_fidelity(fidelity, floor_fidelity, new_floor_fidelity):
    """
    Linearly rescales our fidelities to allow comparisons of fidelities across benchmarks
    fidelity: raw fidelity to rescale
    floor_fidelity: threshold fidelity which is equivalent to random guessing
    new_floor_fidelity: what we rescale the floor_fidelity to
    Ex, with floor_fidelity = 0.25, new_floor_fidelity = 0.0:
        1 -> 1;
        0.25 -> 0;
        0.5 -> 0.3333;
    """
    # line through (1, 1) and (floor_fidelity, new_floor_fidelity)
    slope = (1 - new_floor_fidelity) / (1 - floor_fidelity)
    rescaled = slope * (fidelity - 1) + 1
    # clamp into [0, 1]
    return min(max(rescaled, 0.0), 1.0)
def uniform_dist(num_state_qubits):
    """Return the uniform distribution over all bitstrings of the given width."""
    n_states = 2 ** num_state_qubits
    return {format(i, f"0{num_state_qubits}b"): 1 / n_states for i in range(n_states)}
from matplotlib.patches import Rectangle
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap, Normalize
from matplotlib.patches import Circle
############### Color Map functions
# Create a selection of colormaps from which to choose; default to custom_spectral
cmap_spectral = plt.get_cmap('Spectral')
cmap_greys = plt.get_cmap('Greys')
cmap_blues = plt.get_cmap('Blues')
cmap_custom_spectral = None
# the default colormap is the spectral map
cmap = cmap_spectral
cmap_orig = cmap_spectral
# current cmap normalization function (default None)
cmap_norm = None
# defaults controlling how the low-fidelity end of the custom map fades to grey
default_fade_low_fidelity_level = 0.16
default_fade_rate = 0.7
# Specify a normalization function here (default None)
def set_custom_cmap_norm(vmin, vmax):
    """Install (or clear) the module-level colormap normalization.

    A degenerate range (vmin == vmax) or the identity range [0, 1]
    clears the norm; anything else installs a Normalize over [vmin, vmax].
    """
    global cmap_norm
    use_identity = vmin == vmax or (vmin == 0.0 and vmax == 1.0)
    if use_identity:
        print("... setting cmap norm to None")
        cmap_norm = None
    else:
        print(f"... setting cmap norm to [{vmin}, {vmax}]")
        cmap_norm = Normalize(vmin=vmin, vmax=vmax)
# Remake the custom spectral colormap with user settings
def set_custom_cmap_style(
        fade_low_fidelity_level=default_fade_low_fidelity_level,
        fade_rate=default_fade_rate):
    """Rebuild the custom spectral colormap with the given fade settings
    and install it as both the current and the original module colormap."""
    global cmap, cmap_custom_spectral, cmap_orig
    cmap_custom_spectral = create_custom_spectral_cmap(
        fade_low_fidelity_level=fade_low_fidelity_level, fade_rate=fade_rate)
    cmap = cmap_orig = cmap_custom_spectral
# Create the custom spectral colormap from the base spectral
def create_custom_spectral_cmap(
        fade_low_fidelity_level=default_fade_low_fidelity_level,
        fade_rate=default_fade_rate):
    """Build a ListedColormap from the base Spectral map whose lowest
    `fade_low_fidelity_level` fraction of colors is progressively
    desaturated and greyed, so near-zero fidelities visually fade out.
    NOTE(review): the local variable `breakpoint` shadows the builtin
    (pre-existing; harmless here since the debugger hook is not used)."""
    # determine the breakpoint from the fade level
    num_colors = 100
    breakpoint = round(fade_low_fidelity_level * num_colors)
    # get color list for spectral map
    spectral_colors = [cmap_spectral(v/num_colors) for v in range(num_colors)]
    #print(fade_rate)
    # create a list of colors to replace those below the breakpoint
    # and fill with "faded" color entries (in reverse)
    low_colors = [0] * breakpoint
    #for i in reversed(range(breakpoint)):
    for i in range(breakpoint):
        # x is index of low colors, normalized 0 -> 1
        x = i / breakpoint
        # get color at this index
        bc = spectral_colors[i]
        r0 = bc[0]
        g0 = bc[1]
        b0 = bc[2]
        z0 = bc[3]
        # push red toward 0.92 (a light grey-pink) as x approaches 0
        r_delta = 0.92 - r0
        #print(f"{x} {bc} {r_delta}")
        # compute saturation and greyness ratio
        sat_ratio = 1 - x
        #grey_ratio = 1 - x
        ''' attempt at a reflective gradient
        if i >= breakpoint/2:
            xf = 2*(x - 0.5)
            yf = pow(xf, 1/fade_rate)/2
            grey_ratio = 1 - (yf + 0.5)
        else:
            xf = 2*(0.5 - x)
            yf = pow(xf, 1/fade_rate)/2
            grey_ratio = 1 - (0.5 - yf)
        '''
        grey_ratio = 1 - math.pow(x, 1/fade_rate)
        #print(f"  {xf} {yf} ")
        #print(f"  {sat_ratio} {grey_ratio}")
        # move green/blue toward the (faded) red channel to grey the color out
        r = r0 + r_delta * sat_ratio
        g_delta = r - g0
        b_delta = r - b0
        g = g0 + g_delta * grey_ratio
        b = b0 + b_delta * grey_ratio
        #print(f"{r} {g} {b}\n")
        low_colors[i] = (r,g,b,z0)
    #print(low_colors)
    # combine the faded low colors with the regular spectral cmap to make a custom version
    cmap_custom_spectral = ListedColormap(low_colors + spectral_colors[breakpoint:])
    #spectral_colors = [cmap_custom_spectral(v/10) for v in range(10)]
    #for i in range(10): print(spectral_colors[i])
    #print("")
    return cmap_custom_spectral
# Make the custom spectral color map the default on module init
# (side effect: sets the module-level `cmap` / `cmap_orig` globals)
set_custom_cmap_style()
# Arrange the stored annotations optimally and add to plot
def anno_volumetric_data(ax, depth_base=2, label='Depth',
            labelpos=(0.2, 0.7), labelrot=0, type=1, fill=True):
    """Sort the stored annotation points left-to-right, nudge overlapping
    labels apart vertically, and draw each annotation (with arrow) onto `ax`.
    Consumes the module-level annotation lists filled by plot_volumetric_data.
    NOTE(review): relies on a global `saveplots` flag defined elsewhere in
    the file -- confirm it exists before calling. The `label` parameter is
    overwritten inside the loop (pre-existing)."""
    # sort all arrays by the x point of the text (anno_offs)
    global x_anno_offs, y_anno_offs, anno_labels, x_annos, y_annos
    all_annos = sorted(zip(x_anno_offs, y_anno_offs, anno_labels, x_annos, y_annos))
    x_anno_offs = [a for a,b,c,d,e in all_annos]
    y_anno_offs = [b for a,b,c,d,e in all_annos]
    anno_labels = [c for a,b,c,d,e in all_annos]
    x_annos = [d for a,b,c,d,e in all_annos]
    y_annos = [e for a,b,c,d,e in all_annos]
    #print(f"{x_anno_offs}")
    #print(f"{y_anno_offs}")
    #print(f"{anno_labels}")
    for i in range(len(anno_labels)):
        x_anno = x_annos[i]
        y_anno = y_annos[i]
        x_anno_off = x_anno_offs[i]
        y_anno_off = y_anno_offs[i]
        label = anno_labels[i]
        if i > 0:
            x_delta = abs(x_anno_off - x_anno_offs[i - 1])
            y_delta = abs(y_anno_off - y_anno_offs[i - 1])
            # if this label would sit on top of the previous one, push it down
            if y_delta < 0.7 and x_delta < 2:
                y_anno_off = y_anno_offs[i] = y_anno_offs[i - 1] - 0.6
                #x_anno_off = x_anno_offs[i] = x_anno_offs[i - 1] + 0.1
        ax.annotate(label,
            xy=(x_anno+0.0, y_anno+0.1),
            arrowprops=dict(facecolor='black', shrink=0.0,
                width=0.5, headwidth=4, headlength=5, edgecolor=(0.8,0.8,0.8)),
            xytext=(x_anno_off + labelpos[0], y_anno_off + labelpos[1]),
            rotation=labelrot,
            horizontalalignment='left', verticalalignment='baseline',
            color=(0.2,0.2,0.2),
            clip_on=True)
    if saveplots == True:
        plt.savefig("VolumetricPlotSample.jpg")
# Plot one group of data for volumetric presentation
def plot_volumetric_data(ax, w_data, d_data, f_data, depth_base=2, label='Depth',
        labelpos=(0.2, 0.7), labelrot=0, type=1, fill=True, w_max=18, do_label=False, do_border=True,
        x_size=1.0, y_size=1.0, zorder=1, offset_flag=False,
        max_depth=0, suppress_low_fidelity=False):
    """Plot one group of (width, depth, fidelity) points as colored cells on a
    volumetric-background axis, and record an annotation point at the cell
    with the largest width for later labeling by anno_volumetric_data.
    NOTE(review): reads module globals x_annos/y_annos/..., and a global
    `suppress_low_fidelity_level` defined elsewhere -- confirm it exists.
    NOTE(review): `low_fidelity_count` starts True, so the FIRST suppressed
    low-fidelity cell already breaks the loop and the else-branch is dead --
    verify whether a "skip one, then break" behavior was intended."""
    # since data may come back out of order, save point at max y for annotation
    i_anno = 0
    x_anno = 0
    y_anno = 0
    # plot data rectangles
    low_fidelity_count = True
    last_y = -1
    k = 0
    # determine y-axis dimension for one pixel to use for offset of bars that start at 0
    (_, dy) = get_pixel_dims(ax)
    # do this loop in reverse to handle the case where earlier cells are overlapped by later cells
    for i in reversed(range(len(d_data))):
        x = depth_index(d_data[i], depth_base)
        y = float(w_data[i])
        f = f_data[i]
        # each time we star a new row, reset the offset counter
        # DEVNOTE: this is highly specialized for the QA area plots, where there are 8 bars
        # that represent time starting from 0 secs. We offset by one pixel each and center the group
        if y != last_y:
            last_y = y;
            k = 3 # hardcoded for 8 cells, offset by 3
        #print(f"{i = } {x = } {y = }")
        if max_depth > 0 and d_data[i] > max_depth:
            #print(f"... excessive depth (2), skipped; w={y} d={d_data[i]}")
            break;
        # reject cells with low fidelity
        if suppress_low_fidelity and f < suppress_low_fidelity_level:
            if low_fidelity_count: break
            else: low_fidelity_count = True
        # the only time this is False is when doing merged gradation plots
        if do_border == True:
            # this case is for an array of x_sizes, i.e. each box has different width
            if isinstance(x_size, list):
                # draw each of the cells, with no offset
                if not offset_flag:
                    ax.add_patch(box_at(x, y, f, type=type, fill=fill, x_size=x_size[i], y_size=y_size, zorder=zorder))
                # use an offset for y value, AND account for x and width to draw starting at 0
                else:
                    ax.add_patch(box_at((x/2 + x_size[i]/4), y + k*dy, f, type=type, fill=fill, x_size=x+ x_size[i]/2, y_size=y_size, zorder=zorder))
            # this case is for only a single cell
            else:
                ax.add_patch(box_at(x, y, f, type=type, fill=fill, x_size=x_size, y_size=y_size))
        # save the annotation point with the largest y value
        if y >= y_anno:
            x_anno = x
            y_anno = y
            i_anno = i
        # move the next bar down (if using offset)
        k -= 1
    # if no data rectangles plotted, no need for a label
    if x_anno == 0 or y_anno == 0:
        return
    x_annos.append(x_anno)
    y_annos.append(y_anno)
    anno_dist = math.sqrt( (y_anno - 1)**2 + (x_anno - 1)**2 )
    # adjust radius of annotation circle based on maximum width of apps
    anno_max = 10
    if w_max > 10:
        anno_max = 14
    if w_max > 14:
        anno_max = 18
    scale = anno_max / anno_dist
    # offset of text from end of arrow
    if scale > 1:
        x_anno_off = scale * x_anno - x_anno - 0.5
        y_anno_off = scale * y_anno - y_anno
    else:
        x_anno_off = 0.7
        y_anno_off = 0.5
    x_anno_off += x_anno
    y_anno_off += y_anno
    # print(f"... {xx} {yy} {anno_dist}")
    x_anno_offs.append(x_anno_off)
    y_anno_offs.append(y_anno_off)
    anno_labels.append(label)
    if do_label:
        ax.annotate(label, xy=(x_anno+labelpos[0], y_anno+labelpos[1]), rotation=labelrot,
            horizontalalignment='left', verticalalignment='bottom', color=(0.2,0.2,0.2))
# module-level containers for volumetric-plot annotation points
# (appended to by plot_volumetric_data, consumed by anno_volumetric_data)
x_annos = []
y_annos = []
x_anno_offs = []
y_anno_offs = []
anno_labels = []
# init arrays to hold annotation points for label spreading
def vplot_anno_init():
    """Reset the module-level annotation bookkeeping lists to empty."""
    global x_annos, y_annos, x_anno_offs, y_anno_offs, anno_labels
    x_annos, y_annos = [], []
    x_anno_offs, y_anno_offs = [], []
    anno_labels = []
# Number of ticks on volumetric depth axis
# (the x axis is logarithmic in circuit depth)
max_depth_log = 22
# average transpile factor between base QV depth and our depth based on results from QV notebook
QV_transpile_factor = 12.7
# format a number using K,M,B,T for large numbers, optionally rounding to 'digits' decimal places if num > 1
# (sign handling may be incorrect)
def format_number(num, digits=0):
    """Format a number compactly using K/M/B/T suffixes.

    num: the value to format (numeric or numeric string)
    digits: decimal places kept after scaling (trailing zeros stripped)
    BUG FIX: negative inputs previously lost their sign (the old comment
    admitted "sign handling may be incorrect"); the '-' is now preserved.
    """
    if isinstance(num, str):
        num = float(num)
    negative = num < 0
    # keep three significant figures before scaling
    num = float('{:.3g}'.format(abs(num)))
    suffix = ''
    metric = {'T': 1000000000000, 'B': 1000000000, 'M': 1000000, 'K': 1000, '': 1}
    # pick the largest unit that leaves a value >= 1
    for unit, scale in metric.items():
        scaled = num / scale
        if scaled >= 1:
            num = round(scaled, digits)
            suffix = unit
            break
    numstr = str(num)
    # strip a trailing ".0" / trailing zeros from the decimal part
    if '.' in numstr:
        numstr = numstr.rstrip('0').rstrip('.')
    prefix = '-' if negative else ''
    return f"{prefix}{numstr}{suffix}"
# Return the color associated with the spcific value, using color map norm
def get_color(value):
    """Map a fidelity value to an RGBA color via the active module colormap,
    applying the optional normalization and a per-colormap range squeeze."""
    # if there is a normalize function installed, scale the data
    if cmap_norm:
        value = float(cmap_norm(value))
    # squeeze into the usable portion of the selected map
    if cmap == cmap_spectral:
        scaled = 0.05 + value * 0.9
    elif cmap == cmap_blues:
        scaled = 0.00 + value * 1.0
    else:
        scaled = 0.0 + value * 0.95
    return cmap(scaled)
# Return the x and y equivalent to a single pixel for the given plot axis
def get_pixel_dims(ax):
    """Return (dx, dy): the data-space size of one screen pixel on `ax`."""
    # transform the unit steps (0,1) and (1,0) relative to the data origin
    origin = ax.transData.transform((0, 0))
    pixdims = ax.transData.transform([(0, 1), (1, 0)]) - origin
    xpix = pixdims[1][0]
    ypix = pixdims[0][1]
    # invert: data units per pixel
    return (1 / xpix, 1 / ypix)
############### Helper functions
# return the base index for a circuit depth value
# take the log in the depth base, and add 1
def depth_index(d, depth_base):
    """Return the axis index for circuit depth `d`: log_base(d) + 1,
    with d == 0 mapping to 0 and depth_base <= 1 meaning a linear axis."""
    if depth_base <= 1:
        return d
    return 0 if d == 0 else math.log(d, depth_base) + 1
# draw a box at x,y with various attributes
def box_at(x, y, value, type=1, fill=True, x_size=1.0, y_size=1.0, alpha=1.0, zorder=1):
    """Build a fidelity-colored Rectangle patch centered at (x, y)."""
    # clamp the fidelity into [0, 1] before color lookup
    clamped = max(0.0, min(value, 1.0))
    return Rectangle((x - (x_size / 2), y - (y_size / 2)), x_size, y_size,
                     alpha=alpha,
                     edgecolor=(0.5, 0.5, 0.5),
                     facecolor=get_color(clamped),
                     fill=fill,
                     lw=0.5 * y_size,
                     zorder=zorder)
# draw a circle at x,y with various attributes
def circle_at(x, y, value, type=1, fill=True):
    """Build a fidelity-colored Circle patch of diameter 1 centered at (x, y)."""
    clamped = max(0.0, min(value, 1.0))
    # DEVNOTE: alpha changed to 0.7 from 0.5, to handle only one cell
    return Circle((x, y), 0.5,
                  alpha=0.7,
                  edgecolor=(0.5, 0.5, 0.5),
                  facecolor=get_color(clamped),
                  fill=fill,
                  lw=0.5)
def box4_at(x, y, value, type=1, fill=True, alpha=1.0):
    """Build a quarter-width Rectangle centered at (x, y); edge color matches face."""
    clamped = max(0.0, min(value, 1.0))
    face = get_color(clamped)
    # original rectangle is size 1.0: width size/4, offset size/8 from center
    return Rectangle((x - 0.125, y - 0.5), 0.25, 1.0,
                     alpha=alpha,
                     edgecolor=face,
                     facecolor=face,
                     fill=fill,
                     lw=0.1)
# Draw a Quantum Volume rectangle with specified width and depth, and grey-scale value
def qv_box_at(x, y, qv_width, qv_depth, value, depth_base):
    """Build the grey Quantum Volume rectangle anchored at (x, y)."""
    grey = (value, value, value)
    #print(f"{qv_width} {qv_depth} {depth_index(qv_depth, depth_base)}")
    return Rectangle((x - 0.5, y - 0.5), depth_index(qv_depth, depth_base), qv_width,
                     edgecolor=grey,
                     facecolor=grey,
                     fill=True,
                     lw=1)
def bkg_box_at(x, y, value=0.9):
    """Build a small grey background square (side 0.6) centered at (x, y)."""
    side = 0.6
    return Rectangle((x - side / 2, y - side / 2), side, side,
                     edgecolor=(.75, .75, .75),
                     facecolor=(value, value, value),
                     fill=True,
                     lw=0.5)
def bkg_empty_box_at(x, y):
    """Build a small white (hollow-looking) background square centered at (x, y)."""
    side = 0.6
    return Rectangle((x - side / 2, y - side / 2), side, side,
                     edgecolor=(.75, .75, .75),
                     facecolor=(1.0, 1.0, 1.0),
                     fill=True,
                     lw=0.5)
# Plot the background for the volumetric analysis
def plot_volumetric_background(max_qubits=11, QV=32, depth_base=2, suptitle=None, avail_qubits=0, colorbar_label="Avg Result Fidelity"):
    """Create and return the matplotlib axis for a volumetric-positioning plot:
    a circuit-width vs. (log) circuit-depth grid with the Quantum Volume
    rectangle and the estimated successful region drawn as background cells.
    QV semantics: QV == 0 suppresses the QV rectangle/label; QV < 0 means
    "estimated" (label gets "(est.)") with magnitude |QV|.
    NOTE(review): `d = xround[i_success]` can raise IndexError when every
    depth succeeds for a row (pre-existing; see DEVNOTE below)."""
    if suptitle == None:
        suptitle = f"Volumetric Positioning\nCircuit Dimensions and Fidelity Overlaid on Quantum Volume = {QV}"
    QV0 = QV
    qv_estimate = False
    est_str = ""
    if QV == 0: # QV = 0 indicates "do not draw QV background or label"
        QV = 2048
    elif QV < 0: # QV < 0 indicates "add est. to label"
        QV = -QV
        qv_estimate = True
        est_str = " (est.)"
    if avail_qubits > 0 and max_qubits > avail_qubits:
        max_qubits = avail_qubits
    # grow the plotted width range with the qubit count
    max_width = 13
    if max_qubits > 11: max_width = 18
    if max_qubits > 14: max_width = 20
    if max_qubits > 16: max_width = 24
    if max_qubits > 24: max_width = 33
    #print(f"... {avail_qubits} {max_qubits} {max_width}")
    plot_width = 6.8
    plot_height = 0.5 + plot_width * (max_width / max_depth_log)
    #print(f"... {plot_width} {plot_height}")
    # define matplotlib figure and axis; use constrained layout to fit colorbar to right
    fig, ax = plt.subplots(figsize=(plot_width, plot_height), constrained_layout=True)
    plt.suptitle(suptitle)
    plt.xlim(0, max_depth_log)
    plt.ylim(0, max_width)
    # circuit depth axis (x axis)
    xbasis = [x for x in range(1,max_depth_log)]
    xround = [depth_base**(x-1) for x in xbasis]
    xlabels = [format_number(x) for x in xround]
    ax.set_xlabel('Circuit Depth')
    ax.set_xticks(xbasis)
    plt.xticks(xbasis, xlabels, color='black', rotation=45, ha='right', va='top', rotation_mode="anchor")
    # other label options
    #plt.xticks(xbasis, xlabels, color='black', rotation=-60, ha='left')
    #plt.xticks(xbasis, xlabels, color='black', rotation=-45, ha='left', va='center', rotation_mode="anchor")
    # circuit width axis (y axis)
    ybasis = [y for y in range(1,max_width)]
    yround = [1,2,3,4,5,6,7,8,10,12,15] # not used now
    ylabels = [str(y) for y in yround] # not used now
    #ax.set_ylabel('Circuit Width (Number of Qubits)')
    ax.set_ylabel('Circuit Width')
    ax.set_yticks(ybasis)
    #create simple line plot (not used right now)
    #ax.plot([0, 10],[0, 10])
    log2QV = math.log2(QV)
    QV_width = log2QV
    QV_depth = log2QV * QV_transpile_factor
    # show a quantum volume rectangle of QV = 64 e.g. (6 x 6)
    if QV0 != 0:
        ax.add_patch(qv_box_at(1, 1, QV_width, QV_depth, 0.87, depth_base))
    else:
        ax.add_patch(qv_box_at(1, 1, QV_width, QV_depth, 0.91, depth_base))
    # the untranspiled version is commented out - we do not show this by default
    # also show a quantum volume rectangle un-transpiled
    # ax.add_patch(qv_box_at(1, 1, QV_width, QV_width, 0.80, depth_base))
    # show 2D array of volumetric cells based on this QV_transpiled
    # DEVNOTE: we use +1 only to make the visuals work; s/b without
    # Also, the second arg of the min( below seems incorrect, needs correction
    maxprod = (QV_width + 1) * (QV_depth + 1)
    for w in range(1, min(max_width, round(QV) + 1)):
        # don't show VB squares if width greater than known available qubits
        if avail_qubits != 0 and w > avail_qubits:
            continue
        i_success = 0
        for d in xround:
            # polarization factor for low circuit widths
            maxtest = maxprod / ( 1 - 1 / (2**w) )
            # if circuit would fail here, don't draw box
            if d > maxtest: continue
            if w * d > maxtest: continue
            # guess for how to capture how hardware decays with width, not entirely correct
            # # reduce maxtext by a factor of number of qubits > QV_width
            # # just an approximation to account for qubit distances
            # if w > QV_width:
            #     over = w - QV_width
            #     maxtest = maxtest / (1 + (over/QV_width))
            # draw a box at this width and depth
            id = depth_index(d, depth_base)
            # show vb rectangles; if not showing QV, make all hollow (or less dark)
            if QV0 == 0:
                #ax.add_patch(bkg_empty_box_at(id, w))
                ax.add_patch(bkg_box_at(id, w, 0.95))
            else:
                ax.add_patch(bkg_box_at(id, w, 0.9))
            # save index of last successful depth
            i_success += 1
        # plot empty rectangle after others
        d = xround[i_success]
        id = depth_index(d, depth_base)
        ax.add_patch(bkg_empty_box_at(id, w))
    # Add annotation showing quantum volume
    if QV0 != 0:
        t = ax.text(max_depth_log - 2.0, 1.5, f"QV{est_str}={QV}", size=12,
                horizontalalignment='right', verticalalignment='center', color=(0.2,0.2,0.2),
                bbox=dict(boxstyle="square,pad=0.3", fc=(.9,.9,.9), ec="grey", lw=1))
    # add colorbar to right of plot
    plt.colorbar(cm.ScalarMappable(cmap=cmap), cax=None, ax=ax,
            shrink=0.6, label=colorbar_label, panchor=(0.0, 0.7))
    return ax
# Function to calculate circuit depth
def calculate_circuit_depth(qc):
    """Return the depth of the given quantum circuit."""
    return qc.depth()
def calculate_transpiled_depth(qc, basis_selector):
    """Transpile `qc` and return (transpiled depth, transpiled circuit).

    basis_selector 0 transpiles against the module-level `backend`;
    any other value selects a gate set from the module `basis_gates_array`.
    """
    if basis_selector == 0:
        compiled = transpile(qc, backend)
    else:
        compiled = transpile(qc, basis_gates=basis_gates_array[basis_selector], seed_transpiler=0)
    return compiled.depth(), compiled
def plot_fidelity_data(fidelity_data, Hf_fidelity_data, title):
    """Average the fidelity samples per qubit count.

    fidelity_data / Hf_fidelity_data: {num_qubits: [fidelity, ...]}
    title: unused here (kept for interface compatibility)
    Returns (avg_fidelity_means, avg_Hf_fidelity_means), ordered by the
    keys of fidelity_data.
    """
    qubit_counts = list(fidelity_data.keys())
    avg_fidelity_means = [np.average(fidelity_data[n]) for n in qubit_counts]
    avg_Hf_fidelity_means = [np.mean(Hf_fidelity_data[n]) for n in qubit_counts]
    return avg_fidelity_means, avg_Hf_fidelity_means
# QuantumCircuit method names of standard gates; filled by list_of_standardgates()
list_of_gates = []
def list_of_standardgates():
    """Populate the module-level `list_of_gates` with the QuantumCircuit
    method names (e.g. 'h', 'cx', 'measure') of all standard Qiskit gates,
    found by matching gate classes in qiskit.circuit.library against
    QuantumCircuit methods."""
    import qiskit.circuit.library as lib
    from qiskit.circuit import Gate
    import inspect
    # List all the attributes of the library module
    gate_list = dir(lib)
    # Filter out non-gate classes (like functions, variables, etc.)
    gates = [gate for gate in gate_list if isinstance(getattr(lib, gate), type) and issubclass(getattr(lib, gate), Gate)]
    # Get method names from QuantumCircuit
    circuit_methods = inspect.getmembers(QuantumCircuit, inspect.isfunction)
    method_names = [name for name, _ in circuit_methods]
    # Map gate class names to method names
    gate_to_method = {}
    for gate in gates:
        gate_class = getattr(lib, gate)
        class_name = gate_class.__name__.replace('Gate', '').lower() # Normalize class name
        for method in method_names:
            # the 'cr' -> 'c-r' replacement handles controlled-rotation naming quirks
            if method == class_name or method == class_name.replace('cr', 'c-r'):
                gate_to_method[gate] = method
                break
    # Add common operations that are not strictly gates
    additional_operations = {
        'Measure': 'measure',
        'Barrier': 'barrier',
    }
    gate_to_method.update(additional_operations)
    for k,v in gate_to_method.items():
        list_of_gates.append(v)
def update_counts(gates, custom_gates):
    """Merge two gate-count dictionaries, summing counts for shared keys.

    Returns a new dict; neither input is modified.
    """
    merged = dict(gates)
    for name, count in custom_gates.items():
        merged[name] = merged.get(name, 0) + count
    return merged
def get_gate_counts(gates, custom_gate_defs):
    """Expand custom-gate entries in a gate-count dict.

    gates: {gate_name: count} possibly containing custom gates
    custom_gate_defs: {custom_gate_name: {sub_gate: count, ...}}
    Each custom gate's constituent counts are added once per occurrence,
    and the custom-gate entry itself is removed from the result.
    """
    expanded = gates.copy()
    for gate_name, occurrences in gates.items():
        if gate_name not in custom_gate_defs:
            continue
        definition = custom_gate_defs[gate_name]
        # add the definition's counts once per occurrence of the custom gate
        for _ in range(occurrences):
            expanded = update_counts(expanded, definition)
        # the custom gate has been fully expanded
        del expanded[gate_name]
    return expanded
# scratch state for recursive gate counting (managed by count_operations)
dict_of_qc = dict()        # circuit name -> raw op counts (may include custom gates)
custom_gates_defs = dict() # custom gate name -> its expanded op counts
# Function to count operations recursively
def count_operations(qc):
dict_of_qc.clear()
circuit_traverser(qc)
operations = dict()
operations = dict_of_qc[qc.name]
del dict_of_qc[qc.name]
# print("operations :",operations)
# print("dict_of_qc :",dict_of_qc)
for keys in operations.keys():
if keys not in list_of_gates:
for k,v in dict_of_qc.items():
if k in operations.keys():
custom_gates_defs[k] = v
operations=get_gate_counts(operations,custom_gates_defs)
custom_gates_defs.clear()
return operations
def circuit_traverser(qc):
    """Recursively record the op counts of `qc` and of every custom
    sub-circuit it contains into the module-level dict_of_qc, keyed by
    circuit name."""
    dict_of_qc[qc.name]=dict(qc.count_ops())
    for i in qc.data:
        # a non-standard instruction carries its own definition circuit; descend
        if str(i.operation.name) not in list_of_gates:
            qc_1 = i.operation.definition
            circuit_traverser(qc_1)
def get_memory():
    """Return this process's peak resident-set size in megabytes.

    ``resource.getrusage`` reports ``ru_maxrss`` in kilobytes on Linux
    but in bytes on macOS, so scale by platform to always return MB.
    """
    import resource
    import sys
    usage = resource.getrusage(resource.RUSAGE_SELF)
    # macOS reports bytes; Linux (and most other Unixes) report kilobytes.
    divisor = 1024 * 1024 if sys.platform == "darwin" else 1024
    max_mem = usage.ru_maxrss / divisor  # in MB
    return max_mem
def analyzer(qc, references, num_qubits):
    """Look up the reference (correct) distribution for a benchmark circuit.

    The circuit name is either "<pauli> <coefficient>" (two tokens), in
    which case the pauli string indexes ``references`` directly, or a
    longer name whose third token is a circuit id used to build the
    "Qubits - <n> - <id>" key.

    Returns (correct_dist, total_name).
    """
    total_name = qc.name
    parts = total_name.split()
    pauli_string = parts[0]
    if len(parts) == 2:
        correct_dist = references[pauli_string]
    else:
        circuit_id = int(parts[2])
        correct_dist = references[f"Qubits - {num_qubits} - {circuit_id}"]
    return correct_dist, total_name
# Max qubits must be 12 since the referenced files only go to 12 qubits
MAX_QUBITS = 12
# Benchmark method selector: run() builds one circuit per id for method 1
# and a single list of circuits for method 2.
method = 1
def run (min_qubits=min_qubits, max_qubits=max_qubits, skip_qubits=2,
        max_circuits=max_circuits, num_shots=num_shots):
    """Execute the VQE benchmark over a range of qubit counts.

    For each qubit count, builds the benchmark circuits (via VQEEnergy),
    runs them on the configured simulator, and accumulates creation /
    elapsed / quantum times, algorithmic and transpiled depths, gate
    counts, fidelities, and (optionally) peak memory.

    NOTE(review): the default argument values (min_qubits, max_qubits,
    max_circuits, num_shots) are read from module-level globals that are
    defined earlier in the notebook — they are bound at def time.

    Returns a 13-tuple of the accumulated metric lists/dicts.
    """
    creation_times = []
    elapsed_times = []
    quantum_times = []
    circuit_depths = []
    transpiled_depths = []
    fidelity_data = {}
    Hf_fidelity_data = {}
    numckts = []
    mem_usage = []
    algorithmic_1Q_gate_counts = []
    algorithmic_2Q_gate_counts = []
    transpiled_1Q_gate_counts = []
    transpiled_2Q_gate_counts = []
    print(f"{benchmark_name} Benchmark Program - {platform}")
    #defining all the standard gates supported by qiskit in a list
    if gate_counts_plots == True:
        list_of_standardgates()
    max_qubits = max(max_qubits, min_qubits) # max must be >= min
    # validate parameters (smallest circuit is 4 qubits and largest is 10 qubits)
    max_qubits = min(max_qubits, MAX_QUBITS)
    min_qubits = min(max(4, min_qubits), max_qubits)
    if min_qubits % 2 == 1: min_qubits += 1 # min_qubits must be even
    skip_qubits = max(1, skip_qubits)
    if method == 2: max_circuits = 1
    if max_qubits < 4:
        print(f"Max number of qubits {max_qubits} is too low to run method {method} of VQE algorithm")
        return
    # Publish the validated range so the plotting code below the function
    # can build a matching x-axis.
    global max_ckts
    max_ckts = max_circuits
    global min_qbits,max_qbits,skp_qubits
    min_qbits = min_qubits
    max_qbits = max_qubits
    skp_qubits = skip_qubits
    print(f"min, max qubits = {min_qubits} {max_qubits}")
    # Execute Benchmark Program N times for multiple circuit sizes
    for input_size in range(min_qubits, max_qubits + 1, skip_qubits):
        # reset random seed
        np.random.seed(0)
        # determine the number of circuits to execute for this group
        num_circuits = min(3, max_circuits)
        num_qubits = input_size
        fidelity_data[num_qubits] = []
        Hf_fidelity_data[num_qubits] = []
        # decides number of electrons
        na = int(num_qubits/4)
        nb = int(num_qubits/4)
        # random seed
        np.random.seed(0)
        numckts.append(num_circuits)
        # create the circuit for given qubit size and simulation parameters, store time metric
        ts = time.time()
        # circuit list
        qc_list = []
        # Method 1 (default)
        if method == 1:
            # loop over circuits
            for circuit_id in range(num_circuits):
                # construct circuit
                qc_single = VQEEnergy(num_qubits, na, nb, circuit_id, method)
                qc_single.name = qc_single.name + " " + str(circuit_id)
                # add to list
                qc_list.append(qc_single)
        # method 2
        elif method == 2:
            # construct all circuits
            qc_list = VQEEnergy(num_qubits, na, nb, 0, method)
            print(qc_list)
        print(f"************\nExecuting [{len(qc_list)}] circuits with num_qubits = {num_qubits}")
        for qc in qc_list:
            print("*********************************************")
            #print(f"qc of {qc} qubits for qc_list value: {qc_list}")
            # get circuit id
            if method == 1:
                circuit_id = qc.name.split()[2]
            else:
                circuit_id = qc.name.split()[0]
            #creation time
            # NOTE(review): ts is set once before the circuit-building loop,
            # so this measures cumulative time since group start, not per-circuit.
            creation_time = time.time() - ts
            creation_times.append(creation_time)
            #print(qc)
            print(f"creation time = {creation_time*1000} ms")
            # Calculate gate count for the algorithmic circuit (excluding barriers and measurements)
            if gate_counts_plots == True:
                operations = count_operations(qc)
                n1q = 0; n2q = 0
                if operations != None:
                    for key, value in operations.items():
                        if key == "measure": continue
                        if key == "barrier": continue
                        # Names starting with "c"/"mc" are treated as multi-qubit.
                        if key.startswith("c") or key.startswith("mc"):
                            n2q += value
                        else:
                            n1q += value
                print("operations: ",operations)
                algorithmic_1Q_gate_counts.append(n1q)
                algorithmic_2Q_gate_counts.append(n2q)
            # collapse the sub-circuit levels used in this benchmark (for qiskit)
            qc=qc.decompose()
            #print(qc)
            # Calculate circuit depth
            depth = calculate_circuit_depth(qc)
            circuit_depths.append(depth)
            # Calculate transpiled circuit depth
            transpiled_depth,qc = calculate_transpiled_depth(qc,basis_selector)
            transpiled_depths.append(transpiled_depth)
            #print(qc)
            print(f"Algorithmic Depth = {depth} and Normalized Depth = {transpiled_depth}")
            if gate_counts_plots == True:
                # Calculate gate count for the transpiled circuit (excluding barriers and measurements)
                tr_ops = qc.count_ops()
                #print("tr_ops = ",tr_ops)
                tr_n1q = 0; tr_n2q = 0
                if tr_ops != None:
                    for key, value in tr_ops.items():
                        if key == "measure": continue
                        if key == "barrier": continue
                        if key.startswith("c"): tr_n2q += value
                        else: tr_n1q += value
                transpiled_1Q_gate_counts.append(tr_n1q)
                transpiled_2Q_gate_counts.append(tr_n2q)
                print(f"Algorithmic 1Q gates = {n1q} ,Algorithmic 2Q gates = {n2q}")
                print(f"Normalized 1Q gates = {tr_n1q} ,Normalized 2Q gates = {tr_n2q}")
            #execution
            if Type_of_Simulator == "built_in":
                #To check if Noise is required
                if Noise_Inclusion == True:
                    noise_model = noise_parameters
                else:
                    noise_model = None
                ts = time.time()
                job = execute(qc, backend, shots=num_shots, noise_model=noise_model)
            elif Type_of_Simulator == "FAKE" or Type_of_Simulator == "FAKEV2" :
                ts = time.time()
                job = backend.run(qc,shots=num_shots, noise_model=noise_parameters)
            #retrieving the result
            result = job.result()
            #print(result)
            #calculating elapsed time
            elapsed_time = time.time() - ts
            elapsed_times.append(elapsed_time)
            # Calculate quantum processing time
            quantum_time = result.time_taken
            quantum_times.append(quantum_time)
            print(f"Elapsed time = {elapsed_time*1000} ms and Quantum Time = {quantum_time*1000} ms")
            #counts in result object
            counts = result.get_counts()
            # load pre-computed data
            if len(qc.name.split()) == 2:
                filename = os.path.join(f'_common/precalculated_data_{num_qubits}_qubit.json')
                with open(filename) as f:
                    references = json.load(f)
            else:
                filename = os.path.join(f'_common/precalculated_data_{num_qubits}_qubit_method2.json')
                with open(filename) as f:
                    references = json.load(f)
            #Correct distribution to compare with counts
            correct_dist,total_name = analyzer(qc,references,num_qubits)
            #fidelity calculation comparision of counts and correct_dist
            fidelity_dict = polarization_fidelity(counts, correct_dist)
            print(fidelity_dict)
            # modify fidelity based on the coefficient
            # NOTE(review): fidelity_dict appears to be a dict (it is indexed
            # with ['fidelity'] below), so "*=" here would raise TypeError —
            # presumably the scaling was meant for the fidelity values; confirm.
            if (len(total_name.split()) == 2):
                fidelity_dict *= ( abs(float(total_name.split()[1])) / normalization )
            fidelity_data[num_qubits].append(fidelity_dict['fidelity'])
            Hf_fidelity_data[num_qubits].append(fidelity_dict['hf_fidelity'])
            #maximum memory utilization (if required)
            if Memory_utilization_plot == True:
                max_mem = get_memory()
                print(f"Maximum Memory Utilized: {max_mem} MB")
                mem_usage.append(max_mem)
            print("*********************************************")
    ##########
    # print a sample circuit
    print("Sample Circuit:"); print(QC_ if QC_ != None else " ... too large!")
    print("\nHartree Fock Generator 'Hf' ="); print(Hf_ if Hf_ != None else " ... too large!")
    print("\nCluster Operator Example 'Cluster Op' ="); print(CO_ if CO_ != None else " ... too large!")
    return (creation_times, elapsed_times, quantum_times, circuit_depths, transpiled_depths,
    fidelity_data, Hf_fidelity_data, numckts , algorithmic_1Q_gate_counts, algorithmic_2Q_gate_counts,
    transpiled_1Q_gate_counts, transpiled_2Q_gate_counts,mem_usage)
# Execute the benchmark program, accumulate metrics, and calculate circuit depths
(creation_times, elapsed_times, quantum_times, circuit_depths,transpiled_depths, fidelity_data, Hf_fidelity_data, numckts,
algorithmic_1Q_gate_counts, algorithmic_2Q_gate_counts, transpiled_1Q_gate_counts, transpiled_2Q_gate_counts,mem_usage) = run()
# Define the range of qubits for the x-axis
num_qubits_range = range(min_qbits, max_qbits+1,skp_qubits)
print("num_qubits_range =",num_qubits_range)
# Calculate average creation time, elapsed time, quantum processing time, and circuit depth for each number of qubits
avg_creation_times = []
avg_elapsed_times = []
avg_quantum_times = []
avg_circuit_depths = []
avg_transpiled_depths = []
avg_1Q_algorithmic_gate_counts = []
avg_2Q_algorithmic_gate_counts = []
avg_1Q_Transpiled_gate_counts = []
avg_2Q_Transpiled_gate_counts = []
max_memory = []
start = 0
# numckts holds the number of circuits executed per qubit size; each metric
# list is averaged over its contiguous per-circuit slice.
for num in numckts:
    avg_creation_times.append(np.mean(creation_times[start:start+num]))
    avg_elapsed_times.append(np.mean(elapsed_times[start:start+num]))
    avg_quantum_times.append(np.mean(quantum_times[start:start+num]))
    avg_circuit_depths.append(np.mean(circuit_depths[start:start+num]))
    avg_transpiled_depths.append(np.mean(transpiled_depths[start:start+num]))
    if gate_counts_plots == True:
        avg_1Q_algorithmic_gate_counts.append(int(np.mean(algorithmic_1Q_gate_counts[start:start+num])))
        avg_2Q_algorithmic_gate_counts.append(int(np.mean(algorithmic_2Q_gate_counts[start:start+num])))
        avg_1Q_Transpiled_gate_counts.append(int(np.mean(transpiled_1Q_gate_counts[start:start+num])))
        avg_2Q_Transpiled_gate_counts.append(int(np.mean(transpiled_2Q_gate_counts[start:start+num])))
    if Memory_utilization_plot == True:max_memory.append(np.max(mem_usage[start:start+num]))
    start += num
# Calculate the fidelity data
avg_f, avg_Hf = plot_fidelity_data(fidelity_data, Hf_fidelity_data, "Fidelity Comparison")
# Plot histograms for average creation time, average elapsed time, average quantum processing time, and average circuit depth versus the number of qubits
# Add labels to the bars
def autolabel(rects, ax, str='{:.3f}', va='top', text_color="black"):
    """Write each bar's height, rotated 90°, at the vertical middle of the bar.

    NOTE: the format-string parameter is named ``str`` (shadowing the
    builtin) because existing callers pass it by keyword.
    """
    for bar in rects:
        bar_height = bar.get_height()
        ax.annotate(
            str.format(bar_height),
            xy=(bar.get_x() + bar.get_width() / 2, bar_height / 2),
            xytext=(0, 0),
            textcoords="offset points",
            ha='center',
            va=va,
            color=text_color,
            rotation=90,
        )
bar_width = 0.3
# Determine the number of subplots and their arrangement
# (ax4/ax5 exist only with gate-count plots; ax7 only with memory plots).
if Memory_utilization_plot and gate_counts_plots:
    fig, (ax1, ax2, ax3, ax4, ax5, ax6, ax7) = plt.subplots(7, 1, figsize=(18, 30))
    # Plotting for both memory utilization and gate counts
    # ax1, ax2, ax3, ax4, ax5, ax6, ax7 are available
elif Memory_utilization_plot:
    fig, (ax1, ax2, ax3, ax6, ax7) = plt.subplots(5, 1, figsize=(18, 30))
    # Plotting for memory utilization only
    # ax1, ax2, ax3, ax6, ax7 are available
elif gate_counts_plots:
    fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1, figsize=(18, 30))
    # Plotting for gate counts only
    # ax1, ax2, ax3, ax4, ax5, ax6 are available
else:
    fig, (ax1, ax2, ax3, ax6) = plt.subplots(4, 1, figsize=(18, 30))
    # Default plotting
    # ax1, ax2, ax3, ax6 are available
fig.suptitle(f"General Benchmarks : {platform} - {benchmark_name}", fontsize=16)
for i in range(len(avg_creation_times)): #converting seconds to milli seconds by multiplying 1000
    avg_creation_times[i] *= 1000
ax1.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
x = ax1.bar(num_qubits_range, avg_creation_times, color='deepskyblue')
autolabel(ax1.patches, ax1)
ax1.set_xlabel('Number of Qubits')
ax1.set_ylabel('Average Creation Time (ms)')
ax1.set_title('Average Creation Time vs Number of Qubits',fontsize=14)
ax2.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
for i in range(len(avg_elapsed_times)): #converting seconds to milli seconds by multiplying 1000
    avg_elapsed_times[i] *= 1000
for i in range(len(avg_quantum_times)): #converting seconds to milli seconds by multiplying 1000
    avg_quantum_times[i] *= 1000
# Paired bars: elapsed (wall-clock) vs quantum (simulator-reported) time.
Elapsed= ax2.bar(np.array(num_qubits_range) - bar_width / 2, avg_elapsed_times, width=bar_width, color='cyan', label='Elapsed Time')
Quantum= ax2.bar(np.array(num_qubits_range) + bar_width / 2, avg_quantum_times,width=bar_width, color='deepskyblue',label ='Quantum Time')
autolabel(Elapsed,ax2,str='{:.1f}')
autolabel(Quantum,ax2,str='{:.1f}')
ax2.set_xlabel('Number of Qubits')
ax2.set_ylabel('Average Time (ms)')
ax2.set_title('Average Time vs Number of Qubits')
ax2.legend()
ax3.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
Normalized = ax3.bar(np.array(num_qubits_range) - bar_width / 2, avg_transpiled_depths, color='cyan', label='Normalized Depth', width=bar_width) # Adjust width here
Algorithmic = ax3.bar(np.array(num_qubits_range) + bar_width / 2,avg_circuit_depths, color='deepskyblue', label='Algorithmic Depth', width=bar_width) # Adjust width here
autolabel(Normalized,ax3,str='{:.2f}')
autolabel(Algorithmic,ax3,str='{:.2f}')
ax3.set_xlabel('Number of Qubits')
ax3.set_ylabel('Average Circuit Depth')
ax3.set_title('Average Circuit Depth vs Number of Qubits')
ax3.legend()
if gate_counts_plots == True:
    ax4.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
    Normalized_1Q_counts = ax4.bar(np.array(num_qubits_range) - bar_width / 2, avg_1Q_Transpiled_gate_counts, color='cyan', label='Normalized Gate Counts', width=bar_width) # Adjust width here
    Algorithmic_1Q_counts = ax4.bar(np.array(num_qubits_range) + bar_width / 2, avg_1Q_algorithmic_gate_counts, color='deepskyblue', label='Algorithmic Gate Counts', width=bar_width) # Adjust width here
    autolabel(Normalized_1Q_counts,ax4,str='{}')
    autolabel(Algorithmic_1Q_counts,ax4,str='{}')
    ax4.set_xlabel('Number of Qubits')
    ax4.set_ylabel('Average 1-Qubit Gate Counts')
    ax4.set_title('Average 1-Qubit Gate Counts vs Number of Qubits')
    ax4.legend()
    ax5.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
    Normalized_2Q_counts = ax5.bar(np.array(num_qubits_range) - bar_width / 2, avg_2Q_Transpiled_gate_counts, color='cyan', label='Normalized Gate Counts', width=bar_width) # Adjust width here
    Algorithmic_2Q_counts = ax5.bar(np.array(num_qubits_range) + bar_width / 2, avg_2Q_algorithmic_gate_counts, color='deepskyblue', label='Algorithmic Gate Counts', width=bar_width) # Adjust width here
    autolabel(Normalized_2Q_counts,ax5,str='{}')
    autolabel(Algorithmic_2Q_counts,ax5,str='{}')
    ax5.set_xlabel('Number of Qubits')
    ax5.set_ylabel('Average 2-Qubit Gate Counts')
    ax5.set_title('Average 2-Qubit Gate Counts vs Number of Qubits')
    ax5.legend()
ax6.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
Hellinger = ax6.bar(np.array(num_qubits_range) - bar_width / 2, avg_Hf, width=bar_width, label='Hellinger Fidelity',color='cyan') # Adjust width here
Normalized = ax6.bar(np.array(num_qubits_range) + bar_width / 2, avg_f, width=bar_width, label='Normalized Fidelity', color='deepskyblue') # Adjust width here
autolabel(Hellinger,ax6,str='{:.2f}')
autolabel(Normalized,ax6,str='{:.2f}')
ax6.set_xlabel('Number of Qubits')
ax6.set_ylabel('Average Value')
ax6.set_title("Fidelity Comparison")
ax6.legend()
if Memory_utilization_plot == True:
    ax7.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
    x = ax7.bar(num_qubits_range, max_memory, color='turquoise', width=bar_width, label="Memory Utilizations")
    autolabel(ax7.patches, ax7)
    ax7.set_xlabel('Number of Qubits')
    ax7.set_ylabel('Maximum Memory Utilized (MB)')
    ax7.set_title('Memory Utilized vs Number of Qubits',fontsize=14)
plt.tight_layout(rect=[0, 0, 1, 0.96])
if saveplots == True:
    plt.savefig("ParameterPlotsSample.jpg")
plt.show()
# Quantum Volume Plot
# Plots each qubit width vs transpiled depth, colored by average fidelity,
# against a QV background (QV_ global if set, else 2048).
Suptitle = f"Volumetric Positioning - {platform}"
appname=benchmark_name
if QV_ == None:
    QV=2048
else:
    QV=QV_
depth_base =2
ax = plot_volumetric_background(max_qubits=max_qbits, QV=QV,depth_base=depth_base, suptitle=Suptitle, colorbar_label="Avg Result Fidelity")
w_data = num_qubits_range
# determine width for circuit
w_max = 0
for i in range(len(w_data)):
    y = float(w_data[i])
    w_max = max(w_max, y)
d_tr_data = avg_transpiled_depths
f_data = avg_f
plot_volumetric_data(ax, w_data, d_tr_data, f_data, depth_base, fill=True,label=appname, labelpos=(0.4, 0.6), labelrot=15, type=1, w_max=w_max)
anno_volumetric_data(ax, depth_base,label=appname, labelpos=(0.4, 0.6), labelrot=15, type=1, fill=False)
|
https://github.com/Qubico-Hack/tutorials
|
Qubico-Hack
|
!pip install qiskit torch torchvision matplotlib
!pip install qiskit-machine-learning
!pip install torchviz
!pip install qiskit[all]
!pip install qiskit==0.45.2
!pip install qiskit_algorithms==0.7.1
!pip install qiskit-ibm-runtime==0.17.0
!pip install qiskit-aer==0.13.2
#Quentum net draw
!pip install pylatexenc
# PyTorch
import torch
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms
import torch.optim as optim
from torch.nn import Module, Conv2d, Linear, Dropout2d, CrossEntropyLoss
import torch.nn.functional as F
from torchviz import make_dot
from torch import Tensor
from torch import cat
# Qiskit
from qiskit import Aer
from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.neural_networks.estimator_qnn import EstimatorQNN
from qiskit_machine_learning.circuit.library import QNNCircuit
from google.colab import drive
drive.mount('/content/drive')
# Visualization
import matplotlib.pyplot as plt
import numpy as np
# Folder direction
# Load the train/test image folders from Drive (labels come from subfolder names).
train_data = datasets.ImageFolder('/content/drive/MyDrive/QCNN/Data-set/Train', transform=transforms.Compose([transforms.ToTensor()]))
test_data = datasets.ImageFolder('/content/drive/MyDrive/QCNN/Data-set/Test', transform=transforms.Compose([transforms.ToTensor()]))
# Train and test tensor size
print(f"Data tensor Dimension:",train_data[0][0].shape)
# Convert to DataLoader (single-image batches)
train_loader = DataLoader(train_data, shuffle=True, batch_size=1)
test_loader = DataLoader(test_data, shuffle=True, batch_size=1)
# Show the label-to-index mapping
print((train_loader.dataset.class_to_idx))
# Preview the first few training images with their labels.
n_samples_show = 5
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 10))
while n_samples_show > 0:
    images, targets = data_iter.__next__()
    axes[n_samples_show - 1].imshow(images[0, 0].numpy().squeeze(), cmap=plt.cm.rainbow)
    axes[n_samples_show - 1].set_xticks([])
    axes[n_samples_show - 1].set_yticks([])
    axes[n_samples_show - 1].set_title(f"Labeled: {targets[0].item()}")
    n_samples_show -= 1
# batch size
# NOTE(review): defined but unused — the DataLoaders above use batch_size=1.
batch_size = 10
# Quantum Neural Networ model
# Quantum neural network model
def create_qnn():
    """Build a 2-qubit EstimatorQNN wrapping a default QNNCircuit."""
    return EstimatorQNN(circuit=QNNCircuit(2))

qnn = create_qnn()
print(qnn.circuit)
# Dynamically compute the input size for fc1
def get_conv_output_size(model, shape):
    """Push a dummy tensor of ``shape`` through ``model._forward_features``
    and return the flattened feature count.

    NOTE(review): the ``Net`` class below does not define
    ``_forward_features``, so this helper appears to be dead/broken code.
    """
    batch_size = 1
    input = torch.autograd.Variable(torch.rand(batch_size, *shape))
    output_feat = model._forward_features(input)
    n_size = output_feat.data.view(batch_size, -1).size(1)
    return n_size
# Define the PyTorch neural network
class Net(Module):
    """CNN feature extractor followed by a quantum layer (TorchConnector)
    and a 1->1 linear head; forward returns cat((x, 1 - x)) so the output
    has two columns.
    """
    def __init__(self, qnn):
        super(Net, self).__init__()
        self.conv1 = Conv2d(3, 24, kernel_size=5)
        self.conv2 = Conv2d(24, 48, kernel_size=5)
        self.dropout = Dropout2d()
        # Dynamically compute the input size for fc1
        #self.conv_output_size = self._get_conv_output_size((3, 432, 432))
        # NOTE(review): 529200 hard-codes the flattened conv output size —
        # presumably for 3x432x432 inputs (see the commented line above);
        # confirm against the dataset's image dimensions.
        self.fc1 = Linear(529200, 512) # Reduce the number of neurons in fc1
        self.fc2 = Linear(512, 2) # Output size 2 for two classes
        self.qnn = TorchConnector(qnn)
        self.fc3 = Linear(1, 1) # 1->1 head applied after the quantum layer
    def _get_conv_output_size(self, shape):
        # NOTE(review): references self._forward_features, which is not
        # defined anywhere in this class — dead/broken helper.
        batch_size = 1
        input = torch.autograd.Variable(torch.rand(batch_size, *shape))
        output_feat = self._forward_features(input)
        n_size = output_feat.data.view(batch_size, -1).size(1)
        print("TamaΓ±o calculado:", n_size)
        return n_size
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout(x)
        x = x.view(x.shape[0], -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        x = self.qnn(x) # Apply the quantum network in the forward pass
        x = self.fc3(x)
        return cat((x, 1 - x), -1)
# Create an instance of the model
model = Net(qnn)
# Print the model
print(model)
print(f"Device: {next(model.parameters()).device}")
#dummy_tensor = next(iter(train_loader))[0].to('cuda')
dummy_tensor = next(iter(train_loader))[0]
output = model(dummy_tensor)
params = dict(list(model.named_parameters()))
# Concatenate the tensors using torch.cat to build the graph for torchviz
concatenated_output = torch.cat((output, 1 - output), -1)
make_dot(concatenated_output, params=params).render("rnn_torchviz", format="png")
# Define the optimizer and loss function
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = CrossEntropyLoss()
# Start training
epochs = 3 # Number of epochs
loss_list = []
model.train() # Put the model in training mode
for epoch in range(epochs):
    correct = 0
    total_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad(set_to_none=True) # Reset gradients
        output = model(data)
        loss = loss_func(output, target)
        loss.backward() # Backward pass
        optimizer.step() # Update weights
        total_loss.append(loss.item()) # Track the loss
        train_pred = output.argmax(dim=1, keepdim=True)
        correct += train_pred.eq(target.view_as(train_pred)).sum().item()
    loss_list.append(sum(total_loss) / len(total_loss))
    # NOTE(review): len(train_loader) counts batches; this equals the number
    # of samples only because the loader uses batch_size=1.
    accuracy = 100 * correct / len(train_loader) # Accuracy over the epoch
    print(f"Training [{100.0 * (epoch + 1) / epochs:.0f}%]\tLoss: {loss_list[-1]:.4f}\tAccuracy: {accuracy:.2f}%")
|
https://github.com/PabloMartinezAngerosa/QAOA-uniform-convergence
|
PabloMartinezAngerosa
|
from tsp_qaoa import test_solution
from qiskit.visualization import plot_histogram
import networkx as nx
import numpy as np
import json
import csv
# Array of JSON Objects
# Sort the JSON data based on the value of the brand key
# NOTE(review): UNIFORM_CONVERGENCE_SAMPLE is used here before anything in
# this file assigns it (it is only bound inside the loop below by
# test_solution()) — as written this raises NameError; confirm intended order.
UNIFORM_CONVERGENCE_SAMPLE.sort(key=lambda x: x["mean"])
# Distance of each sample's probability vector from the lowest-mean sample's.
index = -1
for sample in UNIFORM_CONVERGENCE_SAMPLE:
    mean = sample["mean"]
    index += 1
    distance_p_ground_state = np.max(np.abs(UNIFORM_CONVERGENCE_SAMPLE[0]["probabilities"] - sample["probabilities"]))
    UNIFORM_CONVERGENCE_SAMPLE[index]["distance_pgs"] = distance_p_ground_state
header = ['instance', 'iteration', 'distance']
length_instances = 2
with open('qaoa_multiple_distance.csv', 'w', encoding='UTF8') as f:
    writer = csv.writer(f)
    # write the header
    # NOTE(review): the header has 3 columns but the rows written below have
    # 4 values ([iteration, state, probability, mean]) — likely a mismatch.
    writer.writerow(header)
    for i in range(length_instances):
        job_2, G, UNIFORM_CONVERGENCE_SAMPLE = test_solution()
        iteration = 0
        for sample in UNIFORM_CONVERGENCE_SAMPLE:
            iteration += 1
            mean = sample["mean"]
            # NOTE(review): "distance_pgs" is only set by the pre-loop pass
            # above, not on freshly fetched samples — may raise KeyError.
            distance = sample["distance_pgs"]
            state = 0
            for probability in sample["probabilities"]:
                state += 1
                # write the data
                data = [iteration, state, probability, mean]
                writer.writerow(data)
            # NOTE(review): writer_q is never defined — this raises NameError;
            # presumably a second csv writer was intended for distance rows.
            writer_q.writerow([iteration, distance])
|
https://github.com/shesha-raghunathan/DATE2019-qiskit-tutorial
|
shesha-raghunathan
|
# Only Python 3 is supported; abort early under Python 2.
import sys
if sys.version_info[0] < 3:
    raise Exception('Please use Python version 3 or greater.')
# useful additional packages
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import time
from pprint import pprint
# importing the QISKit
from qiskit import QuantumCircuit, QuantumProgram
#import Qconfig
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
# Program spec for the legacy QuantumProgram API: one circuit named
# 'W_states' over a 5-qubit quantum register and a 5-bit classical register.
QPS_SPECS = {
    'circuits': [{
        'name': 'W_states',
        'quantum_registers': [{
            'name':'q',
            'size':5
        }],
        'classical_registers': [{
            'name':'c',
            'size':5
        }]}],
}
"Choice of the backend"
# The flag_qx2 must be "True" for using the ibmqx2.
# "True" is also better when using the simulator
#backend = 'ibmqx2' # not advisable if other pending jobs!
#backend = 'ibmqx4' # not advisable if other pending jobs!
backend = 'local_qasm_simulator' #OK
#backend = 'ibmqx_hpc_qasm_simulator' #OK
flag_qx2 = True
if backend == 'ibmqx4':
    flag_qx2 = False
print("Your choice for the backend is: ", backend, "flag_qx2 is: ", flag_qx2)
# Here, two useful routine
# Define a F_gate
def F_gate(circ, q, i, j, n, k):
    """Apply the F gate used in the W-state construction: an ry/cz/ry
    sandwich on qubit ``j`` controlled from qubit ``i``, with the rotation
    angle arccos(sqrt(1/(n-k+1))), followed by a barrier on qubit ``i``.
    """
    angle = np.arccos(np.sqrt(1 / (n - k + 1)))
    circ.ry(-angle, q[j])
    circ.cz(q[i], q[j])
    circ.ry(angle, q[j])
    circ.barrier(q[i])
# Define the cxrv gate which uses reverse CNOT instead of CNOT
def cxrv(circ, q, i, j):
    """Apply a CNOT with control ``i`` / target ``j`` realized as a
    reversed CNOT conjugated by Hadamards on both qubits, followed by a
    barrier on the pair (useful on devices with one-way couplings).
    """
    for wire in (i, j):
        circ.h(q[wire])
    circ.cx(q[j], q[i])
    for wire in (i, j):
        circ.h(q[wire])
    circ.barrier(q[i], q[j])
# 3-qubit W state
Q_program = QuantumProgram(specs=QPS_SPECS)
#Q_program.set_api(Qconfig.APItoken, Qconfig.config['url'])
W_states = Q_program.get_circuit('W_states')
q = Q_program.get_quantum_register('q')
c = Q_program.get_classical_register('c')
W_states.x(q[2]) #start is |100>
F_gate(W_states,q,2,1,3,1) # Applying F12
F_gate(W_states,q,1,0,3,2) # Applying F23
if flag_qx2 : # option ibmqx2
    W_states.cx(q[1],q[2]) # cNOT 21
    W_states.cx(q[0],q[1]) # cNOT 32
else : # option ibmqx4
    # ibmqx4's coupling map needs the H-conjugated reversed CNOT.
    cxrv(W_states,q,1,2)
    cxrv(W_states,q,0,1)
# Coin tossing
# Qubits 3 and 4 in superposition provide the random bits used by the game.
W_states.h(q[3])
W_states.h(q[4])
for i in range(5) :
    W_states.measure(q[i] , c[i])
circuits = ['W_states']
"Dotted alphabet"
top_bottom = "βββββββββββββββ"
blank = "β β"
chosen = []
chosen = chosen + ["βββββββββββββββ"]
chosen = chosen + ["βββββββββββ ββ"]
chosen = chosen + ["ββββββββββ βββ"]
chosen = chosen + ["βββββββββ ββββ"]
chosen = chosen + ["ββββββββ βββββ"]
chosen = chosen + ["β ββββ ββββββ"]
chosen = chosen + ["ββ ββ βββββββ"]
chosen = chosen + ["βββ ββββββββ"]
chosen = chosen + ["ββββ βββββββββ"]
chosen = chosen + ["βββββββββββββββ"]
here_left = []
here_left = here_left + ["βββββββββββββββ"]
here_left = here_left + ["βββββββββββββββ"]
here_left = here_left + ["βββ βββββββββ"]
here_left = here_left + ["βββ βββββββββ"]
here_left = here_left + ["βββ βββββββββ"]
here_left = here_left + ["βββ βββββββββ"]
here_left = here_left + ["βββ βββββββββ"]
here_left = here_left + ["βββ ββββ"]
here_left = here_left + ["βββββββββββββββ"]
here_left = here_left + ["βββββββββββββββ"]
here_center = []
here_center = here_center + ["βββββββββββββββ"]
here_center = here_center + ["βββββββββββββββ"]
here_center = here_center + ["βββββ ββββ"]
here_center = here_center + ["βββ βββββββββ"]
here_center = here_center + ["βββ βββββββββ"]
here_center = here_center + ["βββ βββββββββ"]
here_center = here_center + ["βββ βββββββββ"]
here_center = here_center + ["βββββ ββββ"]
here_center = here_center + ["βββββββββββββββ"]
here_center = here_center + ["βββββββββββββββ"]
here_right = []
here_right = here_right + ["βββββββββββββββ"]
here_right = here_right + ["βββββββββββββββ"]
here_right = here_right + ["βββ βββββ"]
here_right = here_right + ["βββ βββ βββ"]
here_right = here_right + ["βββ βββ βββ"]
here_right = here_right + ["βββ βββββ"]
here_right = here_right + ["βββ ββ ββββ"]
here_right = here_right + ["βββ βββ βββ"]
here_right = here_right + ["βββββββββββββββ"]
here_right = here_right + ["βββββββββββββββ"]
goa=["β β","β ( ) β","β ( ) β","β / O O \ β","β )|( β","β @ β","β = β","β Y β","β β"]
car=["β β","β _______ β","β / \ β","β Β° _______ Β° β","β / \ β","β (O) ### (O) β","β =+=====+= β","β || || β","β β"]
"(RE)INITIATES STATISTICS"
nb_randomnb = 0
nb_left = 0
nb_center = 0
nb_right = 0
nb_switches = 0
nb_stays = 0
nb_won_switching = 0
nb_won_sticking = 0
nb_games = 0
n_won = 0
"HERE START THE GAME"
"Hiding the car and the two goats behind the three doors"
Label = ["left", "central", "right"]
shots = 1
repeat = "Y"
while repeat == "Y":
nb_of_cars = 4
while nb_of_cars != 1:
result = Q_program.execute(circuits, backend=backend, shots=shots, max_credits=5, wait=5, timeout=600)
c5str = str(result.get_counts('W_states'))
nb_of_cars = int(c5str[4]) + int(c5str[5]) + int(c5str[6])
#this is for checking results from the real computer:
if nb_of_cars == 0:
print("They managed to hide three goats and no car behind the doors! Restarting the hiding process...")
if nb_of_cars >= 2:
print("They managed to hide", nb_of_cars, "cars behind the doors! Restarting the hiding process...")
print(top_bottom," ",top_bottom," ",top_bottom)
for i in range(9):
print(here_left[i]," ",here_center[i]," ",here_right[i])
print(top_bottom," ",top_bottom," ",top_bottom,"\n")
door = input("Game master: Your choice? letter l: left door, c: central door, r: right door + enter\n").upper()
picl = here_left
picc = here_center
picr = here_right
if (door == "L"):
Doorchosen = 1
nb_left = nb_left + 1
picl = chosen
else:
if (door == "C"):
Doorchosen = 2
nb_center = nb_center + 1
picc=chosen
else:
Doorchosen = 3
nb_right = nb_right + 1
picr = chosen
print('Game master: Your choice was the',Label[Doorchosen-1], "door")
"AN OPPORTUNITY TO CHANGE YOUR MIND"
c5str = str(result.get_counts('W_states'))
randomnb = (int(c5str[2]) + int(c5str[3])) %2
if c5str[4] == "1": #car behind left door
Doorwinning = 1
if Doorchosen == 1:
Dooropen = 2 + randomnb
Doorswitch = 3 - randomnb
if Doorchosen == 2:
Dooropen = 3
Doorswitch = 1
if Doorchosen == 3:
Dooropen = 2
Doorswitch = 1
if c5str[5] == "1": #car behind central door
Doorwinning = 2
if Doorchosen == 2:
Dooropen = 1 + 2*randomnb
Doorswitch = 3 - 2*randomnb
if Doorchosen == 1:
Dooropen = 3
Doorswitch = 2
if Doorchosen == 3:
Dooropen = 1
Doorswitch = 2
if c5str[6] == "1": #car behind right door
Doorwinning = 3
if Doorchosen == 3:
Dooropen = randomnb + 1
Doorswitch = 2 - randomnb
if Doorchosen == 1:
Dooropen = 2
Doorswitch = 3
if Doorchosen == 2:
Dooropen = 1
Doorswitch = 3
if Dooropen == 1:
picl = goa
if Dooropen == 2:
picc = goa
if Dooropen == 3:
picr = goa
print(top_bottom," ",top_bottom," ",top_bottom)
for i in range(9):
print(picl[i]," ",picc[i]," ",picr[i])
print(top_bottom," ",top_bottom," ",top_bottom,"\n")
print('I opened the', Label[Dooropen-1], 'door and you see a goat')
print('You get now an opportunity to change your choice!')
print("Do you want to switch for the ",Label[Doorswitch-1], " door?")
I_switch = input(" Answer by (y/n) + enter\n").upper()
if (I_switch == "Y"):
Doorfinal = Doorswitch
else:
Doorfinal = Doorchosen
"FINAL ANNOUNCE"
if Doorfinal == Doorwinning:
if Doorfinal == 1:
picl = car
if Doorfinal == 2:
picc = car
if Doorfinal == 3:
picr = car
endmessage = 'won the car! Congratulations!'
else:
if Doorfinal == 1:
picl = goa
if Doorfinal == 2:
picc = goa
if Doorfinal == 3:
picr = goa
endmessage = 'won a goat! Sorry!'
print(top_bottom," ",top_bottom," ",top_bottom)
for i in range(9):
print(picl[i]," ",picc[i]," ",picr[i])
print(top_bottom," ",top_bottom," ",top_bottom,"\n")
print('Game master: You opened the',Label[Doorfinal-1],'door and', endmessage)
"STATISTICS"
nb_games = nb_games + 1
nb_randomnb = nb_randomnb + randomnb
if Doorfinal == Doorswitch:
nb_switches = nb_switches +1
if c5str[Doorfinal+3] == "1":
nb_won_switching = nb_won_switching + 1
else:
nb_stays = nb_stays+1
if c5str[Doorfinal+3] == "1":
nb_won_sticking = nb_won_sticking + 1
n_won = nb_won_switching + nb_won_sticking
print()
print("YOUR STATS")
print("nb of games: ", nb_games," total nb won:", n_won, " first choice: left",nb_left," center", nb_center,"right", nb_right)
print("nb sticking: ",nb_stays," nb won when sticking: ",nb_won_sticking,"nb switching:",nb_switches," nb won when switching:",nb_won_switching)
repeat = input("Another game? Answer by (y/n) + enter\n").upper()
print("Game over")
%run "../version.ipynb"
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# If you introduce a list with less colors than bars, the color of the bars will
# alternate following the sequence from the list.
import numpy as np
from qiskit.quantum_info import DensityMatrix
from qiskit import QuantumCircuit
from qiskit.visualization import plot_state_paulivec

# Build the circuit whose density matrix will be plotted.
# NOTE(review): the original first built a Bell-state circuit (h/cx) and then
# immediately shadowed it with a fresh QuantumCircuit; that dead code is removed.
qc = QuantumCircuit(2)
qc.h([0, 1])
qc.cz(0, 1)
qc.ry(np.pi/3, 0)
qc.rx(np.pi/5, 1)
matrix = DensityMatrix(qc)
plot_state_paulivec(matrix, color=['crimson', 'midnightblue', 'seagreen'])
|
https://github.com/qBraid/qBraid
|
qBraid
|
# Copyright (C) 2024 qBraid
#
# This file is part of the qBraid-SDK
#
# The qBraid-SDK is free software released under the GNU General Public License v3
# or later. You can redistribute and/or modify it under the terms of the GPL v3.
# See the LICENSE file in the project root or <https://www.gnu.org/licenses/gpl-3.0.html>.
#
# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3.
"""
Module defining Qiskit OpenQASM conversions
"""
from typing import TYPE_CHECKING
from qbraid_core._import import LazyLoader
from qbraid.passes.qasm3.compat import add_stdgates_include, insert_gate_def, replace_gate_name
from qbraid.transpiler.annotations import weight
qiskit_qasm3 = LazyLoader("qiskit_qasm3", globals(), "qiskit.qasm3")
if TYPE_CHECKING:
import qiskit as qiskit_
def transform_notation(qasm3: str) -> str:
    """
    Process an OpenQASM 3 program that was generated by
    an external tool to make it compatible with Qiskit.
    """
    # Canonical Qiskit spellings for gate names emitted by other SDKs.
    gate_renames = (
        ("cnot", "cx"),
        ("si", "sdg"),
        ("ti", "tdg"),
        ("v", "sx"),
        ("vi", "sxdg"),
        ("phaseshift", "p"),
        ("cphaseshift", "cp"),
    )
    for external_name, qiskit_name in gate_renames:
        qasm3 = replace_gate_name(qasm3, external_name, qiskit_name)
    # Ensure the standard gate library is included, then inject explicit
    # definitions for gates it does not cover.
    qasm3 = add_stdgates_include(qasm3)
    for missing_gate in ("iswap", "sxdg"):
        qasm3 = insert_gate_def(qasm3, missing_gate)
    return qasm3
@weight(1)
def qasm3_to_qiskit(qasm3: str) -> "qiskit_.QuantumCircuit":
    """Convert QASM 3.0 string to a Qiskit QuantumCircuit representation.

    Args:
        qasm3 (str): A string in QASM 3.0 format.

    Returns:
        qiskit.QuantumCircuit: A QuantumCircuit object representing the input QASM 3.0 string.
    """
    try:
        # Fast path: the program may already be Qiskit-compatible.
        return qiskit_qasm3.loads(qasm3)
    except qiskit_qasm3.QASM3ImporterError:
        pass
    # Normalize external-tool notation, then retry the import.
    return qiskit_qasm3.loads(transform_notation(qasm3))
|
https://github.com/Z-928/Bugs4Q
|
Z-928
|
from math import pi

from qiskit import *

# Build a 2-qubit circuit: Hadamard on qubit 0, then a controlled-RZ(pi/4)
# with qubit 0 as control and qubit 1 as target.
qc = QuantumCircuit(2)
qc.h(0)  # original referenced undefined name `i`; apply H to qubit 0
qc.crz(pi / 4, 0, 1)  # original referenced undefined name `PI`
|
https://github.com/GIRISHBELANI/QC_Benchmarks_using_dm-simulator
|
GIRISHBELANI
|
"""
This is the final implementation of Shor's Algorithm using the circuit presented in section 2.3 of the report about the first
simplification introduced by the base paper used.
As the circuit is completely general, it is a rather long circuit, with a lot of QASM instructions in the generated Assembly code,
which makes that for high values of N the code is not able to run in IBM Q Experience because IBM has a very low restriction on the number os QASM instructions
it can run. For N=15, it can run on IBM. But, for example, for N=21 it already may not, because it exceeds the restriction of QASM instructions. The user can try
to use n qubits on top register instead of 2n to get more cases working on IBM. This will, however and naturally, diminish the probabilty of success.
For a small number of qubits (about until 20), the code can be run on a local simulator. This makes it to be a little slow even for the factorization of small
numbers N. Because of this, although all is general and we ask the user to introduce the number N and if he agrees with the 'a' value selected or not,
we after doing that force N=15 and a=4, because that is a case where the simulation, although slow, can be run in local simulator and does not last 'forever' to end.
If the user wants he can just remove the 2 lines of code where that is done, and put bigger N (that will be slow) or can try to run on the ibm simulator (for that,
the user should introduce its IBM Q Experience Token and be aware that for high values of N it will just receive a message saying the size of the circuit is too big)
"""
""" Imports from qiskit"""
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute, IBMQ
from qiskit import BasicAer
import sys
""" Imports to Python functions """
import math
import array
import fractions
import numpy as np
import time
""" Local Imports """
from cfunctions import check_if_power, get_value_a
from cfunctions import get_factors
from qfunctions import create_QFT, create_inverse_QFT
from qfunctions import cMULTmodN
""" Main program """
if __name__ == '__main__':
    """ Ask for analysis number N """
    N = int(input('Please insert integer number N: '))
    print('input number was: {0}\n'.format(N))
    """ Check if N==1 or N==0"""
    if N==1 or N==0:
        print('Please put an N different from 0 and from 1')
        exit()
    """ Check if N is even """
    if (N%2)==0:
        print('N is even, so does not make sense!')
        exit()
    """ Check if N can be put in N=p^q, p>1, q>=2 """
    """ Try all numbers for p: from 2 to sqrt(N) """
    if check_if_power(N)==True:
        exit()
    print('Not an easy case, using the quantum circuit is necessary\n')
    """ To login to IBM Q experience the following functions should be called """
    """
    IBMQ.delete_accounts()
    IBMQ.save_account('insert token here')
    IBMQ.load_accounts()
    """
    """ Get an integer a that is coprime with N """
    a = get_value_a(N)
    """ If user wants to force some values, he can do that here, please make sure to update the print and that N and a are coprime"""
    print('Forcing N=15 and a=4 because its the fastest case, please read top of source file for more info')
    N=15
    a=4
    """ Get n value used in Shor's algorithm, to know how many qubits are used """
    n = math.ceil(math.log(N,2))
    # Register layout: n+2 aux + 2n counting + n work qubits = 4n+2 total.
    print('Total number of qubits used: {0}\n'.format(4*n+2))
    ts = time.time()
    """ Create quantum and classical registers """
    """auxilliary quantum register used in addition and multiplication"""
    aux = QuantumRegister(n+2)
    """quantum register where the sequential QFT is performed"""
    up_reg = QuantumRegister(2*n)
    """quantum register where the multiplications are made"""
    down_reg = QuantumRegister(n)
    """classical register where the measured values of the QFT are stored"""
    up_classic = ClassicalRegister(2*n)
    """ Create Quantum Circuit """
    circuit = QuantumCircuit(down_reg , up_reg , aux, up_classic)
    """ Initialize down register to 1 and create maximal superposition in top register """
    circuit.h(up_reg)
    circuit.x(down_reg[0])
    """ Apply the multiplication gates as showed in the report in order to create the exponentiation """
    for i in range(0, 2*n):
        # Controlled multiplication by a^(2^i) mod N, controlled on counting qubit i.
        cMULTmodN(circuit, up_reg[i], down_reg, aux, int(pow(a, pow(2, i))), N, n)
    """ Apply inverse QFT """
    create_inverse_QFT(circuit, up_reg, 2*n ,1)
    """ Measure the top qubits, to get x value"""
    circuit.measure(up_reg,up_classic)
    """ show results of circuit creation """
    create_time = round(time.time()-ts, 3)
    #if n < 8: print(circuit)
    print(f"... circuit creation time = {create_time}")
    ts = time.time()
    """ Select how many times the circuit runs"""
    number_shots=int(input('Number of times to run the circuit: '))
    if number_shots < 1:
        print('Please run the circuit at least one time...')
        exit()
    if number_shots > 1:
        print('\nIf the circuit takes too long to run, consider running it less times\n')
    """ Print info to user """
    print('Executing the circuit {0} times for N={1} and a={2}\n'.format(number_shots,N,a))
    """ Simulate the created Quantum Circuit """
    simulation = execute(circuit, backend=BasicAer.get_backend('qasm_simulator'),shots=number_shots)
    """ to run on IBM, use backend=IBMQ.get_backend('ibmq_qasm_simulator') in execute() function """
    """ to run locally, use backend=BasicAer.get_backend('qasm_simulator') in execute() function """
    """ Get the results of the simulation in proper structure """
    sim_result=simulation.result()
    counts_result = sim_result.get_counts(circuit)
    """ show execution time """
    exec_time = round(time.time()-ts, 3)
    print(f"... circuit execute time = {exec_time}")
    """ Print info to user from the simulation results """
    print('Printing the various results followed by how many times they happened (out of the {} cases):\n'.format(number_shots))
    i=0
    while i < len(counts_result):
        print('Result \"{0}\" happened {1} times out of {2}'.format(list(sim_result.get_counts().keys())[i],list(sim_result.get_counts().values())[i],number_shots))
        i=i+1
    """ An empty print just to have a good display in terminal """
    print(' ')
    """ Initialize this variable """
    prob_success=0
    """ For each simulation result, print proper info to user and try to calculate the factors of N"""
    i=0
    while i < len(counts_result):
        """ Get the x_value from the final state qubits """
        output_desired = list(sim_result.get_counts().keys())[i]
        # Interpret the measured bitstring as the phase-estimation integer x.
        x_value = int(output_desired, 2)
        prob_this_result = 100 * ( int( list(sim_result.get_counts().values())[i] ) ) / (number_shots)
        print("------> Analysing result {0}. This result happened in {1:.4f} % of all cases\n".format(output_desired,prob_this_result))
        """ Print the final x_value to user """
        print('In decimal, x_final value for this result is: {0}\n'.format(x_value))
        """ Get the factors using the x value obtained """
        # Classical post-processing (continued fractions) on x; True if factors found.
        success=get_factors(int(x_value),int(2*n),int(N),int(a))
        if success==True:
            prob_success = prob_success + prob_this_result
        i=i+1
    print("\nUsing a={0}, found the factors of N={1} in {2:.4f} % of the cases\n".format(a,N,prob_success))
|
https://github.com/tomtuamnuq/compare-qiskit-ocean
|
tomtuamnuq
|
from mip import Model, BINARY, xsum, OptimizationStatus # CONTINUOUS, INTEGER
import numpy as np
from itertools import permutations
A = np.array([[1,1,1,1,1],
[2,2,2,2,2],
[0,1,2,3,4],
[2,1,4,1,0],
[0,1,0,1,0]])
var_type = BINARY
max_gap = 0.05
max_seconds=10
def createModelAndCheck(A, b, var_type, max_gap, max_seconds):
    # Feasibility check for A @ x == b with variables of type `var_type`,
    # solved with a time limit; prints any solution found.
    # NOTE(review): n,m are unpacked from A.shape, yet the variable list uses
    # n (rows) while the constraint loop uses m (columns) — these look
    # swapped for rectangular A. Harmless for the square 5x5 matrix used
    # here, but confirm before reusing with non-square matrices.
    n,m = A.shape
    model = Model()
    x = [model.add_var(var_type=var_type) for i in range(n)]
    for i in range(m):
        A_i = A[i]
        b_i = b[i]
        # One equality constraint per row: sum_j A[i][j] * x[j] == b[i]
        model += xsum(A_i[j]*x[j] for j in range(n)) == b_i
    model.max_gap = max_gap
    status = model.optimize(max_seconds=max_seconds)
    if status != OptimizationStatus.INFEASIBLE:
        print("Found b: ", b)
        if status == OptimizationStatus.OPTIMAL:
            print('optimal solution cost {} found'.format(model.objective_value))
        elif status == OptimizationStatus.FEASIBLE:
            print('sol.cost {} found, best possible: {}'.format(model.objective_value, model.objective_bound))
        elif status == OptimizationStatus.NO_SOLUTION_FOUND:
            print('no feasible solution found, lower bound is: {}'.format(model.objective_bound))
        if status == OptimizationStatus.OPTIMAL or status == OptimizationStatus.FEASIBLE:
            print('solution:')
            for v in model.vars:
                if abs(v.x) > 1e-6: # only printing non-zeros
                    print('{} : {}'.format(v.name, v.x))
brute_force_b = list(permutations([0,1,2,3,4,5,6,7,8],5))
for b in brute_force_b:
createModelAndCheck(A, np.array(b), var_type, max_gap, max_seconds)
A.dot(np.array([0,1,1,0,0]))
|
https://github.com/dimple12M/Qiskit-Certification-Guide
|
dimple12M
|
import numpy as np
from qiskit import QuantumCircuit, BasicAer, execute
from qiskit.circuit.library import YGate
from qiskit.quantum_info import Operator, average_gate_fidelity, process_fidelity, state_fidelity

# Operator A: the Pauli-Y gate.
op_a = Operator(YGate())
# Operator B: the same gate multiplied by the global phase exp(1j / 2).
op_b = np.exp(1j / 2) * op_a
# Compare the two operators; gate fidelities ignore global phase, so both
# comparisons report the gates as identical.
average_gate_fidelity(op_a,op_b)
process_fidelity(op_a, op_b)

# Repeat the idea with a general single-qubit state.
amp = 1/np.sqrt(3)
desired_state = [amp, np.sqrt(1 - amp**2)]
qc = QuantumCircuit(1)
qc.initialize(desired_state,0)
qc.draw('mpl')

# Recover the prepared state from the statevector simulator.
back_sv = BasicAer.get_backend('statevector_simulator')
result = execute(qc, back_sv).result()
qc_sv = result.get_statevector(qc)
# Fidelity between the target and the simulated state — they agree.
state_fidelity(desired_state, qc_sv)
|
https://github.com/shesha-raghunathan/DATE2019-qiskit-tutorial
|
shesha-raghunathan
|
# useful additional packages
import matplotlib.pyplot as plt
import matplotlib.axes as axes
%matplotlib inline
import numpy as np
import networkx as nx
from qiskit.tools.visualization import plot_histogram
from qiskit_aqua import Operator, run_algorithm, get_algorithm_instance
from qiskit_aqua.input import get_input_instance
from qiskit_aqua.translators.ising import maxcut, tsp
# setup aqua logging
import logging
from qiskit_aqua._logging import set_logging_config, build_logging_config
# set_logging_config(build_logging_config(logging.DEBUG)) # choose INFO, DEBUG to see the log
# ignoring deprecation errors on matplotlib
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
from qiskit import IBMQ
IBMQ.load_accounts()
# Generating a graph of 3 nodes
n = 3
num_qubits = n ** 2
ins = tsp.random_tsp(n)
G = nx.Graph()
G.add_nodes_from(np.arange(0, n, 1))
colors = ['r' for node in G.nodes()]
pos = {k: v for k, v in enumerate(ins.coord)}
default_axes = plt.axes(frameon=True)
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=.8, ax=default_axes, pos=pos)
print('distance\n', ins.w)
from itertools import permutations
def brute_force_tsp(w, N):
    """Exhaustively search all closed tours over N nodes starting at node 0.

    Args:
        w: pairwise distance matrix, indexable as w[i, j] (e.g. a numpy array).
        N: number of nodes.

    Returns:
        (best_distance, best_order): length of the shortest closed tour and
        the visiting order achieving it (always starting at node 0).
        best_order is None only if N < 1 yields no candidate orders.
    """
    a = list(permutations(range(1, N)))
    # float('inf') instead of the old 1e10 magic bound: with very large edge
    # weights the 1e10 sentinel could leave best_order unassigned.
    last_best_distance = float('inf')
    best_order = None
    for i in a:
        distance = 0
        pre_j = 0
        for j in i:
            distance = distance + w[j, pre_j]
            pre_j = j
        distance = distance + w[pre_j, 0]  # close the tour back to node 0
        order = (0,) + i
        if distance < last_best_distance:
            best_order = order
            last_best_distance = distance
            print('order = ' + str(order) + ' Distance = ' + str(distance))
    return last_best_distance, best_order
best_distance, best_order = brute_force_tsp(ins.w, ins.dim)
print('Best order from brute force = ' + str(best_order) + ' with total distance = ' + str(best_distance))
def draw_tsp_solution(G, order, colors, pos):
    """Draw graph G with the tour edges implied by `order` added on top."""
    tour_graph = G.copy()
    num_nodes = len(order)
    # Connect consecutive tour nodes, wrapping around to close the cycle.
    for idx in range(num_nodes):
        nxt = (idx + 1) % num_nodes
        tour_graph.add_edge(order[idx], order[nxt])
    default_axes = plt.axes(frameon=True)
    nx.draw_networkx(tour_graph, node_color=colors, node_size=600, alpha=.8, ax=default_axes, pos=pos)
draw_tsp_solution(G, best_order, colors, pos)
qubitOp, offset = tsp.get_tsp_qubitops(ins)
algo_input = get_input_instance('EnergyInput')
algo_input.qubit_op = qubitOp
#Making the Hamiltonian in its full form and getting the lowest eigenvalue and eigenvector
algorithm_cfg = {
'name': 'ExactEigensolver',
}
params = {
'problem': {'name': 'ising'},
'algorithm': algorithm_cfg
}
result = run_algorithm(params,algo_input)
print('energy:', result['energy'])
#print('tsp objective:', result['energy'] + offset)
x = tsp.sample_most_likely(result['eigvecs'][0])
print('feasible:', tsp.tsp_feasible(x))
z = tsp.get_tsp_solution(x)
print('solution:', z)
print('solution objective:', tsp.tsp_value(z, ins.w))
draw_tsp_solution(G, z, colors, pos)
algorithm_cfg = {
'name': 'VQE',
'operator_mode': 'matrix'
}
optimizer_cfg = {
'name': 'SPSA',
'max_trials': 300
}
var_form_cfg = {
'name': 'RY',
'depth': 5,
'entanglement': 'linear'
}
params = {
'problem': {'name': 'ising', 'random_seed': 10598},
'algorithm': algorithm_cfg,
'optimizer': optimizer_cfg,
'variational_form': var_form_cfg,
'backend': {'name': 'statevector_simulator'}
}
result = run_algorithm(params,algo_input)
print('energy:', result['energy'])
print('time:', result['eval_time'])
#print('tsp objective:', result['energy'] + offset)
x = tsp.sample_most_likely(result['eigvecs'][0])
print('feasible:', tsp.tsp_feasible(x))
z = tsp.get_tsp_solution(x)
print('solution:', z)
print('solution objective:', tsp.tsp_value(z, ins.w))
draw_tsp_solution(G, z, colors, pos)
# run quantum algorithm with shots
params['algorithm']['operator_mode'] = 'grouped_paulis'
params['backend']['name'] = 'qasm_simulator'
params['backend']['shots'] = 1024
result = run_algorithm(params,algo_input)
print('energy:', result['energy'])
print('time:', result['eval_time'])
#print('tsp objective:', result['energy'] + offset)
x = tsp.sample_most_likely(result['eigvecs'][0])
print('feasible:', tsp.tsp_feasible(x))
z = tsp.get_tsp_solution(x)
print('solution:', z)
print('solution objective:', tsp.tsp_value(z, ins.w))
plot_histogram(result['eigvecs'][0])
draw_tsp_solution(G, z, colors, pos)
|
https://github.com/lynnlangit/learning-quantum
|
lynnlangit
|
from qiskit import QuantumCircuit, Aer, execute, IBMQ
from qiskit.utils import QuantumInstance
import numpy as np
from qiskit.algorithms import Shor

# Authenticate with IBM Quantum (the placeholder token must be replaced).
IBMQ.enable_account('ENTER API TOKEN HERE') # Enter your API token here
# NOTE(review): the provider is fetched but never used below — the algorithm
# actually runs on the local Aer simulator.
provider = IBMQ.get_provider(hub='ibm-q')
backend = Aer.get_backend('qasm_simulator')
# Wrap the backend so Shor's algorithm runs with 1000 shots per circuit.
quantum_instance = QuantumInstance(backend, shots=1000)
my_shor = Shor(quantum_instance)
# Factor N = 15 and print the discovered factor pairs.
result_dict = my_shor.factor(15)
print(result_dict)
|
https://github.com/Bikramaditya0154/Quantum-Simulation-of-the-ground-states-of-Li-and-Li-2-using-Variational-Quantum-EIgensolver
|
Bikramaditya0154
|
from qiskit import Aer
from qiskit_nature.drivers import UnitsType, Molecule
from qiskit_nature.drivers.second_quantization import (
    ElectronicStructureDriverType,
    ElectronicStructureMoleculeDriver,
)
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
# Li^2+ ion: one Li nucleus, charge +2 (so a single electron), doublet spin.
molecule = Molecule(
    geometry=[["Li", [0.0, 0.0, 0.0]]], charge=2, multiplicity=2
)
# Electronic-structure problem in the minimal STO-3G basis via PySCF.
driver = ElectronicStructureMoleculeDriver(
    molecule, basis="sto3g", driver_type=ElectronicStructureDriverType.PYSCF
)
es_problem = ElectronicStructureProblem(driver)
# Map fermionic operators to qubits with the Jordan-Wigner transformation.
qubit_converter = QubitConverter(JordanWignerMapper())
from qiskit.providers.aer import StatevectorSimulator
from qiskit import Aer  # NOTE(review): duplicate import; Aer is imported above
from qiskit.utils import QuantumInstance
from qiskit_nature.algorithms import VQEUCCFactory
# VQE with a UCC-style ansatz on the exact statevector simulator.
quantum_instance = QuantumInstance(backend=Aer.get_backend("aer_simulator_statevector"))
vqe_solver = VQEUCCFactory(quantum_instance=quantum_instance)
from qiskit.algorithms import VQE
from qiskit.circuit.library import TwoLocal
# Alternative solver: plain VQE with a hardware-efficient TwoLocal ansatz.
# NOTE(review): `another_solver` is defined but never used — the calculation
# below uses `vqe_solver`.
tl_circuit = TwoLocal(
    rotation_blocks=["h", "rx"],
    entanglement_blocks="cz",
    entanglement="full",
    reps=2,
    parameter_prefix="y",
)
another_solver = VQE(
    ansatz=tl_circuit,
    quantum_instance=QuantumInstance(Aer.get_backend("aer_simulator_statevector")),
)
from qiskit_nature.algorithms import GroundStateEigensolver
# Solve for the ground state and print the full electronic-structure result.
calc = GroundStateEigensolver(qubit_converter, vqe_solver)
res = calc.solve(es_problem)
print(res)
|
https://github.com/yforman/QAOA
|
yforman
|
#In case you don't have qiskit, install it now
%pip install qiskit --quiet
#Installing/upgrading pylatexenc seems to have fixed my mpl issue
#If you try this and it doesn't work, try also restarting the runtime/kernel
%pip install pylatexenc --quiet
!pip install -Uqq ipdb
!pip install qiskit_optimization
import networkx as nx
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import BasicAer
from qiskit.compiler import transpile
from qiskit.quantum_info.operators import Operator, Pauli
from qiskit.quantum_info import process_fidelity
from qiskit.extensions.hamiltonian_gate import HamiltonianGate
from qiskit.extensions import RXGate, XGate, CXGate
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, execute
import numpy as np
from qiskit.visualization import plot_histogram
import ipdb
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
#quadratic optimization
from qiskit_optimization import QuadraticProgram
from qiskit_optimization.converters import QuadraticProgramToQubo
%pdb on
# def ApplyCost(qc, gamma):
# Ix = np.array([[1,0],[0,1]])
# Zx= np.array([[1,0],[0,-1]])
# Xx = np.array([[0,1],[1,0]])
# Temp = (Ix-Zx)/2
# T = Operator(Temp)
# I = Operator(Ix)
# Z = Operator(Zx)
# X = Operator(Xx)
# FinalOp=-2*(T^I^T)-(I^T^T)-(T^I^I)+2*(I^T^I)-3*(I^I^T)
# ham = HamiltonianGate(FinalOp,gamma)
# qc.append(ham,[0,1,2])
task = QuadraticProgram(name = 'QUBO on QC')
task.binary_var(name = 'x')
task.binary_var(name = 'y')
task.binary_var(name = 'z')
task.minimize(linear = {"x":-1,"y":2,"z":-3}, quadratic = {("x", "z"): -2, ("y", "z"): -1})
qubo = QuadraticProgramToQubo().convert(task) #convert to QUBO
operator, offset = qubo.to_ising()
print(operator)
# ham = HamiltonianGate(operator,0)
# print(ham)
Ix = np.array([[1,0],[0,1]])
Zx= np.array([[1,0],[0,-1]])
Xx = np.array([[0,1],[1,0]])
Temp = (Ix-Zx)/2
T = Operator(Temp)
I = Operator(Ix)
Z = Operator(Zx)
X = Operator(Xx)
FinalOp=-2*(T^I^T)-(I^T^T)-(T^I^I)+2*(I^T^I)-3*(I^I^T)
ham = HamiltonianGate(FinalOp,0)
print(ham)
#define PYBIND11_DETAILED_ERROR_MESSAGES
def compute_expectation(counts):
    """
    Average the QUBO objective over measured bitstrings.

    Args:
        counts: dict
            key as bitstring, val as count

    Returns:
        avg: float
            expectation value of the objective over all shots
    """
    weighted_total = 0
    total_shots = 0
    for bitstring, count in counts.items():
        # Qiskit bitstrings are little-endian: qubit 0 is the last character.
        x, y, z = int(bitstring[2]), int(bitstring[1]), int(bitstring[0])
        objective = -2 * x * z - y * z - x + 2 * y - 3 * z
        weighted_total += objective * count
        total_shots += count
    return weighted_total / total_shots
# We will also bring the different circuit components that
# build the qaoa circuit under a single function
def create_qaoa_circ(theta):
    """
    Creates a parametrized qaoa circuit.

    Args:
        theta: list
            unitary parameters; first half are mixer angles (beta),
            second half are cost angles (gamma)

    Returns:
        qc: qiskit circuit

    NOTE(review): the cost unitary uses the module-level `operator` (the
    Ising operator obtained from the QUBO conversion above); the locally
    constructed FinalOp is never applied.
    """
    nqubits = 3
    n,m=3,3
    p = len(theta)//2 # number of alternating unitaries
    qc = QuantumCircuit(nqubits,nqubits)
    # Local Pauli matrices and FinalOp below are leftover from an earlier
    # hand-built cost Hamiltonian; they are unused in the circuit.
    Ix = np.array([[1,0],[0,1]])
    Zx= np.array([[1,0],[0,-1]])
    Xx = np.array([[0,1],[1,0]])
    Temp = (Ix-Zx)/2
    T = Operator(Temp)
    I = Operator(Ix)
    Z = Operator(Zx)
    X = Operator(Xx)
    FinalOp=-2*(Z^I^Z)-(I^Z^Z)-(Z^I^I)+2*(I^Z^I)-3*(I^I^Z)
    beta = theta[:p]
    gamma = theta[p:]
    # initial_state: uniform superposition over all bitstrings
    for i in range(0, nqubits):
        qc.h(i)
    for irep in range(0, p):
        #ipdb.set_trace(context=6)
        # problem unitary: exp(-i * gamma * H_cost) via HamiltonianGate
        # for pair in list(G.edges()):
        #     qc.rzz(2 * gamma[irep], pair[0], pair[1])
        #ApplyCost(qc,2*0)
        ham = HamiltonianGate(operator,2 * gamma[irep])
        qc.append(ham,[0,1,2])
        # mixer unitary: RX rotations on every qubit
        for i in range(0, nqubits):
            qc.rx(2 * beta[irep], i)
    qc.measure(qc.qubits[:n],qc.clbits[:m])
    return qc
# Finally we write a function that executes the circuit on the chosen backend
def get_expectation(shots=512):
"""
Runs parametrized circuit
Args:
G: networkx graph
p: int,
Number of repetitions of unitaries
"""
backend = Aer.get_backend('qasm_simulator')
backend.shots = shots
def execute_circ(theta):
qc = create_qaoa_circ(theta)
# ipdb.set_trace(context=6)
counts = {}
job = execute(qc, backend, shots=1024)
result = job.result()
counts=result.get_counts(qc)
return compute_expectation(counts)
return execute_circ
from scipy.optimize import minimize
expectation = get_expectation()
res = minimize(expectation, [1, 1], method='COBYLA')
expectation = get_expectation()
res = minimize(expectation, res.x, method='COBYLA')
res
from qiskit.visualization import plot_histogram
backend = Aer.get_backend('aer_simulator')
backend.shots = 512
qc_res = create_qaoa_circ(res.x)
backend = Aer.get_backend('qasm_simulator')
job = execute(qc_res, backend, shots=1024)
result = job.result()
counts=result.get_counts(qc_res)
plot_histogram(counts)
|
https://github.com/BOBO1997/osp_solutions
|
BOBO1997
|
import numpy as np
import matplotlib.pyplot as plt
import itertools
from pprint import pprint
import pickle
import time
import datetime
# Import qubit states Zero (|0>) and One (|1>), and Pauli operators (X, Y, Z)
from qiskit.opflow import Zero, One, I, X, Y, Z
from qiskit import QuantumCircuit, QuantumRegister, IBMQ, execute, transpile, Aer
from qiskit.tools.monitor import job_monitor
from qiskit.circuit import Parameter
from qiskit.transpiler.passes import RemoveBarriers
# Import QREM package
from qiskit.ignis.mitigation.measurement import complete_meas_cal, CompleteMeasFitter
from qiskit.ignis.mitigation import expectation_value
# Import mitiq for zne
import mitiq
# Import state tomography modules
from qiskit.ignis.verification.tomography import state_tomography_circuits
from qiskit.quantum_info import state_fidelity
import sys
import importlib
sys.path.append("../utils/")
import circuit_utils, zne_utils, tomography_utils, sgs_algorithm
importlib.reload(circuit_utils)
importlib.reload(zne_utils)
importlib.reload(tomography_utils)
importlib.reload(sgs_algorithm)
from circuit_utils import *
from zne_utils import *
from tomography_utils import *
from sgs_algorithm import *
# Combine subcircuits into a single multiqubit gate representing a single trotter step
num_qubits = 3
# The final time of the state evolution
target_time = np.pi
# Parameterize variable t to be evaluated at t=pi later
dt = Parameter('t')
# Convert custom quantum circuit into a gate
trot_gate = trotter_gate(dt)
# initial layout (logical qubits 0,1,2 -> physical qubits 5,3,1 on Jakarta)
initial_layout = [5,3,1]
# Number of trotter steps
num_steps = 100
print("trotter step: ", num_steps)
# Noise scale factors used for zero-noise extrapolation (ZNE)
scale_factors = [1.0, 2.0, 3.0]
# Initialize quantum circuit for 3 qubits
qr = QuantumRegister(num_qubits, name="q")
qc = QuantumCircuit(qr)
# Prepare initial state (remember we are only evolving 3 of the 7 qubits on jakarta qubits (q_5, q_3, q_1) corresponding to the state |110>)
make_initial_state(qc, "110") # DO NOT MODIFY (|q_5,q_3,q_1> = |110>)
subspace_encoder_init110(qc, targets=[0, 1, 2]) # encode
trotterize(qc, trot_gate, num_steps, targets=[1, 2]) # Simulate time evolution under H_heis3 Hamiltonian
subspace_decoder_init110(qc, targets=[0, 1, 2]) # decode
# Evaluate simulation at target_time (t=pi) meaning each trotter step evolves pi/trotter_steps in time
qc = qc.bind_parameters({dt: target_time / num_steps})
print("created qc")
# Generate state tomography circuits to evaluate fidelity of simulation
st_qcs = state_tomography_circuits(qc, [0, 1, 2][::-1]) #! state tomography requires === BIG ENDIAN ===
print("created st_qcs (length:", len(st_qcs), ")")
# remove barriers
st_qcs = [RemoveBarriers()(qc) for qc in st_qcs]
print("removed barriers from st_qcs")
# optimize circuit (transpiled twice at level 3 for extra gate reduction)
t3_st_qcs = transpile(st_qcs, optimization_level=3, basis_gates=["sx", "cx", "rz"])
t3_st_qcs = transpile(t3_st_qcs, optimization_level=3, basis_gates=["sx", "cx", "rz"])
print("created t3_st_qcs (length:", len(t3_st_qcs), ")")
# zne wrapping
zne_qcs = zne_wrapper(t3_st_qcs, scale_factors = scale_factors, pt = True) # Pauli Twirling
print("created zne_qcs (length:", len(zne_qcs), ")")
# optimization_level must be 0 (presumably so ZNE's noise-scaled gates are not optimized away — confirm)
# feed initial_layout here to see the picture of the circuits before casting the job
t3_zne_qcs = transpile(zne_qcs, optimization_level=0, basis_gates=["sx", "cx", "rz"], initial_layout=initial_layout)
print("created t3_zne_qcs (length:", len(t3_zne_qcs), ")")
t3_zne_qcs[-3].draw("mpl")
# from qiskit.test.mock import FakeJakarta
# backend = FakeJakarta()
# backend = Aer.get_backend("qasm_simulator")
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q-community', group='ibmquantumawards', project='open-science-22')
print("provider:", provider)
backend = provider.get_backend("ibmq_jakarta")
print(str(backend))
shots = 1 << 13
reps = 8 # unused
# Submit `reps` identical batches so fidelity statistics can be averaged later.
jobs = []
for _ in range(reps):
    #! CHECK: run t3_zne_qcs, with optimization_level = 0 and straightforward initial_layout
    job = execute(t3_zne_qcs, backend, shots=shots, optimization_level=0)
    print('Job ID', job.job_id())
    jobs.append(job)
# QREM: measurement-error calibration circuits for the same physical qubits
qr = QuantumRegister(num_qubits, name="calq")
meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal')
# we have to feed initial_layout to calibration matrix
cal_job = execute(meas_calibs, backend=backend, shots=shots, optimization_level=3, initial_layout = initial_layout)
print('Job ID', cal_job.job_id())
meas_calibs[0].draw("mpl")
# Persist job ids and backend properties so results can be retrieved later.
dt_now = datetime.datetime.now()
print(dt_now)
filename = "job_ids_" + str(backend) + "_100step_" + dt_now.strftime('%Y%m%d_%H%M%S') + "_.pkl"
print(filename)
with open("jobs_" + str(backend) + "_100step_" + dt_now.strftime('%Y%m%d_%H%M%S') + "_.pkl", "wb") as f:
    pickle.dump({"jobs": jobs, "cal_job": cal_job}, f)
with open(filename, "wb") as f:
    pickle.dump({"job_ids": [job.job_id() for job in jobs], "cal_job_id": cal_job.job_id()}, f)
with open("properties_" + str(backend) + "_" + dt_now.strftime('%Y%m%d_%H%M%S') + "_.pkl", "wb") as f:
    pickle.dump(backend.properties(), f)
# Retrieve a previously submitted batch by its stored job ids.
filename = "job_ids_ibmq_jakarta_100step_20220413_030821_.pkl" # change here
with open(filename, "rb") as f:
    job_ids_dict = pickle.load(f)
job_ids = job_ids_dict["job_ids"]
cal_job_id = job_ids_dict["cal_job_id"]
retrieved_jobs = []
for job_id in job_ids:
    retrieved_jobs.append(backend.retrieve_job(job_id))
retrieved_cal_job = backend.retrieve_job(cal_job_id)
# Build the measurement-error mitigation fitter from the calibration results.
cal_results = retrieved_cal_job.result()
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
target_state = (One^One^Zero).to_matrix() # DO NOT CHANGE!!!
fids = []
for job in retrieved_jobs:
    # Pipeline per batch: readout mitigation -> ZNE extrapolation -> valid density matrix -> fidelity.
    mit_results = meas_fitter.filter.apply(job.result())
    zne_expvals = zne_decoder(num_qubits, mit_results, scale_factors = scale_factors)
    rho = expvals_to_valid_rho(num_qubits, zne_expvals)
    fid = state_fidelity(rho, target_state)
    fids.append(fid)
print('state tomography fidelity = {:.4f} \u00B1 {:.4f}'.format(np.mean(fids), np.std(fids)))
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/quantumyatra/quantum_computing
|
quantumyatra
|
# Useful additional packages
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from qiskit import QuantumCircuit, execute, Aer
from qiskit.visualization import plot_histogram
ckt=QuantumCircuit(1)
ckt.x(0)
ckt.y(0)
ckt.z(0)
ckt.draw(output='mpl')
sim_uni = Aer.get_backend('unitary_simulator')
job_uni = execute(ckt, sim_uni, shots=1000)
res_uni = job_uni.result()
uni_matrix = res_uni.get_unitary(ckt)
print (uni_matrix)
|
https://github.com/PacktPublishing/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience
|
PacktPublishing
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created Nov 2020
@author: hassi
"""

# Import Qiskit
import qiskit

# Report the version of every installed Qiskit component.
versions = qiskit.__qiskit_version__
print("Qiskit components and versions:")
print("===============================")
for component, component_version in versions.items():
    print(component, component_version)
|
https://github.com/PabloMartinezAngerosa/QAOA-uniform-convergence
|
PabloMartinezAngerosa
|
from tsp_qaoa import test_solution
from qiskit.visualization import plot_histogram
import networkx as nx
import numpy as np
import json
import csv
# CSV schema: one row per (instance, depth p) with the max-abs distance to
# the deepest run's distribution and the best sampled mean.
header = ['instance','p','distance', 'mean']
length_p = 3
length_instances = 2
with open('qaoa_multiple_p_distance.csv', 'w', encoding='UTF8') as f:
    writer = csv.writer(f)
    # write the header
    writer.writerow(header)
    instance_index = 0
    for instance in range(length_instances):
        instance_index += 1
        first_p = False
        UNIFORM_CONVERGENCE_P = []
        UNIFORM_CONVERGENCE_SAMPLE = []
        # Run QAOA at depths p = 1..length_p on the same graph instance.
        for p in range(length_p):
            p = p+1  # shift the 0-based loop index to depth 1..length_p
            if first_p == False:
                # First depth: generate a fresh graph instance.
                print("Vuelve a llamar a test_solution")
                job_2, G, UNIFORM_CONVERGENCE_SAMPLE = test_solution(p=p)
                first_p = True
            else:
                # Later depths: reuse the same graph G.
                job_2, G, UNIFORM_CONVERGENCE_SAMPLE = test_solution(grafo=G, p=p)
            # Keep the sample with the smallest mean for this depth.
            UNIFORM_CONVERGENCE_SAMPLE.sort(key=lambda x: x["mean"])
            convergence_min = UNIFORM_CONVERGENCE_SAMPLE[0]
            UNIFORM_CONVERGENCE_P.append({
                "mean":convergence_min["mean"],
                "probabilities": convergence_min["probabilities"]
            })
        # Compare every depth's distribution against the deepest one via the
        # sup distance max|.| (uniform-convergence / Cauchy-style check).
        # NOTE(review): assumes "probabilities" supports numpy broadcasting
        # — confirm against test_solution's return type.
        cauchy_function_nk = UNIFORM_CONVERGENCE_P[len(UNIFORM_CONVERGENCE_P) - 1]
        p_index = 0
        for p_state in UNIFORM_CONVERGENCE_P:
            p_index += 1
            print(p_index)
            mean = p_state["mean"]
            print(p_state)
            print(mean)
            distance_p_cauchy_function_nk = np.max(np.abs(cauchy_function_nk["probabilities"] - p_state["probabilities"]))
            writer.writerow([instance_index, p_index, distance_p_cauchy_function_nk, mean])
|
https://github.com/Gopal-Dahale/qiskit-qulacs
|
Gopal-Dahale
|
"""Test Qulacs Estimator Gradients"""
from unittest import TestCase
import numpy as np
import pytest
from ddt import data, ddt
from qiskit import QuantumCircuit, transpile
from qiskit.circuit import Parameter, ParameterVector
from qiskit.circuit.library import EfficientSU2
from qiskit.circuit.library.standard_gates import RXXGate, RYYGate, RZXGate, RZZGate
from qiskit.quantum_info import SparsePauliOp
from qiskit_algorithms.gradients import ReverseEstimatorGradient
from qiskit_qulacs.qulacs_backend import QulacsBackend
from qiskit_qulacs.qulacs_estimator_gradient import QulacsEstimatorGradient
gradient_factories = [QulacsEstimatorGradient]
@ddt
class TestQulacsEstimatorGradient(TestCase):
"""Test Estimator Gradient"""
    @data(*gradient_factories)
    def test_gradient_operators(self, grad):
        """Test the estimator gradient for different operators"""
        # d/da <Z> for RX(a)|0> is -sin(a); at a = pi/4 that is -1/sqrt(2).
        a = Parameter("a")
        qc = QuantumCircuit(1)
        qc.rx(a, 0)
        gradient = grad()
        op = SparsePauliOp.from_list([("Z", 1)])
        correct_result = -1 / np.sqrt(2)
        param = [np.pi / 4]
        value = gradient.run([qc], [op], [param]).result().gradients[0]
        self.assertAlmostEqual(value[0], correct_result, 3)
    @data(*gradient_factories)
    def test_gradient_efficient_su2(self, grad):
        """Test the estimator gradient for EfficientSU2"""
        qc = EfficientSU2(2, reps=1).decompose()
        op = SparsePauliOp.from_list([("ZI", 1)])
        gradient = grad()
        # Two parameter settings: all angles pi/4, then all angles pi/2.
        param_list = [
            [np.pi / 4 for param in qc.parameters],
            [np.pi / 2 for param in qc.parameters],
        ]
        # Expected gradient of <ZI>, one entry per ansatz parameter.
        correct_results = [
            [
                -0.35355339,
                -0.70710678,
                0,
                0.35355339,
                0,
                -0.70710678,
                0,
                0,
            ],
            [0, 0, 0, 1, 0, 0, 0, 0],
        ]
        for i, param in enumerate(param_list):
            gradients = gradient.run([qc], [op], [param]).result().gradients[0]
            np.testing.assert_allclose(gradients, correct_results[i], atol=1e-3)
@data(*gradient_factories)
def test_gradient_2qubit_gate(self, grad):
"""Test the estimator gradient for 2 qubit gates"""
qulacs_backend = QulacsBackend()
for gate in [RXXGate, RYYGate, RZZGate, RZXGate]:
param_list = [[np.pi / 4], [np.pi / 2]]
correct_results = [
[-0.70710678],
[-1],
]
op = SparsePauliOp.from_list([("ZI", 1)])
for i, param in enumerate(param_list):
a = Parameter("a")
qc = QuantumCircuit(2)
gradient = grad()
if gate is RZZGate:
qc.h([0, 1])
qc.append(gate(a), [qc.qubits[0], qc.qubits[1]], [])
qc.h([0, 1])
else:
qc.append(gate(a), [qc.qubits[0], qc.qubits[1]], [])
tqc = transpile(qc, qulacs_backend)
gradients = gradient.run([tqc], [op], [param]).result().gradients[0]
np.testing.assert_allclose(gradients, correct_results[i], atol=1e-3)
@data(*gradient_factories)
def test_gradient_parameters(self, grad):
"""Test the estimator gradient for parameters"""
a = Parameter("a")
b = Parameter("b")
qc = QuantumCircuit(1)
qc.rx(a, 0)
qc.rx(b, 0)
gradient = grad()
param_list = [[np.pi / 4, np.pi / 2]]
correct_results = [
[-0.70710678],
]
op = SparsePauliOp.from_list([("Z", 1)])
for i, param in enumerate(param_list):
gradients = (
gradient.run([qc], [op], [param], parameters=[[a]])
.result()
.gradients[0]
)
np.testing.assert_allclose(gradients, correct_results[i], atol=1e-3)
# parameter order
with self.subTest(msg="The order of gradients"):
c = Parameter("c")
qc = QuantumCircuit(1)
qc.rx(a, 0)
qc.rz(b, 0)
qc.rx(c, 0)
param_list = [[np.pi / 4, np.pi / 2, np.pi / 3]]
correct_results = [
[-0.35355339, 0.61237244, -0.61237244],
[-0.61237244, 0.61237244, -0.35355339],
[-0.35355339, -0.61237244],
[-0.61237244, -0.35355339],
]
param = [[a, b, c], [c, b, a], [a, c], [c, a]]
op = SparsePauliOp.from_list([("Z", 1)])
for i, p in enumerate(param):
gradient = grad()
gradients = (
gradient.run([qc], [op], param_list, parameters=[p])
.result()
.gradients[0]
)
np.testing.assert_allclose(gradients, correct_results[i], atol=1e-3)
@data(*gradient_factories)
def test_gradient_multi_arguments(self, grad):
"""Test the estimator gradient for multiple arguments"""
a = Parameter("a")
b = Parameter("b")
qc = QuantumCircuit(1)
qc.rx(a, 0)
qc2 = QuantumCircuit(1)
qc2.rx(b, 0)
gradient = grad()
param_list = [[np.pi / 4], [np.pi / 2]]
correct_results = [
[-0.70710678],
[-1],
]
op = SparsePauliOp.from_list([("Z", 1)])
gradients = gradient.run([qc, qc2], [op] * 2, param_list).result().gradients
np.testing.assert_allclose(gradients, correct_results, atol=1e-3)
c = Parameter("c")
qc3 = QuantumCircuit(1)
qc3.rx(c, 0)
qc3.ry(a, 0)
param_list2 = [[np.pi / 4], [np.pi / 4, np.pi / 4], [np.pi / 4, np.pi / 4]]
correct_results2 = [
[-0.70710678],
[-0.5],
[-0.5, -0.5],
]
gradients2 = (
gradient.run(
[qc, qc3, qc3], [op] * 3, param_list2, parameters=[[a], [c], None]
)
.result()
.gradients
)
np.testing.assert_allclose(gradients2[0], correct_results2[0], atol=1e-3)
np.testing.assert_allclose(gradients2[1], correct_results2[1], atol=1e-3)
np.testing.assert_allclose(gradients2[2], correct_results2[2], atol=1e-3)
@data(*gradient_factories)
def test_gradient_validation(self, grad):
"""Test estimator gradient's validation"""
a = Parameter("a")
qc = QuantumCircuit(1)
qc.rx(a, 0)
gradient = grad()
param_list = [[np.pi / 4], [np.pi / 2]]
op = SparsePauliOp.from_list([("Z", 1)])
with self.assertRaises(ValueError):
gradient.run([qc], [op], param_list)
with self.assertRaises(ValueError):
gradient.run([qc, qc], [op, op], param_list, parameters=[[a]])
with self.assertRaises(ValueError):
gradient.run([qc, qc], [op], param_list, parameters=[[a]])
with self.assertRaises(ValueError):
gradient.run([qc], [op], [[np.pi / 4, np.pi / 4]])
@data(*gradient_factories)
def test_gradient_with_parameter_vector(self, grad):
"""Tests that the gradient of a circuit with a parameter vector is calculated correctly."""
qiskit_circuit = QuantumCircuit(1)
theta_param = ParameterVector("ΞΈ", 2)
theta_val = np.array([np.pi / 4, np.pi / 16])
qiskit_circuit.rx(theta_param[0], 0)
qiskit_circuit.rx(theta_param[1] * 4, 0)
op = SparsePauliOp.from_list([("Z", 1)])
est_grad = grad()
have_gradient = (
est_grad.run([qiskit_circuit], [op], [theta_val]).result().gradients[0]
)
want_gradient = [-1, -4]
assert np.allclose(have_gradient, want_gradient)
@data(*gradient_factories)
def test_gradient_with_parameter_expressions(self, grad):
"""Tests that the gradient of a circuit with parameter expressions is calculated correctly."""
qiskit_circuit = QuantumCircuit(1)
theta_param = ParameterVector("ΞΈ", 3)
theta_val = [3 * np.pi / 16, np.pi / 64]
phi_param = Parameter("Ο")
phi_val = [np.pi / 8]
# Apply an instruction with a regular parameter.
qiskit_circuit.rx(phi_param, 0)
# Apply an instruction with a parameter vector element.
qiskit_circuit.rx(theta_param[0], 0)
# Apply an instruction with a parameter expression involving one parameter.
qiskit_circuit.rx(theta_param[1] + theta_param[1] + np.cos(theta_param[1]), 0)
op = SparsePauliOp.from_list([("Z", 1)])
est_grad = grad()
qiskit_grad = ReverseEstimatorGradient()
have_gradient = (
est_grad.run([qiskit_circuit], [op], [theta_val + phi_val])
.result()
.gradients[0]
)
want_gradient = (
qiskit_grad.run([qiskit_circuit], [op], [theta_val + phi_val])
.result()
.gradients[0]
)
self.assertTrue(np.allclose(have_gradient, want_gradient))
@ddt
class TestQulacsEstimatorGradientWarningsAndErrors(TestCase):
    """Error-path tests for the Qulacs estimator gradient."""

    @data(*gradient_factories)
    def test_gradient_with_parameter_expression_having_two_paramters(self, grad):
        """Test gradient when two different parameters are passed in a single expression"""
        # A single rotation angle that mixes two distinct parameters cannot be
        # differentiated by the backend, so running the gradient must raise.
        thetas = ParameterVector("θ", 2)
        observable = SparsePauliOp.from_list([("Z", 1)])
        circuit = QuantumCircuit(1)
        circuit.rx(3 * thetas[0] + thetas[1], 0)
        estimator_gradient = grad()
        with pytest.raises(RuntimeError, match="Variable w.r.t should be given"):
            estimator_gradient.run([circuit], [observable], [[0.2, 0.3]]).result()
|
https://github.com/Tojarieh97/VQE
|
Tojarieh97
|
from openfermion.chem import MolecularData
from openfermion.transforms import get_fermion_operator, jordan_wigner
from openfermion.linalg import get_ground_state, get_sparse_operator
import numpy
import scipy
import scipy.linalg

# Load a saved molecule file.
# NOTE(review): the original comment said "LiH", but the geometry below is H2
# at a 1.2 angstrom bond length -- confirm which system is intended.
diatomic_bond_length = 1.2
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))]
basis = 'sto-3g'
multiplicity = 1

# Set Hamiltonian parameters.
# NOTE(review): these two constants are not used below; the active-space
# ranges passed to get_molecular_hamiltonian() are hard-coded, and
# range(1, 2) does not match active_space_stop = 3 -- verify intent.
active_space_start = 1
active_space_stop = 3

# Generate and populate instance of MolecularData.
molecule = MolecularData(geometry, basis, multiplicity, description="1.2")
molecule.load()

# Get the Hamiltonian in an active space.
molecular_hamiltonian = molecule.get_molecular_hamiltonian(
    occupied_indices=range(1),
    active_indices=range(1, 2))

# Map operator to fermions and qubits.
fermion_hamiltonian = get_fermion_operator(molecular_hamiltonian)
qubit_hamiltonian = jordan_wigner(fermion_hamiltonian)
qubit_hamiltonian.compress()
print('The Jordan-Wigner Hamiltonian in canonical basis follows:\n{}'.format(qubit_hamiltonian))

# Get sparse operator and ground state energy.
sparse_hamiltonian = get_sparse_operator(qubit_hamiltonian)
energy, state = get_ground_state(sparse_hamiltonian)
print('Ground state energy before rotation is {} Hartree.\n'.format(energy))

# Randomly rotate: build an antisymmetric generator `kappa` from random
# angles so that expm(kappa) below is a valid (orthogonal) basis rotation.
n_orbitals = molecular_hamiltonian.n_qubits // 2
n_variables = int(n_orbitals * (n_orbitals - 1) / 2)
numpy.random.seed(1)  # fixed seed keeps the rotation reproducible
random_angles = numpy.pi * (1. - 2. * numpy.random.rand(n_variables))
kappa = numpy.zeros((n_orbitals, n_orbitals))
index = 0
for p in range(n_orbitals):
    for q in range(p + 1, n_orbitals):
        kappa[p, q] = random_angles[index]
        kappa[q, p] = -numpy.conjugate(random_angles[index])
        index += 1

# Build the unitary rotation matrix.
# (The original also computed `difference_matrix = kappa + kappa.transpose()`,
# which was never used -- for real angles it is the zero matrix -- so that
# dead store has been removed.)
rotation_matrix = scipy.linalg.expm(kappa)

# Apply the unitary.
molecular_hamiltonian.rotate_basis(rotation_matrix)

# Get qubit Hamiltonian in rotated basis.
qubit_hamiltonian = jordan_wigner(molecular_hamiltonian)
qubit_hamiltonian.compress()
print('The Jordan-Wigner Hamiltonian in rotated basis follows:\n{}'.format(qubit_hamiltonian))

# Get sparse Hamiltonian and energy in rotated basis; the ground-state energy
# printed here should match the pre-rotation value (basis rotations preserve
# the spectrum).
sparse_hamiltonian = get_sparse_operator(qubit_hamiltonian)
energy, state = get_ground_state(sparse_hamiltonian)
print('Ground state energy after rotation is {} Hartree.'.format(energy))
|
https://github.com/swe-train/qiskit__qiskit
|
swe-train
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the Scheduling/PadDelay passes"""
import unittest
from ddt import ddt, data, unpack
from qiskit import QuantumCircuit
from qiskit.circuit import Measure
from qiskit.circuit.library import CXGate, HGate
from qiskit.pulse import Schedule, Play, Constant, DriveChannel
from qiskit.test import QiskitTestCase
from qiskit.transpiler.instruction_durations import InstructionDurations
from qiskit.transpiler.passes import (
ASAPScheduleAnalysis,
ALAPScheduleAnalysis,
PadDelay,
SetIOLatency,
)
from qiskit.transpiler.passmanager import PassManager
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.transpiler.target import Target, InstructionProperties
@ddt
class TestSchedulingAndPaddingPass(QiskitTestCase):
"""Tests the Scheduling passes"""
def test_alap_agree_with_reverse_asap_reverse(self):
"""Test if ALAP schedule agrees with doubly-reversed ASAP schedule."""
qc = QuantumCircuit(2)
qc.h(0)
qc.delay(500, 1)
qc.cx(0, 1)
qc.measure_all()
durations = InstructionDurations(
[("h", 0, 200), ("cx", [0, 1], 700), ("measure", None, 1000)]
)
pm = PassManager([ALAPScheduleAnalysis(durations), PadDelay()])
alap_qc = pm.run(qc)
pm = PassManager([ASAPScheduleAnalysis(durations), PadDelay()])
new_qc = pm.run(qc.reverse_ops())
new_qc = new_qc.reverse_ops()
new_qc.name = new_qc.name
self.assertEqual(alap_qc, new_qc)
def test_alap_agree_with_reverse_asap_with_target(self):
"""Test if ALAP schedule agrees with doubly-reversed ASAP schedule."""
qc = QuantumCircuit(2)
qc.h(0)
qc.delay(500, 1)
qc.cx(0, 1)
qc.measure_all()
target = Target(num_qubits=2, dt=3.5555555555555554)
target.add_instruction(HGate(), {(0,): InstructionProperties(duration=200)})
target.add_instruction(CXGate(), {(0, 1): InstructionProperties(duration=700)})
target.add_instruction(
Measure(),
{
(0,): InstructionProperties(duration=1000),
(1,): InstructionProperties(duration=1000),
},
)
pm = PassManager([ALAPScheduleAnalysis(target=target), PadDelay()])
alap_qc = pm.run(qc)
pm = PassManager([ASAPScheduleAnalysis(target=target), PadDelay()])
new_qc = pm.run(qc.reverse_ops())
new_qc = new_qc.reverse_ops()
new_qc.name = new_qc.name
self.assertEqual(alap_qc, new_qc)
@data(ALAPScheduleAnalysis, ASAPScheduleAnalysis)
def test_classically_controlled_gate_after_measure(self, schedule_pass):
"""Test if ALAP/ASAP schedules circuits with c_if after measure with a common clbit.
See: https://github.com/Qiskit/qiskit-terra/issues/7654
(input)
βββ
q_0: β€Mββββββββββββ
ββ₯β βββββ
q_1: ββ«βββββ€ X ββββ
β βββ₯ββ
β ββββββ¨βββββ
c: 1/ββ©ββ‘ c_0 = T β
0 βββββββββββ
(scheduled)
βββββββββββββββββββββ
q_0: ββββββββββββββββββββ€Mββ€ Delay(200[dt]) β
βββββββββββββββββββββ₯ββββββββ¬ββββ¬βββββββ
q_1: β€ Delay(1000[dt]) βββ«ββββββββ€ X ββββββββ
βββββββββββββββββββ β βββ₯ββ
β ββββββ¨βββββ
c: 1/βββββββββββββββββββββ©βββββ‘ c_0=0x1 βββββ
0 βββββββββββ
"""
qc = QuantumCircuit(2, 1)
qc.measure(0, 0)
qc.x(1).c_if(0, True)
durations = InstructionDurations([("x", None, 200), ("measure", None, 1000)])
pm = PassManager([schedule_pass(durations), PadDelay()])
scheduled = pm.run(qc)
expected = QuantumCircuit(2, 1)
expected.measure(0, 0)
expected.delay(1000, 1) # x.c_if starts after measure
expected.x(1).c_if(0, True)
expected.delay(200, 0)
self.assertEqual(expected, scheduled)
@data(ALAPScheduleAnalysis, ASAPScheduleAnalysis)
def test_measure_after_measure(self, schedule_pass):
"""Test if ALAP/ASAP schedules circuits with measure after measure with a common clbit.
See: https://github.com/Qiskit/qiskit-terra/issues/7654
(input)
ββββββββ
q_0: β€ X ββ€Mββββ
βββββββ₯ββββ
q_1: βββββββ«ββ€Mβ
β ββ₯β
c: 1/βββββββ©βββ©β
0 0
(scheduled)
βββββ ββββββββββββββββββββββ
q_0: ββββββββ€ X βββββββββ€Mββ€ Delay(1000[dt]) β
ββββββββ΄ββββ΄βββββββββ₯ββββββββββ¬ββ¬ββββββββ
q_1: β€ Delay(1200[dt]) βββ«ββββββββββ€Mβββββββββ
βββββββββββββββββββ β ββ₯β
c: 1/βββββββββββββββββββββ©βββββββββββ©βββββββββ
0 0
"""
qc = QuantumCircuit(2, 1)
qc.x(0)
qc.measure(0, 0)
qc.measure(1, 0)
durations = InstructionDurations([("x", None, 200), ("measure", None, 1000)])
pm = PassManager([schedule_pass(durations), PadDelay()])
scheduled = pm.run(qc)
expected = QuantumCircuit(2, 1)
expected.x(0)
expected.measure(0, 0)
expected.delay(1200, 1)
expected.measure(1, 0)
expected.delay(1000, 0)
self.assertEqual(expected, scheduled)
@data(ALAPScheduleAnalysis, ASAPScheduleAnalysis)
def test_c_if_on_different_qubits(self, schedule_pass):
"""Test if ALAP/ASAP schedules circuits with `c_if`s on different qubits.
(input)
βββ
q_0: β€Mβββββββββββββββββββββββ
ββ₯β βββββ
q_1: ββ«βββββ€ X βββββββββββββββ
β βββ₯ββ βββββ
q_2: ββ«βββββββ«βββββββββ€ X ββββ
β β βββ₯ββ
β ββββββ¨βββββββββββ¨βββββ
c: 1/ββ©ββ‘ c_0 = T ββ‘ c_0 = T β
0 ββββββββββββββββββββββ
(scheduled)
βββββββββββββββββββββ
q_0: ββββββββββββββββββββ€Mββ€ Delay(200[dt]) ββββββββββββ
βββββββββββββββββββββ₯ββββββββ¬ββββ¬βββββββ
q_1: β€ Delay(1000[dt]) βββ«ββββββββ€ X βββββββββββββββββββ
βββββββββββββββββββ€ β βββ₯ββ βββββ
q_2: β€ Delay(1000[dt]) βββ«ββββββββββ«βββββββββββββ€ X ββββ
βββββββββββββββββββ β β βββ₯ββ
β ββββββ¨βββββ ββββββ¨βββββ
c: 1/βββββββββββββββββββββ©βββββ‘ c_0=0x1 ββββββ‘ c_0=0x1 β
0 βββββββββββ βββββββββββ
"""
qc = QuantumCircuit(3, 1)
qc.measure(0, 0)
qc.x(1).c_if(0, True)
qc.x(2).c_if(0, True)
durations = InstructionDurations([("x", None, 200), ("measure", None, 1000)])
pm = PassManager([schedule_pass(durations), PadDelay()])
scheduled = pm.run(qc)
expected = QuantumCircuit(3, 1)
expected.measure(0, 0)
expected.delay(1000, 1)
expected.delay(1000, 2)
expected.x(1).c_if(0, True)
expected.x(2).c_if(0, True)
expected.delay(200, 0)
self.assertEqual(expected, scheduled)
@data(ALAPScheduleAnalysis, ASAPScheduleAnalysis)
def test_shorter_measure_after_measure(self, schedule_pass):
"""Test if ALAP/ASAP schedules circuits with shorter measure after measure with a common clbit.
(input)
βββ
q_0: β€Mββββ
ββ₯ββββ
q_1: ββ«ββ€Mβ
β ββ₯β
c: 1/ββ©βββ©β
0 0
(scheduled)
βββββββββββββββββββββ
q_0: ββββββββββββββββββββ€Mββ€ Delay(700[dt]) β
βββββββββββββββββββββ₯βββββββββ¬ββ¬ββββββββ
q_1: β€ Delay(1000[dt]) βββ«βββββββββ€Mβββββββββ
βββββββββββββββββββ β ββ₯β
c: 1/βββββββββββββββββββββ©ββββββββββ©βββββββββ
0 0
"""
qc = QuantumCircuit(2, 1)
qc.measure(0, 0)
qc.measure(1, 0)
durations = InstructionDurations([("measure", [0], 1000), ("measure", [1], 700)])
pm = PassManager([schedule_pass(durations), PadDelay()])
scheduled = pm.run(qc)
expected = QuantumCircuit(2, 1)
expected.measure(0, 0)
expected.delay(1000, 1)
expected.measure(1, 0)
expected.delay(700, 0)
self.assertEqual(expected, scheduled)
@data(ALAPScheduleAnalysis, ASAPScheduleAnalysis)
def test_measure_after_c_if(self, schedule_pass):
"""Test if ALAP/ASAP schedules circuits with c_if after measure with a common clbit.
(input)
βββ
q_0: β€Mβββββββββββββββ
ββ₯β βββββ
q_1: ββ«βββββ€ X βββββββ
β βββ₯ββ βββ
q_2: ββ«βββββββ«ββββββ€Mβ
β ββββββ¨βββββββ₯β
c: 1/ββ©ββ‘ c_0 = T βββ©β
0 βββββββββββ 0
(scheduled)
ββββββββββββββββββββββ
q_0: ββββββββββββββββββββ€Mββ€ Delay(1000[dt]) βββββββββββββββββββ
βββββββββββββββββββββ₯βββββββββ¬ββββ¬βββββββββββββββββββββββββ
q_1: β€ Delay(1000[dt]) βββ«βββββββββ€ X βββββββββ€ Delay(800[dt]) β
βββββββββββββββββββ€ β βββ₯ββ ββββββββ¬ββ¬ββββββββ
q_2: β€ Delay(1000[dt]) βββ«βββββββββββ«βββββββββββββββββ€Mβββββββββ
βββββββββββββββββββ β ββββββ¨βββββ ββ₯β
c: 1/βββββββββββββββββββββ©ββββββ‘ c_0=0x1 ββββββββββββββ©βββββββββ
0 βββββββββββ 0
"""
qc = QuantumCircuit(3, 1)
qc.measure(0, 0)
qc.x(1).c_if(0, 1)
qc.measure(2, 0)
durations = InstructionDurations([("x", None, 200), ("measure", None, 1000)])
pm = PassManager([schedule_pass(durations), PadDelay()])
scheduled = pm.run(qc)
expected = QuantumCircuit(3, 1)
expected.delay(1000, 1)
expected.delay(1000, 2)
expected.measure(0, 0)
expected.x(1).c_if(0, 1)
expected.measure(2, 0)
expected.delay(1000, 0)
expected.delay(800, 1)
self.assertEqual(expected, scheduled)
def test_parallel_gate_different_length(self):
"""Test circuit having two parallel instruction with different length.
(input)
ββββββββ
q_0: β€ X ββ€Mββββ
βββββ€ββ₯ββββ
q_1: β€ X βββ«ββ€Mβ
βββββ β ββ₯β
c: 2/βββββββ©βββ©β
0 1
(expected, ALAP)
ββββββββββββββββββββββββββ
q_0: β€ Delay(200[dt]) ββ€ X ββ€Mβ
βββββββ¬ββββ¬βββββββββ¬ββ¬βββ₯β
q_1: βββββββ€ X ββββββββββ€Mββββ«β
βββββ ββ₯β β
c: 2/βββββββββββββββββββββ©ββββ©β
1 0
(expected, ASAP)
ββββββββββββββββββββββββββ
q_0: β€ X ββ€Mββ€ Delay(200[dt]) β
βββββ€ββ₯βββββββββ¬ββ¬ββββββββ
q_1: β€ X βββ«βββββββββ€Mβββββββββ
βββββ β ββ₯β
c: 2/βββββββ©ββββββββββ©βββββββββ
0 1
"""
qc = QuantumCircuit(2, 2)
qc.x(0)
qc.x(1)
qc.measure(0, 0)
qc.measure(1, 1)
durations = InstructionDurations(
[("x", [0], 200), ("x", [1], 400), ("measure", None, 1000)]
)
pm = PassManager([ALAPScheduleAnalysis(durations), PadDelay()])
qc_alap = pm.run(qc)
alap_expected = QuantumCircuit(2, 2)
alap_expected.delay(200, 0)
alap_expected.x(0)
alap_expected.x(1)
alap_expected.measure(0, 0)
alap_expected.measure(1, 1)
self.assertEqual(qc_alap, alap_expected)
pm = PassManager([ASAPScheduleAnalysis(durations), PadDelay()])
qc_asap = pm.run(qc)
asap_expected = QuantumCircuit(2, 2)
asap_expected.x(0)
asap_expected.x(1)
asap_expected.measure(0, 0) # immediately start after X gate
asap_expected.measure(1, 1)
asap_expected.delay(200, 0)
self.assertEqual(qc_asap, asap_expected)
def test_parallel_gate_different_length_with_barrier(self):
"""Test circuit having two parallel instruction with different length with barrier.
(input)
ββββββββ
q_0: β€ X ββ€Mββββ
βββββ€ββ₯ββββ
q_1: β€ X βββ«ββ€Mβ
βββββ β ββ₯β
c: 2/βββββββ©βββ©β
0 1
(expected, ALAP)
βββββββββββββββββββββββ β βββ
q_0: β€ Delay(200[dt]) ββ€ X βββββ€Mββββ
βββββββ¬ββββ¬ββββββββββββ β ββ₯ββββ
q_1: βββββββ€ X ββββββββββββββββββ«ββ€Mβ
βββββ β β ββ₯β
c: 2/ββββββββββββββββββββββββββββ©βββ©β
0 1
(expected, ASAP)
βββββββββββββββββββββββ β βββ
q_0: β€ X ββ€ Delay(200[dt]) βββββ€Mββββ
βββββ€ββββββββββββββββββ β ββ₯ββββ
q_1: β€ X ββββββββββββββββββββββββ«ββ€Mβ
βββββ β β ββ₯β
c: 2/ββββββββββββββββββββββββββββ©βββ©β
0 1
"""
qc = QuantumCircuit(2, 2)
qc.x(0)
qc.x(1)
qc.barrier()
qc.measure(0, 0)
qc.measure(1, 1)
durations = InstructionDurations(
[("x", [0], 200), ("x", [1], 400), ("measure", None, 1000)]
)
pm = PassManager([ALAPScheduleAnalysis(durations), PadDelay()])
qc_alap = pm.run(qc)
alap_expected = QuantumCircuit(2, 2)
alap_expected.delay(200, 0)
alap_expected.x(0)
alap_expected.x(1)
alap_expected.barrier()
alap_expected.measure(0, 0)
alap_expected.measure(1, 1)
self.assertEqual(qc_alap, alap_expected)
pm = PassManager([ASAPScheduleAnalysis(durations), PadDelay()])
qc_asap = pm.run(qc)
asap_expected = QuantumCircuit(2, 2)
asap_expected.x(0)
asap_expected.delay(200, 0)
asap_expected.x(1)
asap_expected.barrier()
asap_expected.measure(0, 0)
asap_expected.measure(1, 1)
self.assertEqual(qc_asap, asap_expected)
def test_measure_after_c_if_on_edge_locking(self):
"""Test if ALAP/ASAP schedules circuits with c_if after measure with a common clbit.
The scheduler is configured to reproduce behavior of the 0.20.0,
in which clbit lock is applied to the end-edge of measure instruction.
See https://github.com/Qiskit/qiskit-terra/pull/7655
(input)
βββ
q_0: β€Mβββββββββββββββ
ββ₯β βββββ
q_1: ββ«βββββ€ X βββββββ
β βββ₯ββ βββ
q_2: ββ«βββββββ«ββββββ€Mβ
β ββββββ¨βββββββ₯β
c: 1/ββ©ββ‘ c_0 = T βββ©β
0 βββββββββββ 0
(ASAP scheduled)
βββββββββββββββββββββ
q_0: ββββββββββββββββββββ€Mββ€ Delay(200[dt]) ββββββββββββββββββββββ
βββββββββββββββββββββ₯ββββββββ¬ββββ¬βββββββ
q_1: β€ Delay(1000[dt]) βββ«ββββββββ€ X βββββββββββββββββββββββββββββ
βββββββββββββββββββ β βββ₯ββ βββββββββββββββββββββ
q_2: βββββββββββββββββββββ«ββββββββββ«ββββββββββ€Mββ€ Delay(200[dt]) β
β ββββββ¨βββββ ββ₯βββββββββββββββββββ
c: 1/βββββββββββββββββββββ©βββββ‘ c_0=0x1 βββββββ©βββββββββββββββββββ
0 βββββββββββ 0
(ALAP scheduled)
βββββββββββββββββββββ
q_0: ββββββββββββββββββββ€Mββ€ Delay(200[dt]) ββββ
βββββββββββββββββββββ₯ββββββββ¬ββββ¬βββββββ
q_1: β€ Delay(1000[dt]) βββ«ββββββββ€ X βββββββββββ
ββ¬βββββββββββββββββ€ β βββ₯ββ βββ
q_2: ββ€ Delay(200[dt]) βββ«ββββββββββ«ββββββββββ€Mβ
ββββββββββββββββββ β ββββββ¨βββββ ββ₯β
c: 1/βββββββββββββββββββββ©βββββ‘ c_0=0x1 βββββββ©β
0 βββββββββββ 0
"""
qc = QuantumCircuit(3, 1)
qc.measure(0, 0)
qc.x(1).c_if(0, 1)
qc.measure(2, 0)
durations = InstructionDurations([("x", None, 200), ("measure", None, 1000)])
# lock at the end edge
actual_asap = PassManager(
[
SetIOLatency(clbit_write_latency=1000),
ASAPScheduleAnalysis(durations),
PadDelay(),
]
).run(qc)
actual_alap = PassManager(
[
SetIOLatency(clbit_write_latency=1000),
ALAPScheduleAnalysis(durations),
PadDelay(),
]
).run(qc)
# start times of 2nd measure depends on ASAP/ALAP
expected_asap = QuantumCircuit(3, 1)
expected_asap.measure(0, 0)
expected_asap.delay(1000, 1)
expected_asap.x(1).c_if(0, 1)
expected_asap.measure(2, 0)
expected_asap.delay(200, 0)
expected_asap.delay(200, 2)
self.assertEqual(expected_asap, actual_asap)
expected_alap = QuantumCircuit(3, 1)
expected_alap.measure(0, 0)
expected_alap.delay(1000, 1)
expected_alap.x(1).c_if(0, 1)
expected_alap.delay(200, 2)
expected_alap.measure(2, 0)
expected_alap.delay(200, 0)
self.assertEqual(expected_alap, actual_alap)
@data([100, 200], [500, 0], [1000, 200])
@unpack
def test_active_reset_circuit(self, write_lat, cond_lat):
"""Test practical example of reset circuit.
Because of the stimulus pulse overlap with the previous XGate on the q register,
measure instruction is always triggered after XGate regardless of write latency.
Thus only conditional latency matters in the scheduling.
(input)
βββ βββββ βββ βββββ βββ βββββ
q: β€Mβββββ€ X βββββ€Mβββββ€ X βββββ€Mβββββ€ X ββββ
ββ₯β βββ₯ββ ββ₯β βββ₯ββ ββ₯β βββ₯ββ
β ββββββ¨βββββ β ββββββ¨βββββ β ββββββ¨βββββ
c: 1/ββ©ββ‘ c_0=0x1 βββ©ββ‘ c_0=0x1 βββ©ββ‘ c_0=0x1 β
0 βββββββββββ 0 βββββββββββ 0 βββββββββββ
"""
qc = QuantumCircuit(1, 1)
qc.measure(0, 0)
qc.x(0).c_if(0, 1)
qc.measure(0, 0)
qc.x(0).c_if(0, 1)
qc.measure(0, 0)
qc.x(0).c_if(0, 1)
durations = InstructionDurations([("x", None, 100), ("measure", None, 1000)])
actual_asap = PassManager(
[
SetIOLatency(clbit_write_latency=write_lat, conditional_latency=cond_lat),
ASAPScheduleAnalysis(durations),
PadDelay(),
]
).run(qc)
actual_alap = PassManager(
[
SetIOLatency(clbit_write_latency=write_lat, conditional_latency=cond_lat),
ALAPScheduleAnalysis(durations),
PadDelay(),
]
).run(qc)
expected = QuantumCircuit(1, 1)
expected.measure(0, 0)
if cond_lat > 0:
expected.delay(cond_lat, 0)
expected.x(0).c_if(0, 1)
expected.measure(0, 0)
if cond_lat > 0:
expected.delay(cond_lat, 0)
expected.x(0).c_if(0, 1)
expected.measure(0, 0)
if cond_lat > 0:
expected.delay(cond_lat, 0)
expected.x(0).c_if(0, 1)
self.assertEqual(expected, actual_asap)
self.assertEqual(expected, actual_alap)
def test_random_complicated_circuit(self):
"""Test scheduling complicated circuit with control flow.
(input)
ββββββββββββββββββ βββββ β βββββ Β»
q_0: β€ Delay(100[dt]) βββββ€ X βββββββββββββββββββββββββ€ X ββββΒ»
ββββββββββββββββββ βββ₯ββ β βββββ βββ₯ββ Β»
q_1: ββββββββββββββββββββββββ«βββββββββββββββ€ X ββββββββββ«βββββΒ»
β β βββ βββ₯ββ β Β»
q_2: ββββββββββββββββββββββββ«βββββββββ€Mβββββββ«βββββββββββ«βββββΒ»
ββββββ¨βββββ β ββ₯βββββββ¨βββββββββββ¨βββββΒ»
c: 1/βββββββββββββββββββ‘ c_0=0x1 ββββββ©ββ‘ c_0=0x0 ββ‘ c_0=0x0 βΒ»
βββββββββββ 0 ββββββββββββββββββββββΒ»
Β« βββββββββββββββββββββββ
Β«q_0: β€ Delay(300[dt]) ββ€ X βββββββ βββββ
Β« βββββββββββββββββββββββ βββ΄ββ
Β«q_1: βββββββββ ββββββββββββββββββ€ X ββββ
Β« βββ΄ββ βββ βββ₯ββ
Β«q_2: βββββββ€ X ββββββββββ€Mββββββββ«βββββ
Β« βββββ ββ₯β ββββββ¨βββββ
Β«c: 1/βββββββββββββββββββββ©βββ‘ c_0=0x0 β
Β« 0 βββββββββββ
(ASAP scheduled) duration = 2800 dt
ββββββββββββββββββ βββββ β βββββββββββββββββββ Β»
q_0: β€ Delay(200[dt]) βββββ€ X ββββββββ€ Delay(1400[dt]) ββββββββββββΒ»
ββββββββββββββββββ€ βββ₯ββ β βββββββββββββββββββ€ βββββ Β»
q_1: β€ Delay(300[dt]) βββββββ«βββββββββ€ Delay(1200[dt]) βββββ€ X ββββΒ»
ββββββββββββββββββ€ β β βββββββββ¬ββ¬ββββββββ βββ₯ββ Β»
q_2: β€ Delay(300[dt]) βββββββ«βββββββββββββββββ€Mβββββββββββββββ«βββββΒ»
ββββββββββββββββββββββββ¨βββββ β ββ₯β ββββββ¨βββββΒ»
c: 1/βββββββββββββββββββ‘ c_0=0x1 ββββββββββββββ©ββββββββββ‘ c_0=0x0 βΒ»
βββββββββββ 0 βββββββββββΒ»
Β« βββββ ββββββββββββββββββ βββββ Β»
Β«q_0: ββββββββββββββββββββββ€ X βββββ€ Delay(300[dt]) ββββββββ€ X ββββββββΒ»
Β« βββ₯ββ βββββββββββββββββββββββββ΄ββββ΄βββββββΒ»
Β«q_1: ββββββββββββββββββββββββ«ββββββββββββββ ββββββββββ€ Delay(400[dt]) βΒ»
Β« ββββββββββββββββββ β βββ΄ββ ββββββββββββββββββ€Β»
Β«q_2: β€ Delay(300[dt]) βββββββ«ββββββββββββ€ X βββββββββ€ Delay(300[dt]) βΒ»
Β« ββββββββββββββββββββββββ¨βββββ βββββ ββββββββββββββββββΒ»
Β«c: 1/βββββββββββββββββββ‘ c_0=0x0 βββββββββββββββββββββββββββββββββββββΒ»
Β« βββββββββββ Β»
Β« ββββββββββββββββββ
Β«q_0: ββββββ ββββββ€ Delay(700[dt]) β
Β« βββ΄ββ ββββββββββββββββββ€
Β«q_1: ββββ€ X βββββ€ Delay(700[dt]) β
Β« βββ₯ββ ββββββββ¬ββ¬ββββββββ
Β«q_2: ββββββ«βββββββββββββ€Mβββββββββ
Β« ββββββ¨βββββ ββ₯β
Β«c: 1/β‘ c_0=0x0 ββββββββββ©βββββββββ
Β« βββββββββββ 0
(ALAP scheduled) duration = 3100
ββββββββββββββββββ βββββ β βββββββββββββββββββ Β»
q_0: β€ Delay(200[dt]) βββββ€ X ββββββββ€ Delay(1400[dt]) ββββββββββββΒ»
ββββββββββββββββββ€ βββ₯ββ β βββββββββββββββββββ€ βββββ Β»
q_1: β€ Delay(300[dt]) βββββββ«βββββββββ€ Delay(1200[dt]) βββββ€ X ββββΒ»
ββββββββββββββββββ€ β β βββββββββ¬ββ¬ββββββββ βββ₯ββ Β»
q_2: β€ Delay(300[dt]) βββββββ«βββββββββββββββββ€Mβββββββββββββββ«βββββΒ»
ββββββββββββββββββββββββ¨βββββ β ββ₯β ββββββ¨βββββΒ»
c: 1/βββββββββββββββββββ‘ c_0=0x1 ββββββββββββββ©ββββββββββ‘ c_0=0x0 βΒ»
βββββββββββ 0 βββββββββββΒ»
Β« βββββ ββββββββββββββββββ βββββ Β»
Β«q_0: ββββββββββββββββββββββ€ X βββββ€ Delay(300[dt]) ββββββββ€ X ββββββββΒ»
Β« ββββββββββββββββββ βββ₯ββ βββββββββββββββββββββββββ΄ββββ΄βββββββΒ»
Β«q_1: β€ Delay(300[dt]) βββββββ«ββββββββββββββ ββββββββββ€ Delay(100[dt]) βΒ»
Β« ββββββββββββββββββ€ β βββ΄ββ ββββββββ¬ββ¬ββββββββΒ»
Β«q_2: β€ Delay(600[dt]) βββββββ«ββββββββββββ€ X ββββββββββββββββ€MβββββββββΒ»
Β« ββββββββββββββββββββββββ¨βββββ βββββ ββ₯β Β»
Β«c: 1/βββββββββββββββββββ‘ c_0=0x0 ββββββββββββββββββββββββββββ©βββββββββΒ»
Β« βββββββββββ 0 Β»
Β« ββββββββββββββββββ
Β«q_0: ββββββ ββββββ€ Delay(700[dt]) β
Β« βββ΄ββ ββββββββββββββββββ€
Β«q_1: ββββ€ X βββββ€ Delay(700[dt]) β
Β« βββ₯ββ ββββββββββββββββββ
Β«q_2: ββββββ«βββββββββββββββββββββββ
Β« ββββββ¨βββββ
Β«c: 1/β‘ c_0=0x0 βββββββββββββββββββ
Β« βββββββββββ
"""
qc = QuantumCircuit(3, 1)
qc.delay(100, 0)
qc.x(0).c_if(0, 1)
qc.barrier()
qc.measure(2, 0)
qc.x(1).c_if(0, 0)
qc.x(0).c_if(0, 0)
qc.delay(300, 0)
qc.cx(1, 2)
qc.x(0)
qc.cx(0, 1).c_if(0, 0)
qc.measure(2, 0)
durations = InstructionDurations(
[("x", None, 100), ("measure", None, 1000), ("cx", None, 200)]
)
actual_asap = PassManager(
[
SetIOLatency(clbit_write_latency=100, conditional_latency=200),
ASAPScheduleAnalysis(durations),
PadDelay(),
]
).run(qc)
actual_alap = PassManager(
[
SetIOLatency(clbit_write_latency=100, conditional_latency=200),
ALAPScheduleAnalysis(durations),
PadDelay(),
]
).run(qc)
expected_asap = QuantumCircuit(3, 1)
expected_asap.delay(200, 0) # due to conditional latency of 200dt
expected_asap.delay(300, 1)
expected_asap.delay(300, 2)
expected_asap.x(0).c_if(0, 1)
expected_asap.barrier()
expected_asap.delay(1400, 0)
expected_asap.delay(1200, 1)
expected_asap.measure(2, 0)
expected_asap.x(1).c_if(0, 0)
expected_asap.x(0).c_if(0, 0)
expected_asap.delay(300, 0)
expected_asap.x(0)
expected_asap.delay(300, 2)
expected_asap.cx(1, 2)
expected_asap.delay(400, 1)
expected_asap.cx(0, 1).c_if(0, 0)
expected_asap.delay(700, 0) # creg is released at t0 of cx(0,1).c_if(0,0)
expected_asap.delay(
700, 1
) # no creg write until 100dt. thus measure can move left by 300dt.
expected_asap.delay(300, 2)
expected_asap.measure(2, 0)
self.assertEqual(expected_asap, actual_asap)
self.assertEqual(actual_asap.duration, 3100)
expected_alap = QuantumCircuit(3, 1)
expected_alap.delay(200, 0) # due to conditional latency of 200dt
expected_alap.delay(300, 1)
expected_alap.delay(300, 2)
expected_alap.x(0).c_if(0, 1)
expected_alap.barrier()
expected_alap.delay(1400, 0)
expected_alap.delay(1200, 1)
expected_alap.measure(2, 0)
expected_alap.x(1).c_if(0, 0)
expected_alap.x(0).c_if(0, 0)
expected_alap.delay(300, 0)
expected_alap.x(0)
expected_alap.delay(300, 1)
expected_alap.delay(600, 2)
expected_alap.cx(1, 2)
expected_alap.delay(100, 1)
expected_alap.cx(0, 1).c_if(0, 0)
expected_alap.measure(2, 0)
expected_alap.delay(700, 0)
expected_alap.delay(700, 1)
self.assertEqual(expected_alap, actual_alap)
self.assertEqual(actual_alap.duration, 3100)
def test_dag_introduces_extra_dependency_between_conditionals(self):
"""Test dependency between conditional operations in the scheduling.
In the below example circuit, the conditional x on q1 could start at time 0,
however it must be scheduled after the conditional x on q0 in ASAP scheduling.
That is because circuit model used in the transpiler passes (DAGCircuit)
interprets instructions acting on common clbits must be run in the order
given by the original circuit (QuantumCircuit).
(input)
ββββββββββββββββββ βββββ
q_0: β€ Delay(100[dt]) βββββ€ X ββββ
βββββββ¬ββββ¬βββββββ βββ₯ββ
q_1: βββββββ€ X ββββββββββββββ«βββββ
βββ₯ββ β
ββββββ¨βββββ ββββββ¨βββββ
c: 1/ββββ‘ c_0=0x1 ββββββ‘ c_0=0x1 β
βββββββββββ βββββββββββ
(ASAP scheduled)
ββββββββββββββββββ βββββ
q_0: β€ Delay(100[dt]) βββββ€ X βββββββββββββββ
ββββββββββββββββββ€ βββ₯ββ βββββ
q_1: β€ Delay(100[dt]) βββββββ«βββββββββ€ X ββββ
ββββββββββββββββββ β βββ₯ββ
ββββββ¨βββββββββββ¨βββββ
c: 1/βββββββββββββββββββ‘ c_0=0x1 ββ‘ c_0=0x1 β
ββββββββββββββββββββββ
"""
qc = QuantumCircuit(2, 1)
qc.delay(100, 0)
qc.x(0).c_if(0, True)
qc.x(1).c_if(0, True)
durations = InstructionDurations([("x", None, 160)])
pm = PassManager([ASAPScheduleAnalysis(durations), PadDelay()])
scheduled = pm.run(qc)
expected = QuantumCircuit(2, 1)
expected.delay(100, 0)
expected.delay(100, 1) # due to extra dependency on clbits
expected.x(0).c_if(0, True)
expected.x(1).c_if(0, True)
self.assertEqual(expected, scheduled)
def test_scheduling_with_calibration(self):
"""Test if calibrated instruction can update node duration."""
qc = QuantumCircuit(2)
qc.x(0)
qc.cx(0, 1)
qc.x(1)
qc.cx(0, 1)
xsched = Schedule(Play(Constant(300, 0.1), DriveChannel(0)))
qc.add_calibration("x", (0,), xsched)
durations = InstructionDurations([("x", None, 160), ("cx", None, 600)])
pm = PassManager([ASAPScheduleAnalysis(durations), PadDelay()])
scheduled = pm.run(qc)
expected = QuantumCircuit(2)
expected.x(0)
expected.delay(300, 1)
expected.cx(0, 1)
expected.x(1)
expected.delay(160, 0)
expected.cx(0, 1)
expected.add_calibration("x", (0,), xsched)
self.assertEqual(expected, scheduled)
def test_padding_not_working_without_scheduling(self):
    """Test padding fails when un-scheduled DAG is input."""
    circuit = QuantumCircuit(1, 1)
    circuit.delay(100, 0)
    circuit.x(0)
    circuit.measure(0, 0)
    # PadDelay needs node start times produced by a scheduling analysis
    # pass; running it on an unscheduled circuit must raise.
    with self.assertRaises(TranspilerError):
        PassManager(PadDelay()).run(circuit)
def test_no_pad_very_end_of_circuit(self):
    """Test padding option that inserts no delay at the very end of circuit.

    With ``fill_very_end=False`` the trailing idle time on qubit 1 (after
    its x gate, while qubit 0 is still being measured) is left unpadded,
    so ASAP scheduling plus padding leaves the circuit unchanged.
    """
    circuit = QuantumCircuit(2, 1)
    circuit.delay(100, 0)
    circuit.x(1)
    circuit.measure(0, 0)

    instruction_durations = InstructionDurations(
        [("x", None, 160), ("measure", None, 1000)]
    )
    passes = PassManager(
        [
            ASAPScheduleAnalysis(instruction_durations),
            PadDelay(fill_very_end=False),
        ]
    )
    self.assertEqual(passes.run(circuit), circuit)
@data(ALAPScheduleAnalysis, ASAPScheduleAnalysis)
def test_respect_target_instruction_constraints(self, schedule_pass):
    """Test if DD pass does not pad delays for qubits that do not support delay instructions.

    See: https://github.com/Qiskit/qiskit-terra/issues/9993
    """
    circuit = QuantumCircuit(3)
    circuit.cx(1, 2)

    # The target advertises cx only — delay is not a supported instruction,
    # so the padding pass must not insert any.
    target = Target(dt=1)
    target.add_instruction(CXGate(), {(1, 2): InstructionProperties(duration=1000)})

    passes = PassManager([schedule_pass(target=target), PadDelay(target=target)])
    self.assertEqual(circuit, passes.run(circuit))
if __name__ == "__main__":
unittest.main()
|
https://github.com/theflyingrahul/qiskitsummerschool2020
|
theflyingrahul
|
!pip install -U -r grading_tools/requirements.txt
from IPython.display import clear_output
clear_output()
import numpy as np; pi = np.pi
from qiskit import QuantumCircuit, Aer, execute
from qiskit.visualization import plot_histogram
from copy import deepcopy as make_copy
def prepare_hets_circuit(depth, angle1, angle2):
    """Build the two-qubit hardware-efficient trial-state (HETS) ansatz.

    The circuit applies one initial layer of ry/rz rotations by ``angle1``
    on both qubits, followed by ``depth`` entangling layers, each a
    CX(0, 1) plus ry/rz rotations by ``angle2`` on both qubits.

    Args:
        depth: number of entangling layers.
        angle1: rotation angle for the initial layer.
        angle2: rotation angle for every entangling layer.

    Returns:
        QuantumCircuit: the two-qubit trial-state circuit.
    """
    # BUG FIX: the ansatz always acts on exactly 2 qubits.  The original
    # code created QuantumCircuit(depth), which only happened to work for
    # depth=2 and would break the downstream two-bit counts bookkeeping
    # (keys '00'..'11') for any other depth.
    hets_circ = QuantumCircuit(2)
    hets_circ.ry(angle1, 0)
    hets_circ.rz(angle1, 0)
    hets_circ.ry(angle1, 1)
    hets_circ.rz(angle1, 1)
    for _ in range(depth):
        hets_circ.cx(0, 1)
        hets_circ.ry(angle2, 0)
        hets_circ.rz(angle2, 0)
        hets_circ.ry(angle2, 1)
        hets_circ.rz(angle2, 1)
    return hets_circ
# Build the depth-2 ansatz with both angles set to pi/2 and visualize it.
hets_circuit = prepare_hets_circuit(2, pi/2, pi/2)
hets_circuit.draw()
def measure_zz_circuit(given_circuit):
    """Return a copy of *given_circuit* with measurements of all qubits appended."""
    measured_copy = make_copy(given_circuit)
    measured_copy.measure_all()
    return measured_copy
# Append computational-basis measurements to the ansatz and inspect it.
zz_meas = measure_zz_circuit(hets_circuit)
zz_meas.draw()
# Sample the measured circuit on the QASM simulator and plot the outcome
# distribution.  ``simulator`` is reused by the measure_* helpers below.
simulator = Aer.get_backend('qasm_simulator')
result = execute(zz_meas, backend = simulator, shots=10000).result()
counts = result.get_counts(zz_meas)
plot_histogram(counts)
def measure_zz(given_circuit, num_shots = 10000):
    """Estimate <ZZ> for *given_circuit* by sampling in the computational basis.

    Args:
        given_circuit: two-qubit circuit preparing the state of interest.
        num_shots: number of measurement shots.

    Returns:
        float: the estimated <ZZ> expectation value.
    """
    zz_meas = measure_zz_circuit(given_circuit)
    result = execute(zz_meas, backend = simulator, shots = num_shots).result()
    counts = result.get_counts(zz_meas)
    # Outcomes that never occurred are simply absent from ``counts``;
    # ``dict.get`` with a default of 0 replaces the original eight-line
    # key backfill with identical behavior.
    c00 = counts.get('00', 0)
    c01 = counts.get('01', 0)
    c10 = counts.get('10', 0)
    c11 = counts.get('11', 0)
    total_counts = c00 + c11 + c01 + c10
    # ZZ has eigenvalue +1 on |00>/|11> and -1 on |01>/|10>.
    zz = c00 + c11 - c01 - c10
    zz = zz / total_counts
    return zz
# Report the sampled <ZZ> for the trial state.
zz = measure_zz(hets_circuit)
print("<ZZ> =", str(zz))
def measure_zi(given_circuit, num_shots = 10000):
    """Estimate <ZI> for *given_circuit* by sampling in the computational basis.

    Args:
        given_circuit: two-qubit circuit preparing the state of interest.
        num_shots: number of measurement shots.

    Returns:
        float: the estimated <ZI> expectation value.
    """
    zz_meas = measure_zz_circuit(given_circuit)
    result = execute(zz_meas, backend = simulator, shots = num_shots).result()
    counts = result.get_counts(zz_meas)
    # Missing outcomes contribute zero counts; dict.get replaces the
    # original explicit key backfill with identical behavior.
    c00 = counts.get('00', 0)
    c01 = counts.get('01', 0)
    c10 = counts.get('10', 0)
    c11 = counts.get('11', 0)
    total_counts = c00 + c11 + c01 + c10
    # Sign pattern matches the original: +1 on '00'/'01', -1 on '10'/'11'.
    zi = c00 - c11 + c01 - c10
    zi = zi / total_counts
    return zi
def measure_iz(given_circuit, num_shots = 10000):
    """Estimate <IZ> for *given_circuit* by sampling in the computational basis.

    Args:
        given_circuit: two-qubit circuit preparing the state of interest.
        num_shots: number of measurement shots.

    Returns:
        float: the estimated <IZ> expectation value.
    """
    zz_meas = measure_zz_circuit(given_circuit)
    result = execute(zz_meas, backend = simulator, shots = num_shots).result()
    counts = result.get_counts(zz_meas)
    # Missing outcomes contribute zero counts; dict.get replaces the
    # original explicit key backfill with identical behavior.
    c00 = counts.get('00', 0)
    c01 = counts.get('01', 0)
    c10 = counts.get('10', 0)
    c11 = counts.get('11', 0)
    total_counts = c00 + c11 + c01 + c10
    # Sign pattern matches the original: +1 on '00'/'10', -1 on '01'/'11'.
    iz = c00 - c11 - c01 + c10
    iz = iz / total_counts
    return iz
# Report the sampled single-qubit Z expectation values.
zi = measure_zi(hets_circuit)
print("<ZI> =", str(zi))
iz = measure_iz(hets_circuit)
print("<IZ> =", str(iz))
def measure_xx_circuit(given_circuit):
    """Return a copy of *given_circuit* rotated into the X basis and measured."""
    basis_rotated = make_copy(given_circuit)
    ### WRITE YOUR CODE BETWEEN THESE LINES - START
    # A Hadamard on each qubit maps the X basis onto the computational
    # (Z) basis, so ordinary measurements then sample X outcomes.
    for qubit in (0, 1):
        basis_rotated.h(qubit)
    basis_rotated.measure_all()
    ### WRITE YOUR CODE BETWEEN THESE LINES - END
    return basis_rotated
# Build and inspect the X-basis measurement circuit.
xx_meas = measure_xx_circuit(hets_circuit)
xx_meas.draw()
def measure_xx(given_circuit, num_shots = 10000):
    """Estimate <XX> for *given_circuit* by sampling in the X basis.

    Args:
        given_circuit: two-qubit circuit preparing the state of interest.
        num_shots: number of measurement shots.

    Returns:
        float: the estimated <XX> expectation value.
    """
    xx_meas = measure_xx_circuit(given_circuit)
    result = execute(xx_meas, backend = simulator, shots = num_shots).result()
    counts = result.get_counts(xx_meas)
    # Missing outcomes contribute zero counts; dict.get replaces the
    # original explicit key backfill with identical behavior.
    c00 = counts.get('00', 0)
    c01 = counts.get('01', 0)
    c10 = counts.get('10', 0)
    c11 = counts.get('11', 0)
    total_counts = c00 + c11 + c01 + c10
    # After the basis rotation, XX has eigenvalue +1 on '00'/'11' and
    # -1 on '01'/'10'.
    xx = c00 + c11 - c01 - c10
    xx = xx / total_counts
    return xx
# Report the sampled <XX> for the trial state.
xx = measure_xx(hets_circuit)
print("<XX> =", str(xx))
def get_energy(given_circuit, num_shots = 10000):
    """Estimate the H2 Hamiltonian energy of the state from *given_circuit*.

    The Hamiltonian is a fixed linear combination of the II, IZ, ZI, ZZ
    and XX Pauli terms; each expectation value is estimated from
    ``num_shots`` measurement samples.
    """
    exp_zz = measure_zz(given_circuit, num_shots = num_shots)
    exp_iz = measure_iz(given_circuit, num_shots = num_shots)
    exp_zi = measure_zi(given_circuit, num_shots = num_shots)
    exp_xx = measure_xx(given_circuit, num_shots = num_shots)
    # Hard-coded H2 Pauli coefficients from the lab.
    energy = (-1.0523732)*1 + (0.39793742)*exp_iz + (-0.3979374)*exp_zi + (-0.0112801)*exp_zz + (0.18093119)*exp_xx
    return energy
# Baseline energy of the depth-2 trial state.
energy = get_energy(hets_circuit)
print("The energy of the trial state is", str(energy))
hets_circuit_plus = None
hets_circuit_minus = None
### WRITE YOUR CODE BETWEEN THESE LINES - START
# Perturb the first rotation angle by +/-10% of pi/2 to probe the energy
# landscape around the pi/2 working point.
hets_circuit_plus = prepare_hets_circuit(2, pi/2 + 0.1*pi/2, pi/2)
hets_circuit_minus = prepare_hets_circuit(2, pi/2 - 0.1*pi/2, pi/2)
### WRITE YOUR CODE BETWEEN THESE LINES - END
# High shot count to reduce sampling noise in the comparison.
energy_plus = get_energy(hets_circuit_plus, num_shots=100000)
energy_minus = get_energy(hets_circuit_minus, num_shots=100000)
print(energy_plus, energy_minus)
name = 'Pon Rahul M'
email = 'ponrahul.21it@licet.ac.in'
### Do not change the lines below
from grading_tools import grade
grade(answer=measure_xx_circuit(hets_circuit), name=name, email=email, labid='lab9', exerciseid='ex1')
grade(answer=hets_circuit_plus, name=name, email=email, labid='lab9', exerciseid='ex2')
grade(answer=hets_circuit_minus, name=name, email=email, labid='lab9', exerciseid='ex3')
# Repeat the +/- energy evaluation at increasing shot counts to observe the
# statistical error of the energy difference shrink.
energy_plus_100, energy_plus_1000, energy_plus_10000 = 0, 0, 0
energy_minus_100, energy_minus_1000, energy_minus_10000 = 0, 0, 0
### WRITE YOUR CODE BETWEEN THESE LINES - START
energy_plus_100 = get_energy(hets_circuit_plus, num_shots = 100)
energy_minus_100 = get_energy(hets_circuit_minus, num_shots = 100)
energy_plus_1000 = get_energy(hets_circuit_plus, num_shots = 1000)
energy_minus_1000 = get_energy(hets_circuit_minus, num_shots = 1000)
energy_plus_10000 = get_energy(hets_circuit_plus, num_shots = 10000)
energy_minus_10000 = get_energy(hets_circuit_minus, num_shots = 10000)
### WRITE YOUR CODE BETWEEN THESE LINES - END
print(energy_plus_100, energy_minus_100, "difference = ", energy_minus_100 - energy_plus_100)
print(energy_plus_1000, energy_minus_1000, "difference = ", energy_minus_1000 - energy_plus_1000)
print(energy_plus_10000, energy_minus_10000, "difference = ", energy_minus_10000 - energy_plus_10000)
### WRITE YOUR CODE BETWEEN THESE LINES - START
# Single-qubit Pauli matrices used to assemble the Hamiltonian.
I = np.array([
    [1, 0],
    [0, 1]
])
X = np.array([
    [0, 1],
    [1, 0]
])
Z = np.array([
    [1, 0],
    [0, -1]
])
# Two-qubit H2 Hamiltonian: the same Pauli coefficients used by get_energy.
h2_hamiltonian = (-1.0523732) * np.kron(I, I) + \
                 (0.39793742) * np.kron(I, Z) + \
                 (-0.3979374) * np.kron(Z, I) + \
                 (-0.0112801) * np.kron(Z, Z) + \
                 (0.18093119) * np.kron(X, X)
from numpy import linalg as LA
# IMPROVEMENT: the Hamiltonian is real symmetric (Hermitian), so use eigh
# instead of eig — it guarantees real eigenvalues returned in ascending
# order, whereas eig may return them unsorted and with a complex dtype.
eigenvalues, eigenvectors = LA.eigh(h2_hamiltonian)
for ii, eigenvalue in enumerate(eigenvalues):
    print(f"Eigenvector {eigenvectors[:,ii]} has energy {eigenvalue}")
# eigh sorts ascending, so index 0 is the ground state; argmin/min are kept
# for clarity and select the same value.
exact_eigenvector = eigenvectors[:, np.argmin(eigenvalues)]
exact_eigenvalue = np.min(eigenvalues)
print()
print("Minimum energy is", exact_eigenvalue)
### WRITE YOUR CODE BETWEEN THESE LINES - END
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit.utils import algorithm_globals
from qiskit.algorithms.minimum_eigensolvers import QAOA, NumPyMinimumEigensolver
from qiskit.algorithms.optimizers import COBYLA
from qiskit.primitives import Sampler
from qiskit_optimization.applications.vertex_cover import VertexCover
import networkx as nx
# Fix the RNG seed so the random graph and QAOA results are reproducible.
seed = 123
algorithm_globals.random_seed = seed
# Vertex-cover instance: a random 3-regular graph on 6 nodes.
graph = nx.random_regular_graph(d=3, n=6, seed=seed)
pos = nx.spring_layout(graph, seed=seed)
prob = VertexCover(graph)
prob.draw(pos=pos)
# Translate the graph problem into a quadratic program.
qp = prob.to_quadratic_program()
print(qp.prettyprint())
# Numpy Eigensolver
meo = MinimumEigenOptimizer(min_eigen_solver=NumPyMinimumEigensolver())
result = meo.solve(qp)
print(result.prettyprint())
print("\nsolution:", prob.interpret(result))
prob.draw(result, pos=pos)
# QAOA
meo = MinimumEigenOptimizer(min_eigen_solver=QAOA(reps=1, sampler=Sampler(), optimizer=COBYLA()))
result = meo.solve(qp)
print(result.prettyprint())
print("\nsolution:", prob.interpret(result))
print("\ntime:", result.min_eigen_solver_result.optimizer_time)
prob.draw(result, pos=pos)
# Knapsack instance: 5 items with given values/weights and capacity 10.
from qiskit_optimization.applications import Knapsack
prob = Knapsack(values=[3, 4, 5, 6, 7], weights=[2, 3, 4, 5, 6], max_weight=10)
qp = prob.to_quadratic_program()
print(qp.prettyprint())
# Numpy Eigensolver
meo = MinimumEigenOptimizer(min_eigen_solver=NumPyMinimumEigensolver())
result = meo.solve(qp)
print(result.prettyprint())
print("\nsolution:", prob.interpret(result))
# QAOA
meo = MinimumEigenOptimizer(min_eigen_solver=QAOA(reps=1, sampler=Sampler(), optimizer=COBYLA()))
result = meo.solve(qp)
print(result.prettyprint())
print("\nsolution:", prob.interpret(result))
print("\ntime:", result.min_eigen_solver_result.optimizer_time)
from qiskit_optimization.converters import QuadraticProgramToQubo
# the same knapsack problem instance as in the previous section
prob = Knapsack(values=[3, 4, 5, 6, 7], weights=[2, 3, 4, 5, 6], max_weight=10)
qp = prob.to_quadratic_program()
print(qp.prettyprint())
# intermediate QUBO form of the optimization problem
conv = QuadraticProgramToQubo()
qubo = conv.convert(qp)
print(qubo.prettyprint())
# qubit Hamiltonian and offset
op, offset = qubo.to_ising()
print(f"num qubits: {op.num_qubits}, offset: {offset}\n")
print(op)
# NOTE(review): the two lines below are IPython magics — this cell only
# runs in a Jupyter/IPython environment, not as a plain Python script.
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/swe-bench/Qiskit__qiskit
|
swe-bench
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Base classes for an approximate circuit definition."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Optional, SupportsFloat
import numpy as np
from qiskit import QuantumCircuit
class ApproximateCircuit(QuantumCircuit, ABC):
    """A base class that represents an approximate circuit.

    Subclasses describe a parameterized circuit template whose gates are
    materialized by :meth:`build` from a concrete parameter vector.
    """

    def __init__(self, num_qubits: int, name: Optional[str] = None) -> None:
        """
        Args:
            num_qubits: number of qubits this circuit will span.
            name: a name of the circuit.
        """
        super().__init__(num_qubits, name=name)

    @property
    @abstractmethod
    def thetas(self) -> np.ndarray:
        """
        The property is not implemented and raises a ``NotImplementedError`` exception.

        Returns:
            a vector of parameters of this circuit.
        """
        raise NotImplementedError

    @abstractmethod
    def build(self, thetas: np.ndarray) -> None:
        """
        Constructs this circuit out of the parameters (thetas). Parameter values must be set
        before constructing the circuit.

        Args:
            thetas: a vector of parameters to be set in this circuit.
        """
        raise NotImplementedError
class ApproximatingObjective(ABC):
    """
    A base class for an optimization problem definition. An implementing class must provide at
    least an implementation of the ``objective`` method; in that case only gradient-free
    optimizers can be used. Implementing both ``objective`` and ``gradient`` is preferable.
    """

    def __init__(self) -> None:
        # The matrix to approximate; must be set (via the ``target_matrix``
        # setter) before optimization starts.
        self._target_matrix: np.ndarray | None = None

    @abstractmethod
    def objective(self, param_values: np.ndarray) -> SupportsFloat:
        """
        Computes a value of the objective function given a vector of parameter values.

        Args:
            param_values: a vector of parameter values for the optimization problem.

        Returns:
            a float value of the objective function.
        """
        raise NotImplementedError

    @abstractmethod
    def gradient(self, param_values: np.ndarray) -> np.ndarray:
        """
        Computes a gradient with respect to parameters given a vector of parameter values.

        Args:
            param_values: a vector of parameter values for the optimization problem.

        Returns:
            an array of gradient values.
        """
        raise NotImplementedError

    @property
    def target_matrix(self) -> np.ndarray:
        """
        Returns:
            a matrix being approximated.  Note this is ``None`` until the
            setter has been called.
        """
        return self._target_matrix

    @target_matrix.setter
    def target_matrix(self, target_matrix: np.ndarray) -> None:
        """
        Args:
            target_matrix: a matrix to approximate in the optimization procedure.
        """
        self._target_matrix = target_matrix

    @property
    @abstractmethod
    def num_thetas(self) -> int:
        """
        Returns:
            the number of parameters in this optimization problem.
        """
        raise NotImplementedError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.