repo
stringclasses 885
values | file
stringclasses 741
values | content
stringlengths 4
215k
|
|---|---|---|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
qc.draw('mpl')
|
https://github.com/matheusmtta/Quantum-Computing
|
matheusmtta
|
import numpy as np
from numpy import pi
# importing Qiskit
from qiskit import QuantumCircuit, transpile, assemble, Aer
from qiskit.visualization import plot_histogram, plot_bloch_multivector
qc = QuantumCircuit(3)
qc.h(2)
qc.cp(pi/2, 1, 2)
qc.cp(pi/4, 0, 2)
qc.h(1)
qc.cp(pi/2, 0, 1)
qc.h(0)
qc.swap(0, 2)
qc.draw()
def qft_rotations(circuit, n):
if n == 0:
return circuit
n -= 1
circuit.h(n)
for qubit in range(n):
circuit.cp(pi/2**(n-qubit), qubit, n)
qft_rotations(circuit, n)
def swap_registers(circuit, n):
for qubit in range(n//2):
circuit.swap(qubit, n-qubit-1)
return circuit
def qft(circuit, n):
qft_rotations(circuit, n)
swap_registers(circuit, n)
return circuit
qc = QuantumCircuit(4)
qft(qc,4)
qc.draw()
# Create the circuit
qc = QuantumCircuit(3)
# Encode the state 5 (101 in binary)
qc.x(0)
qc.x(2)
qc.draw()
sim = Aer.get_backend("aer_simulator")
qc_init = qc.copy()
qc_init.save_statevector()
statevector = sim.run(qc_init).result().get_statevector()
plot_bloch_multivector(statevector)
qft(qc, 3)
qc.draw()
qc.save_statevector()
statevector = sim.run(qc).result().get_statevector()
plot_bloch_multivector(statevector)
|
https://github.com/fvarchon/qiskit-intros
|
fvarchon
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, Aer, execute
from qiskit_aqua.algorithms import Grover
from qiskit.ignis.verification.randomized_benchmarking import RBFitter
from qiskit_chemistry import QiskitChemistry
api_token = ''
url = ''
from qiskit import IBMQ
IBMQ.save_account(api_token, url)
IBMQ.load_accounts()
IBMQ.backends()
from qiskit.tools.jupyter import *
%qiskit_backend_overview
|
https://github.com/acfilok96/Quantum-Computation
|
acfilok96
|
import qiskit
from qiskit import *
print(qiskit.__version__)
%matplotlib inline
from qiskit.tools.visualization import plot_histogram
from ibm_quantum_widgets import draw_circuit
# initialize two qubits in the zero state and
# two classical bits in the zero state in the quantum circuit
circuit = QuantumCircuit(3,3)
# C gate
# (2) => q2
circuit.x(2)
circuit.draw(output='mpl')
draw_circuit(circuit)
# Hadamard (H) gate
# (0) => (q0)
circuit.h(0)
circuit.draw(output='mpl')
draw_circuit(circuit)
# A controlled-NOT (CX NOT) gate, on control qubit 0
# and target qubit 1, putting the qubits in an entangled state.
# (2,1) = > q2(control bit)(quantum bit)(origin) --> q1(target bit)(quantum bit)(destination)
circuit.cx(2,1)
# (0,2) = > q0(control bit)(quantum bit)(origin) --> q2(target bit)(quantum bit)(destination)
circuit.cx(0,2)
circuit.draw(output='mpl')
draw_circuit(circuit)
# measurement between quantum state and classical state
# [1,1] => [q1,q1](quantum bit) and [2,0] => [c2,c0](classical bit)
circuit.measure([1,1],[2,0])
circuit.draw(output='mpl')
draw_circuit(circuit)
# The n qubit’s measurement result will be stored in the n classical bit
circuit.measure([1,1,2],[2,0,0])
circuit.draw(output='mpl')
draw_circuit(circuit)
simulator = Aer.get_backend("qasm_simulator")
job = execute(circuit, simulator, shots=2024)
result = job.result()
counts = result.get_counts(circuit)
print("Total count for 100 and 101 are: ", counts)
# 1.
plot_histogram(counts)
# 2.
from ibm_quantum_widgets import draw_circuit
draw_circuit(circuit)
|
https://github.com/jonasmaziero/computacao_quantica_qiskit
|
jonasmaziero
|
%run init.ipynb
lbda = symbols('lambda', real=True); E0 = Matrix([[1,0],[0,sqrt(1-lbda)]]); E0
# coefficients of the expansion of E0 in the Pauli basis
c0 = trace((id(2)/sqrt(2))*E0); c1 = trace((pauli(1)/sqrt(2))*E0)
c2 = trace((pauli(2)/sqrt(2))*E0); c3 = trace((pauli(3)/sqrt(2))*E0)
simplify(c0), simplify(c1), simplify(c2), simplify(c3)
E1 = Matrix([[0,0],[0,sqrt(lbda)]]); E1
# coefficients for the expansion of E1 in the Pauli basis
c0 = trace((id(2)/sqrt(2))*E1); c1 = trace((pauli(1)/sqrt(2))*E1)
c2 = trace((pauli(2)/sqrt(2))*E1); c3 = trace((pauli(3)/sqrt(2))*E1)
simplify(c0), simplify(c1), simplify(c2), simplify(c3)
V00,V10,V20,V30,W00,W01,W02,W03,W10,W11,W12,W13=symbols('V00 V10 V20 V30 W00 W01 W02 W03 W10 W11 W12 W13',real=True)
#nonlinsolve([V00**2+V10**2+V20**2+V30**2-1, W00**2+W01**2+W02**2+W03**2-1,
# W10**2+W11**2+W12**2+W13**2-1,W10*W00+W11*W01+W12*W02+W13*W03,
# W00*V00-(1+sqrt(1-lbda))/2, W01*V10, W02*V20, W03*V30-(1-sqrt(1-lbda))/2,
# W10*V00-sqrt(lbda)/2, W11*V10, W12*V20, W13*V30+sqrt(lbda)/2],
# [V00,V10,V20,V30,W00,W01,W02,W03,W10,W11,W12,W13])
# took too long for solving (did not return any result)
V = Matrix([[sqrt((1+sqrt(1-lbda))/2),0,0,sqrt((1-sqrt(1-lbda))/2)],[0,1,0,0],[0,0,1,0],
[sqrt((1-sqrt(1-lbda))/2),0,0,-sqrt((1+sqrt(1-lbda))/2)]])
V
V*V.T, V.T*V # verification of unitarity
W = Matrix([[0,1,0,0],[sqrt((1-sqrt(1-lbda))/2),0,0,sqrt((1+sqrt(1-lbda))/2)],[0,0,1,0],
[sqrt((1+sqrt(1-lbda))/2),0,0,-sqrt((1-sqrt(1-lbda))/2)]])
W
W*W.T, W.T*W # verification of unitarity
r00,r01,r10,r11 = symbols('r00,r01,r10,r11',real=True)
rhoS0 = Matrix([[r00,r01],[r10,r11]])
rhoS0 # initial state
rhoS = E0*rhoS0*E0 + E1*rhoS0*E1; simplify(rhoS) # evolved state using the Kraus' operators
# Outside these functions, initialize: rhos = zeros(ds,ds), s=A,B
def ptraceA(da, db, rho):
rhoB = zeros(db,db)
for j in range(0, db):
for k in range(0, db):
for l in range(0, da):
rhoB[j,k] += rho[l*db+j,l*db+k]
return rhoB
def ptraceB(da, db, rho):
rhoA = zeros(da,da)
for j in range(0, da):
for k in range(0, da):
for l in range(0, db):
rhoA[j,k] += rho[j*db+l,k*db+l]
return rhoA
# tests the circuit for the PDC
rhoSA0 = tp(rhoS0,proj(cb(4,0)))#; rhoSA0
rhoSA1 = tp(id(2),V)*rhoSA0*tp(id(2),V.T)#; rhoSA1
Uc = tp(id(2),proj(cb(4,0))) + tp(id(2),proj(cb(4,1))) + tp(id(2),proj(cb(4,2))) + tp(pauli(3),proj(cb(4,3)))
#Uc*Uc.T, Uc.T*Uc
rhoSA2 = Uc*rhoSA1*Uc.T; #rhoSA2
rhoSA3 = tp(id(2),W)*rhoSA2*tp(id(2),W.T) # Não precisava pois só mudará a base de A
rhoS = ptraceB(2, 4, rhoSA3); simplify(rhoS) # ok!
V = Matrix([[sqrt((1+sqrt(1-lbda))/2),sqrt((1-sqrt(1-lbda))/2)],
[sqrt((1-sqrt(1-lbda))/2),-sqrt((1+sqrt(1-lbda))/2)]])
W = V; V
# tests the (http://arxiv.org/abs/1704.05593) circuit
rhoSA0 = tp(rhoS0,proj(cb(2,0))); rhoSA0
rhoSA1 = tp(id(2),V)*rhoSA0*tp(id(2),V.T); rhoSA1
Uc = tp(id(2),proj(cb(2,0))) + tp(pauli(3),proj(cb(2,1))); Uc, Uc*Uc.T, Uc.T*Uc
rhoSA2 = Uc*rhoSA1*Uc.T; rhoSA2
rhoSA3 = tp(id(2),W)*rhoSA2*tp(id(2),W.T)
rhoS = ptraceB(2, 2, rhoSA3); simplify(rhoS) # also works!
lb = np.arange(0,1.05,0.05); th = 2*np.arccos(np.sqrt((1+np.sqrt(1-lb))/2))
th_ = 2*np.arcsin(np.sqrt((1-np.sqrt(1-lb))/2))
plt.plot(lb,th); plt.plot(lb, th_, '*')
plt.xlabel(r'$\lambda$'); plt.ylabel(r'$\theta$'); plt.show()
# phase damping channel, exact
def phase_damping(rho, lb):
E0 = Matrix([[1,0],[0,sqrt(1-lb)]]); E1 = Matrix([[0,0],[0,sqrt(lb)]])
return E0*rho*E0 + E1*rho*E1
lb = symbols('lambda')
psi = (cb(2,0)+cb(2,1))/sqrt(2); rho = proj(psi); rhopd = phase_damping(rho, lb)
rho, rhopd
def coh_l1(rho):
d = rho.shape[0]; C = 0
for j in range(0, d-1):
for k in range(j+1, d):
C += abs(rho[j,k])
return 2*C
coh = coh_l1(rhopd); coh
# simulation
from qiskit import *
def state_prep():
qr = QuantumRegister(1); qc = QuantumCircuit(qr, name = 'state_prep')
qc.h(qr[0])
return qc
qc_sp = state_prep(); qc_sp.draw()
def qc_sim_pd(lb):
qr = QuantumRegister(2); qc = QuantumCircuit(qr, name = 'pd_sim')
th = 2*math.acos(math.sqrt((1+math.sqrt(1-lb))/2))
qc.u(th, 0, math.pi, qr[1]); qc.cz(qr[1],qr[0]); qc.u(th, 0, math.pi, qr[1])
return qc
lb = 0.5; qc_pd = qc_sim_pd(lb); qc_pd.draw()
qr = QuantumRegister(2); qc = QuantumCircuit(qr)
qc_sp = state_prep(); qc.append(qc_sp, [qr[0]]); qc.barrier()
lb = 0; qc_pd = qc_sim_pd(lb); qc.append(qc_pd, [qr[0],qr[1]]); qc.barrier()
qc.draw()
from qiskit.ignis.verification.tomography import state_tomography_circuits, StateTomographyFitter
nshots = 8192
qiskit.IBMQ.load_account()
provider = qiskit.IBMQ.get_provider(hub='ibm-q', group='open', project='main')
device = provider.get_backend('ibmq_belem')
from qiskit.tools.monitor import job_monitor
lb = np.arange(0,1.05,0.05); d = lb.shape[0]; Cteo = np.zeros(d); Csim = np.zeros(d); Cexp = np.zeros(d)
for j in range(0, d):
Cteo[j] = np.abs(math.sqrt(1-lb[j])) # theoretical
qr = QuantumRegister(2); qc = QuantumCircuit(qr)
qc_sp = state_prep(); qc.append(qc_sp, [qr[0]]) # state preparation
qc_pd = qc_sim_pd(lb[j]); qc.append(qc_pd, [qr[0],qr[1]]) # apply sim phase damping
qstc = state_tomography_circuits(qc, qr[0]) # circuit for state tomography
job = qiskit.execute(qstc, Aer.get_backend('qasm_simulator'), shots=nshots)
qstf = StateTomographyFitter(job.result(), qstc); rho = qstf.fit(method='lstsq')
Csim[j] = coh_l1(rho)
# 1st I did the simulation, only afterwards I added the code for the experiments
job = qiskit.execute(qstc, backend = device, shots = nshots)
print(job.job_id()); job_monitor(job)
qstf = StateTomographyFitter(job.result(), qstc); rho = qstf.fit(method='lstsq')
Cexp[j] = coh_l1(rho)
matplotlib.rcParams.update({'font.size':12}); plt.figure(figsize = (6,4), dpi = 100)
plt.plot(lb, Cteo, label = r'$C_{teo}$'); plt.plot(lb, Csim, 'o', label = r'$C_{sim}$')
plt.plot(lb, Cexp, '*', label = r'$C_{exp}$')
plt.legend(); plt.xlabel(r'$\lambda$'); plt.show()
2960/8
750/15
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 8
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = data_iter.__next__()
images=images.squeeze()
#axes[n_samples_show - 1].imshow( tf.shape( tf.squeeze(images[0]) ),cmap='gray' )
#plt.imshow((tf.squeeze(images[0])))
#plt.imshow( tf.shape( tf.squeeze(x_train) ) )
#axes[n_samples_show - 1].imshow(images[0].numpy().squeeze(), cmap='gray')
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listA = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listA.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listA[-1]))
#Now plotting the training graph
plt.plot(loss_listA)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listb = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listb.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listb[-1]))
#Now plotting the training graph
plt.plot(loss_listb)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 10
loss_listc = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listc.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listc[-1]))
#Now plotting the training graph
plt.plot(loss_listc)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 15
loss_listd = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listd.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listd[-1]))
#Now plotting the training graph
plt.plot(loss_listd)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adadelta(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adadelta(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
##Adagrad
model = Net()
optimizer = optim.Adagrad(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listG = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listG.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listG[-1]))
#Now plotting the training graph
plt.plot(loss_listG)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
# Hyperparameter sweep: train the hybrid Net with several optimizers,
# learning rates and epoch budgets, plot each run's convergence, and
# evaluate on the test set after each run.
#
# NOTE(review): accuracy divides by len(test_loader) (number of batches);
# this equals the sample count only when the test batch size is 1 -- confirm.

def _evaluate(model, loss_func):
    """Report mean NLL loss and accuracy of `model` on the test set."""
    model.eval()
    with torch.no_grad():
        correct = 0
        # Bug fix: the original kept appending test losses onto the previous
        # training epoch's `total_loss` list, so the reported test loss was
        # polluted by training losses. Use a fresh list here.
        test_losses = []
        for data, target in test_loader:
            output = model(data)
            pred = output.argmax(dim=1, keepdim=True)  # class with highest log-prob
            correct += pred.eq(target.view_as(pred)).sum().item()
            test_losses.append(loss_func(output, target).item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(test_losses) / len(test_losses),
        correct / len(test_loader) * 100))

def _train(optimizer_cls, epochs, lr):
    """Train a fresh Net for `epochs` epochs with `optimizer_cls(lr=lr)`.

    Prints per-epoch progress, plots the convergence curve, and returns
    (model, loss_func, per_epoch_mean_losses).
    """
    model = Net()
    optimizer = optimizer_cls(model.parameters(), lr=lr)
    loss_func = nn.NLLLoss()
    loss_history = []
    model.train()
    for epoch in range(epochs):
        batch_losses = []
        for data, target in train_loader:
            optimizer.zero_grad()
            output = model(data)               # forward pass
            loss = loss_func(output, target)   # NLL loss
            loss.backward()                    # backward pass
            optimizer.step()                   # update weights
            batch_losses.append(loss.item())
        loss_history.append(sum(batch_losses) / len(batch_losses))
        print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
            100. * (epoch + 1) / epochs, loss_history[-1]))
    plt.plot(loss_history)
    plt.title('Hybrid NN Training Convergence')
    plt.xlabel('Training Iterations')
    plt.ylabel('Neg Log Likelihood Loss')
    return model, loss_func, loss_history

# Evaluate the model trained before this sweep began.
_evaluate(model, loss_func)

# RMSprop, lr=0.0001, increasing epoch budgets.
model, loss_func, loss_listG = _train(optim.RMSprop, 5, 0.0001)
model, loss_func, loss_listH = _train(optim.RMSprop, 10, 0.0001)
_evaluate(model, loss_func)
model, loss_func, loss_listH = _train(optim.RMSprop, 15, 0.0001)
_evaluate(model, loss_func)
model, loss_func, loss_listH = _train(optim.RMSprop, 20, 0.0001)
_evaluate(model, loss_func)

# Adam, lr=0.0001, 20 epochs.
model, loss_func, loss_listH = _train(optim.Adam, 20, 0.0001)
_evaluate(model, loss_func)
# The original re-evaluated this same model three more times (duplicated
# notebook cells); that observable behavior is preserved here.
_evaluate(model, loss_func)
_evaluate(model, loss_func)
_evaluate(model, loss_func)

# RMSprop vs Adam at lr=0.0001.
model, loss_func, loss_listD = _train(optim.RMSprop, 10, 0.0001)
_evaluate(model, loss_func)
model, loss_func, loss_listD = _train(optim.Adam, 10, 0.0001)
_evaluate(model, loss_func)
model, loss_func, loss_listE = _train(optim.Adam, 15, 0.0001)
_evaluate(model, loss_func)

# Adam, lr=0.001, increasing epoch budgets.
model, loss_func, loss_list1 = _train(optim.Adam, 10, 0.001)
_evaluate(model, loss_func)
model, loss_func, loss_list2 = _train(optim.Adam, 15, 0.001)
_evaluate(model, loss_func)
model, loss_func, loss_list3 = _train(optim.Adam, 20, 0.001)
_evaluate(model, loss_func)
model, loss_func, loss_list4 = _train(optim.Adam, 30, 0.001)
_evaluate(model, loss_func)
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# Imports and plotting defaults for the error-mitigation comparison demo.
import datetime
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rcParams.update({"text.usetex": True})
plt.rcParams["figure.figsize"] = (6,4)
mpl.rcParams["figure.dpi"] = 200
from qiskit_ibm_runtime import Estimator, Session, QiskitRuntimeService, Options
from qiskit.quantum_info import SparsePauliOp
from qiskit import QuantumCircuit
service = QiskitRuntimeService()
# NOTE(review): "backend_simulator" looks like a placeholder name, not a real
# runtime backend -- confirm the intended simulator backend string.
backend_simulator = "backend_simulator"
backend = "ibmq_montreal"
# One first-order Trotter layer on 4 qubits: RX on every qubit followed by
# ZZ interactions implemented as CX-RZ-CX on pairs (0,1), (2,3), then (1,2).
qubits = 4
trotter_layer = QuantumCircuit(qubits)
trotter_layer.rx(0.1, range(qubits))
trotter_layer.cx(0, 1)
trotter_layer.cx(2, 3)
trotter_layer.rz(-0.2, [1, 3])
trotter_layer.cx(0, 1)
trotter_layer.cx(2, 3)
trotter_layer.cx(1, 2)
trotter_layer.rz(-0.2, 2)
trotter_layer.cx(1, 2)
# Build circuits containing 1 .. num_steps-1 repetitions of the Trotter layer.
num_steps = 6
trotter_circuit_list = []
for i in range(1, num_steps):
    trotter_circuit = QuantumCircuit(qubits)
    for _ in range(i):
        trotter_circuit = trotter_circuit.compose(trotter_layer)
    trotter_circuit_list.append(trotter_circuit)
    # Bug fix: the original f-string ended with a backtick instead of a
    # closing quote (SyntaxError).
    print(f'Trotter circuit with {i} Trotter steps')
    display(trotter_circuit.draw(fold=-1))
# Observable <ZZZZ> for every circuit; exact (noise-free) reference run.
obs = SparsePauliOp("Z"*qubits)
obs_list = [obs]*len(trotter_circuit_list)
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # No optimization
options.resilience_level = 0 # No mitigation
with Session(service=service, backend=backend_simulator) as session:
    estimator_sim = Estimator(session=session, options=options)
    job_sim = estimator_sim.run(circuits=trotter_circuit_list, observables=obs_list)
    # NOTE(review): depending on the qiskit-ibm-runtime version, job_id may be
    # a method (job_sim.job_id()) rather than a property -- confirm.
    print('job id:', job_sim.job_id)
    print(job_sim.result())
expvals_ideal = job_sim.result().values
# Shot-noise standard error from the reported per-circuit variance.
expvals_ideal_variance = [metadata['variance']/metadata['shots'] for metadata in job_sim.result().metadata]
std_error_ideal = np.sqrt(expvals_ideal_variance)
# Hardware run with no mitigation at all (baseline).
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # No optimization
options.resilience_level = 0 # No error mitigation
with Session(service=service, backend=backend) as session:
    estimator = Estimator(session=session, options=options)
    job = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job.job_id)
    print(job.result())
expvals_unmit = job.result().values
expvals_unmit_variance = [metadata['variance']/metadata['shots'] for metadata in job.result().metadata]
std_error_unmit = np.sqrt(expvals_unmit_variance)
# Hardware run again; optimization_level=3 enables dynamical decoupling
# in qiskit-ibm-runtime, while resilience stays off.
options = Options()
options.execution.shots = 1000
options.optimization_level = 3 # Dynamical decoupling
options.resilience_level = 0 # No error mitigation
with Session(service=service, backend=backend) as session:
    estimator = Estimator(session=session, options=options)
    job_dd = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_dd.job_id)
    print(job_dd.result())
expvals_unmit_dd = job_dd.result().values
expvals_unmit_dd_variance = [metadata['variance']/metadata['shots'] for metadata in job_dd.result().metadata]
std_error_dd = np.sqrt(expvals_unmit_dd_variance)
# Compare ideal vs unmitigated vs dynamical-decoupling expectation values.
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_dd, std_error_dd, fmt = 'o', linestyle = '-', capsize=4, c='blue', label='Dynamical decoupling')
# Bug fix: the original f"$\langle ZZZZ \\rangle$" contained the invalid
# escape sequence "\l" (DeprecationWarning) and had no placeholders; this
# raw string produces the identical label text.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
# Resilience level 1: twirled readout error extinction (T-REx).
options = Options()
options.resilience_level = 1 # T-REx
options.optimization_level = 0 # No optimization
options.execution.shots = 1000
with Session(service=service, backend=backend) as session:
    estimator = Estimator(session=session, options=options)
    job_trex = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_trex.job_id)
    print(job_trex.result())
expvals_unmit_trex = job_trex.result().values
expvals_unmit_trex_variance = [metadata['variance']/metadata['shots'] for metadata in job_trex.result().metadata]
std_error_trex = np.sqrt(expvals_unmit_trex_variance)
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_trex, std_error_trex, fmt = 'o', linestyle = '-', capsize=4, c='violet', label='T-REx')
# NOTE(review): "\l" below is an invalid escape sequence and the f-string has
# no placeholders; r"$\langle ZZZZ \rangle$" would yield the same text.
plt.ylabel(f"$\langle ZZZZ \\rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
# Resilience level 2: zero-noise extrapolation (ZNE).
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # No optimization
options.resilience_level = 2 # ZNE
with Session(service=service, backend=backend) as session:
    estimator = Estimator(session=session, options=options)
    job_zne = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_zne.job_id)
    print(job_zne.result())
expvals_unmit_zne = job_zne.result().values
# Standard error: coming soon!
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
# ZNE error bars are zeroed until the primitive reports a standard error.
plt.errorbar(range(1, num_steps), expvals_unmit_zne, [0]*(num_steps-1), fmt = 'o', linestyle = '-', capsize=4, c='cyan', label='ZNE')
plt.xlabel('No. Trotter Steps')
plt.ylabel(f"$\langle ZZZZ \\rangle$")
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
def interim_results_callback(job_id, result):
    """Print each interim runtime result as it streams in, with a timestamp."""
    timestamp = datetime.datetime.now()
    print(timestamp, "*** Callback ***", result, "\n")
# Resilience level 3: probabilistic error cancellation (PEC). Fewer shots are
# requested because PEC samples many noisy circuit instances per circuit.
options = Options()
options.optimization_level = 0 # No optimization
options.execution.shots = 100
options.resilience_level = 3 # PEC
options.environment.callback = interim_results_callback
with Session(service=service, backend=backend) as session:
    estimator_pec = Estimator(session=session, options=options)
    job_pec = estimator_pec.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_pec.job_id)
expvals_pec = job_pec.result().values
# PEC reports a standard error directly in the metadata.
std_error_pec = [metadata['standard_error'] for metadata in job_pec.result().metadata]
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_pec, std_error_pec, fmt = 'd', linestyle = '-', capsize=4, c='orange', label='PEC')
plt.ylabel(f"$\langle ZZZZ \\rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
print(job_pec.result())
# Four-axis plot of the PEC metadata reported per circuit.
pec_metadata = job_pec.result().metadata
fig, ax = plt.subplots()
fig.subplots_adjust(right=0.75)
twin1 = ax.twinx()
twin2 = ax.twinx()
twin3 = ax.twinx()
# Offset the extra y-axes so their spines do not overlap.
twin2.spines.right.set_position(("axes", 1.2))
twin3.spines.right.set_position(("axes", 1.4))
p1, = ax.plot(range(1, num_steps), [m["total_mitigated_layers"] for m in pec_metadata] , "b-", label="Total mitigated layers")
p2, = twin1.plot(range(1, num_steps), [m["sampling_overhead"] for m in pec_metadata], "r-", label="Sampling overhead")
p3, = twin2.plot(range(1, num_steps), [m["samples"] for m in pec_metadata], "g-", label="Samples")
p4, = twin3.plot(range(1, num_steps), [m["shots"] for m in pec_metadata], "c-", label="Shots")
ax.set_ylim(0, 20)
twin1.set_ylim(0, 2.8)
twin2.set_ylim(0, 300)
twin3.set_ylim(0, 35000)
ax.set_xlabel("No. Trotter Steps")
ax.set_ylabel("Total mitigated layers")
twin1.set_ylabel("Sampling overhead")
twin2.set_ylabel("Samples")
twin3.set_ylabel("Shots")
# Color each axis label and its ticks to match the corresponding line.
ax.yaxis.label.set_color(p1.get_color())
twin1.yaxis.label.set_color(p2.get_color())
twin2.yaxis.label.set_color(p3.get_color())
twin3.yaxis.label.set_color(p4.get_color())
tkw = dict(size=4, width=1.5)
ax.tick_params(axis='y', colors=p1.get_color(), **tkw)
twin1.tick_params(axis='y', colors=p2.get_color(), **tkw)
twin2.tick_params(axis='y', colors=p3.get_color(), **tkw)
twin3.tick_params(axis='y', colors=p4.get_color(), **tkw)
plt.xticks([1, 2, 3, 4, 5])
ax.legend(handles=[p1, p2, p3, p4])
plt.title('PEC metadata')
plt.show()
# Summary plot: every mitigation strategy against the ideal values.
from matplotlib.pyplot import figure
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_trex, std_error_trex, fmt = 'o', linestyle = '-', capsize=4, c='violet', label='T-REx')
plt.errorbar(range(1, num_steps), expvals_unmit_zne, [0]*(num_steps-1), fmt = 'o', linestyle = '-', capsize=4, c='cyan', label='ZNE')
plt.errorbar(range(1, num_steps), expvals_pec, std_error_pec, fmt = 'd', linestyle = '-', capsize=4, c='orange', label='PEC')
plt.title('Trotter circuits expectation value')
plt.ylabel(f"$\langle ZZZZ \\rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
# ZNE again, with explicit noise factors, folding amplifier and extrapolator.
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # no optimization
options.resilience_level = 2 # ZNE
options.resilience.noise_factors = [1, 2, 3, 4]
options.resilience.noise_amplifier = "LocalFoldingAmplifier"
options.resilience.extrapolator = "QuadraticExtrapolator"
with Session(service=service, backend='ibmq_montreal') as session:
    estimator = Estimator(session=session, options=options)
    job_zne_options = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_zne_options.job_id)
    print(job_zne_options.result())
from qiskit.tools import jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit_optimization.problems import QuadraticProgram
# define a problem: maximize x*y subject to x <= y, with x binary and
# y an integer in [-1, 4].
qp = QuadraticProgram()
qp.binary_var("x")
qp.integer_var(name="y", lowerbound=-1, upperbound=4)
qp.maximize(quadratic={("x", "y"): 1})
qp.linear_constraint({"x": 1, "y": -1}, "<=", 0)
print(qp.prettyprint())
# Solve with the classical CPLEX and Gurobi wrappers for reference.
from qiskit_optimization.algorithms import CplexOptimizer, GurobiOptimizer
cplex_result = CplexOptimizer().solve(qp)
gurobi_result = GurobiOptimizer().solve(qp)
print("cplex")
print(cplex_result.prettyprint())
print()
print("gurobi")
print(gurobi_result.prettyprint())
# CPLEX again with log output and solver parameters.
result = CplexOptimizer(disp=True, cplex_parameters={"threads": 1, "timelimit": 0.1}).solve(qp)
print(result.prettyprint())
# Solve the same program with QAOA through MinimumEigenOptimizer.
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit_aer import Aer
from qiskit.algorithms.minimum_eigensolvers import QAOA
from qiskit.algorithms.optimizers import COBYLA
from qiskit.primitives import Sampler
meo = MinimumEigenOptimizer(QAOA(sampler=Sampler(), optimizer=COBYLA(maxiter=100)))
result = meo.solve(qp)
print(result.prettyprint())
print("\ndisplay the best 5 solution samples")
for sample in result.samples[:5]:
    print(sample)
# docplex model: the same toy problem expressed with the DOcplex API.
from docplex.mp.model import Model
docplex_model = Model("docplex")
x = docplex_model.binary_var("x")
y = docplex_model.integer_var(-1, 4, "y")
docplex_model.maximize(x * y)
docplex_model.add_constraint(x <= y)
docplex_model.prettyprint()
# gurobi model: and once more with the gurobipy API.
import gurobipy as gp
gurobipy_model = gp.Model("gurobi")
x = gurobipy_model.addVar(vtype=gp.GRB.BINARY, name="x")
y = gurobipy_model.addVar(vtype=gp.GRB.INTEGER, lb=-1, ub=4, name="y")
gurobipy_model.setObjective(x * y, gp.GRB.MAXIMIZE)
gurobipy_model.addConstr(x - y <= 0)
gurobipy_model.update()
gurobipy_model.display()
# Convert both third-party models into QuadraticPrograms.
from qiskit_optimization.translators import from_docplex_mp, from_gurobipy
qp = from_docplex_mp(docplex_model)
# Bug fix: the printed message misspelled "docplex" as "docpblex".
print("QuadraticProgram obtained from docplex")
print(qp.prettyprint())
print("-------------")
print("QuadraticProgram obtained from gurobipy")
qp2 = from_gurobipy(gurobipy_model)
print(qp2.prettyprint())
# Round-trip conversions through QuadraticProgram.
from qiskit_optimization.translators import to_gurobipy, to_docplex_mp
gmod = to_gurobipy(from_docplex_mp(docplex_model))
print("convert docplex to gurobipy via QuadraticProgram")
gmod.display()
dmod = to_docplex_mp(from_gurobipy(gurobipy_model))
print("\nconvert gurobipy to docplex via QuadraticProgram")
print(dmod.export_as_lp_string())
# Indicator constraint example: y >= z is enforced only when x == 1.
ind_mod = Model("docplex")
x = ind_mod.binary_var("x")
y = ind_mod.integer_var(-1, 2, "y")
z = ind_mod.integer_var(-1, 2, "z")
ind_mod.maximize(3 * x + y - z)
ind_mod.add_indicator(x, y >= z, 1)
print(ind_mod.export_as_lp_string())
qp = from_docplex_mp(ind_mod)
result = meo.solve(qp) # apply QAOA to QuadraticProgram
print("QAOA")
print(result.prettyprint())
print("-----\nCPLEX")
print(ind_mod.solve()) # apply CPLEX directly to the Docplex model
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit_nature.units import DistanceUnit
from qiskit_nature.second_q.drivers import PySCFDriver
# H2 molecule at its equilibrium bond length (0.735 Angstrom), STO-3G basis.
driver = PySCFDriver(
    atom="H 0 0 0; H 0 0 0.735",
    basis="sto3g",
    charge=0,
    spin=0,
    unit=DistanceUnit.ANGSTROM,
)
es_problem = driver.run()
from qiskit_nature.second_q.mappers import JordanWignerMapper
mapper = JordanWignerMapper()
# Exact diagonalization solver used as the classical reference.
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver
numpy_solver = NumPyMinimumEigensolver()
# VQE with a UCCSD ansatz initialized from the Hartree-Fock state.
from qiskit.algorithms.minimum_eigensolvers import VQE
from qiskit.algorithms.optimizers import SLSQP
from qiskit.primitives import Estimator
from qiskit_nature.second_q.circuit.library import HartreeFock, UCCSD
ansatz = UCCSD(
    es_problem.num_spatial_orbitals,
    es_problem.num_particles,
    mapper,
    initial_state=HartreeFock(
        es_problem.num_spatial_orbitals,
        es_problem.num_particles,
        mapper,
    ),
)
vqe_solver = VQE(Estimator(), ansatz, SLSQP())
# All-zero initial point starts the optimization at the Hartree-Fock reference.
vqe_solver.initial_point = [0.0] * ansatz.num_parameters
# Alternative solver: VQE with a heuristic TwoLocal ansatz.
from qiskit.algorithms.minimum_eigensolvers import VQE
from qiskit.circuit.library import TwoLocal
tl_circuit = TwoLocal(
    rotation_blocks=["h", "rx"],
    entanglement_blocks="cz",
    entanglement="full",
    reps=2,
    parameter_prefix="y",
)
another_solver = VQE(Estimator(), tl_circuit, SLSQP())
# Ground state via UCCSD-VQE, then via exact diagonalization for comparison.
from qiskit_nature.second_q.algorithms import GroundStateEigensolver
calc = GroundStateEigensolver(mapper, vqe_solver)
res = calc.solve(es_problem)
print(res)
calc = GroundStateEigensolver(mapper, numpy_solver)
res = calc.solve(es_problem)
print(res)
# Vibrational structure problem (CO2 force field) in a truncated harmonic basis.
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver
from qiskit_nature.second_q.drivers import GaussianForcesDriver
from qiskit_nature.second_q.mappers import DirectMapper
from qiskit_nature.second_q.problems import HarmonicBasis
driver = GaussianForcesDriver(logfile="aux_files/CO2_freq_B3LYP_631g.log")
basis = HarmonicBasis([2, 2, 2, 2])
vib_problem = driver.run(basis=basis)
vib_problem.hamiltonian.truncation_order = 2
mapper = DirectMapper()
# Without a filter the eigensolver may return unphysical eigenstates; the
# filtered solver keeps only states satisfying the problem's constraints.
solver_without_filter = NumPyMinimumEigensolver()
solver_with_filter = NumPyMinimumEigensolver(
    filter_criterion=vib_problem.get_default_filter_criterion()
)
gsc_wo = GroundStateEigensolver(mapper, solver_without_filter)
result_wo = gsc_wo.solve(vib_problem)
gsc_w = GroundStateEigensolver(mapper, solver_with_filter)
result_w = gsc_w.solve(vib_problem)
print(result_wo)
print("\n\n")
print(result_w)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Bikramaditya0154/Quantum-Simulation-of-the-ground-states-of-Li-and-Li-2-using-Variational-Quantum-EIgensolver
|
Bikramaditya0154
|
from qiskit import Aer
from qiskit_nature.drivers import UnitsType, Molecule
from qiskit_nature.drivers.second_quantization import (
    ElectronicStructureDriverType,
    ElectronicStructureMoleculeDriver,
)
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
# Single lithium atom at the origin. NOTE(review): charge=1 makes this the
# Li+ cation (2 electrons), not neutral Li -- confirm the intended system.
molecule = Molecule(
    geometry=[["Li", [0.0, 0.0, 0.0]]], charge=1, multiplicity=1
)
driver = ElectronicStructureMoleculeDriver(
    molecule, basis="sto3g", driver_type=ElectronicStructureDriverType.PYSCF
)
es_problem = ElectronicStructureProblem(driver)
qubit_converter = QubitConverter(JordanWignerMapper())
# VQE with the UCC factory on a statevector simulator.
from qiskit.providers.aer import StatevectorSimulator
from qiskit import Aer
from qiskit.utils import QuantumInstance
from qiskit_nature.algorithms import VQEUCCFactory
quantum_instance = QuantumInstance(backend=Aer.get_backend("aer_simulator_statevector"))
vqe_solver = VQEUCCFactory(quantum_instance=quantum_instance)
# Alternative solver with a heuristic TwoLocal ansatz (defined but not used below).
from qiskit.algorithms import VQE
from qiskit.circuit.library import TwoLocal
tl_circuit = TwoLocal(
    rotation_blocks=["h", "rx"],
    entanglement_blocks="cz",
    entanglement="full",
    reps=2,
    parameter_prefix="y",
)
another_solver = VQE(
    ansatz=tl_circuit,
    quantum_instance=QuantumInstance(Aer.get_backend("aer_simulator_statevector")),
)
# Ground-state calculation with the UCC VQE solver.
from qiskit_nature.algorithms import GroundStateEigensolver
calc = GroundStateEigensolver(qubit_converter, vqe_solver)
res = calc.solve(es_problem)
print(res)
|
https://github.com/aryashah2k/Quantum-Computing-Collection-Of-Resources
|
aryashah2k
|
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from qiskit import Aer
from qiskit.aqua import QuantumInstance
from qiskit.aqua.algorithms import QGAN
# Real data: 1000 samples from Binomial(3, 0.5); values lie in {0, 1, 2, 3}.
np.random.seed(2020)
N = 1000
real_data = np.random.binomial(3,0.5,N)
plt.hist(real_data, bins = 4);
# qGAN with a 2-qubit generator: 4 basis states cover the data range [0, 3].
n = 2
num_qubits = [n]
num_epochs = 100
batch_size = 100
bounds = [0,3]
qgan = QGAN(data = real_data,
            num_qubits = num_qubits,
            batch_size = batch_size,
            num_epochs = num_epochs,
            bounds = bounds,
            seed = 2020)
quantum_instance = QuantumInstance(backend=Aer.get_backend('statevector_simulator'))
result = qgan.run(quantum_instance)
# Sample the trained generator and plot the learned distribution.
samples_g, prob_g = qgan.generator.get_output(qgan.quantum_instance, shots=10000)
plt.hist(range(4), weights = prob_g, bins = 4);
# Generator/discriminator losses and relative entropy recorded during training.
plt.title("Loss function evolution")
plt.plot(range(num_epochs), qgan.g_loss, label='Generator')
plt.plot(range(num_epochs), qgan.d_loss, label='Discriminator')
plt.legend()
plt.show()
plt.title('Relative entropy evolution')
plt.plot(qgan.rel_entr)
plt.show()
import qiskit
qiskit.__qiskit_version__
|
https://github.com/crabster/qiskit-learning
|
crabster
|
import qiskit
from math import pi
from random import random
def random_state_gate():
    """Return a single-qubit gate applying a random U3 rotation.

    The three Euler angles are drawn uniformly from [0, 2*pi); they are
    embedded in the gate label for later identification.
    """
    angles = [2 * pi * random() for _ in range(3)]
    theta, phi, lam = angles
    circuit = qiskit.QuantumCircuit(1)
    circuit.u3(theta, phi, lam, 0)
    gate = circuit.to_gate()
    gate.name = f"$U_3$ {theta:.2f},{phi:.2f},{lam:.2f}"
    return gate
def phi_plus_gate():
    """Return a 2-qubit gate preparing the Bell state |phi+> = (|00> + |11>)/sqrt(2) from |00>."""
    bell = qiskit.QuantumCircuit(2)
    # Hadamard then CNOT: the canonical Bell-pair preparation.
    bell.h(0)
    bell.cx(0, 1)
    gate = bell.to_gate()
    gate.name = "$\phi^{+}$"
    return gate
def phi_minus_gate():
    """Return a 2-qubit gate preparing the Bell state |phi-> = (|00> - |11>)/sqrt(2) from |00>.

    Fix: the original applied both X and Z after the Bell pair, which prepares
    |psi-> (up to a global phase), not |phi->. |phi-> is obtained from |phi+>
    by a single Z on the control qubit.
    """
    qc = qiskit.QuantumCircuit(2)
    qc.h(0)
    qc.cx(0, 1)
    # Z flips the sign of the |11> component, turning |phi+> into |phi->.
    qc.z(0)
    gate = qc.to_gate()
    gate.name = "$\phi^{-}$"
    return gate
def psi_plus_gate():
    """Return a 2-qubit gate preparing the Bell state |psi+> = (|01> + |10>)/sqrt(2) from |00>."""
    bell = qiskit.QuantumCircuit(2)
    bell.h(0)
    bell.cx(0, 1)
    # X on qubit 0 maps |phi+> to |psi+>.
    bell.x(0)
    gate = bell.to_gate()
    gate.name = "$\psi^{+}$"
    return gate
def psi_minus_gate():
    """Return a 2-qubit gate preparing the Bell state |psi-> = (|01> - |10>)/sqrt(2) from |00>.

    Fixes two defects in the original:
    1. Only Z was applied after the Bell pair, which prepares |phi->, not
       |psi-> — an X is also required.
    2. The function returned the raw circuit ``qc`` instead of the converted
       gate, inconsistent with the sibling Bell-state builders.
    """
    qc = qiskit.QuantumCircuit(2)
    qc.h(0)
    qc.cx(0, 1)
    # X then Z maps |phi+> to |psi-> (up to a global phase).
    qc.x(0)
    qc.z(0)
    gate = qc.to_gate()
    gate.name = "$\psi^{-}$"
    return gate
|
https://github.com/jonasmaziero/computacao_quantica_qiskit
|
jonasmaziero
| |
https://github.com/PabloMartinezAngerosa/QAOA-uniform-convergence
|
PabloMartinezAngerosa
|
from tsp_qaoa import test_solution
from qiskit.visualization import plot_histogram
import networkx as nx
import numpy as np
import json
import csv
# Array of JSON Objects
header = ['p', 'state', 'probability', 'mean']
length_p = 10
with open('qaoa_multiple_p.csv', 'w', encoding='UTF8') as f:
writer = csv.writer(f)
# write the header
writer.writerow(header)
first_p = False
for p in range(length_p):
p = p+1
if first_p == False:
job_2, G, UNIFORM_CONVERGENCE_SAMPLE = test_solution(p=p)
first_p = True
else:
job_2, G, UNIFORM_CONVERGENCE_SAMPLE = test_solution(grafo=G, p=p)
# Sort the JSON data based on the value of the brand key
UNIFORM_CONVERGENCE_SAMPLE.sort(key=lambda x: x["mean"])
mean = UNIFORM_CONVERGENCE_SAMPLE[0]["mean"]
print(mean)
state = 0
for probability in UNIFORM_CONVERGENCE_SAMPLE[0]["probabilities"]:
state += 1
writer.writerow([p,state, probability,mean])
|
https://github.com/khalilguy/QiskitHackathon
|
khalilguy
|
# Importing standard Qiskit libraries and configuring account
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
import qiskit
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
from qiskit import IBMQ
IBMQ.save_account('7e245f54848bdbcc6bedd42fcafcd2fbe8f81b765b2537e32d39f812c3ccc2e9c755a6ac3e3edc7529982f02954bff4b84cba76cef7fe71928b9f01b092feedf')
simulator = Aer.get_backend('qasm_simulator')
gs = simulator.configuration().basis_gates
len(gs)
gate_dictionary = {}
for i in range(len(gs)):
gate_dictionary[i] = gs[i]
print(gate_dictionary)
g = [i for i in range(33)]
print(g)
single_qubit_gate_dictionary = {0:'id',1:"u1", 2:"u2",3:"u3"}
rand_number_of_qubits = np.random.randint(1,15)
rand_number_of_gates = np.random.randint(2,4)
rand_qubit_for_gate = np.random.randint(1,rand_number_of_qubits)
random_circuit = QuantumCircuit(1,1)
single_qubit_gate_dictionary[0]
from inspect import getmembers, isfunction
functions_list = [o for o in getmembers(qiskit.circuit.library.standard_gates)]
functions_list
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import numpy as np
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import BasicAer
from qiskit.compiler import transpile
from qiskit.quantum_info.operators import Operator, Pauli
from qiskit.quantum_info import process_fidelity
from qiskit.extensions import RXGate, XGate, CXGate
XX = Operator([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]])
XX
XX.data
input_dim, output_dim = XX.dim
input_dim, output_dim
op = Operator(np.random.rand(2 ** 1, 2 ** 2))
print('Input dimensions:', op.input_dims())
print('Output dimensions:', op.output_dims())
op = Operator(np.random.rand(6, 6))
print('Input dimensions:', op.input_dims())
print('Output dimensions:', op.output_dims())
# Force input dimension to be (4,) rather than (2, 2)
op = Operator(np.random.rand(2 ** 1, 2 ** 2), input_dims=[4])
print('Input dimensions:', op.input_dims())
print('Output dimensions:', op.output_dims())
# Specify system is a qubit and qutrit
op = Operator(np.random.rand(6, 6),
input_dims=[2, 3], output_dims=[2, 3])
print('Input dimensions:', op.input_dims())
print('Output dimensions:', op.output_dims())
print('Dimension of input system 0:', op.input_dims([0]))
print('Dimension of input system 1:', op.input_dims([1]))
# Create an Operator from a Pauli object
pauliXX = Pauli('XX')
Operator(pauliXX)
# Create an Operator for a Gate object
Operator(CXGate())
# Create an operator from a parameterized Gate object
Operator(RXGate(np.pi / 2))
# Create an operator from a QuantumCircuit object
circ = QuantumCircuit(10)
circ.h(0)
for j in range(1, 10):
circ.cx(j-1, j)
# Convert circuit to an operator by implicit unitary simulation
Operator(circ)
# Create an operator
XX = Operator(Pauli('XX'))
# Add to a circuit
circ = QuantumCircuit(2, 2)
circ.append(XX, [0, 1])
circ.measure([0,1], [0,1])
circ.draw('mpl')
backend = BasicAer.get_backend('qasm_simulator')
circ = transpile(circ, backend, basis_gates=['u1','u2','u3','cx'])
job = backend.run(circ)
job.result().get_counts(0)
# Add to a circuit
circ2 = QuantumCircuit(2, 2)
circ2.append(Pauli('XX'), [0, 1])
circ2.measure([0,1], [0,1])
circ2.draw()
A = Operator(Pauli('X'))
B = Operator(Pauli('Z'))
A.tensor(B)
A = Operator(Pauli('X'))
B = Operator(Pauli('Z'))
A.expand(B)
A = Operator(Pauli('X'))
B = Operator(Pauli('Z'))
A.compose(B)
A = Operator(Pauli('X'))
B = Operator(Pauli('Z'))
A.compose(B, front=True)
# Compose XZ with an 3-qubit identity operator
op = Operator(np.eye(2 ** 3))
XZ = Operator(Pauli('XZ'))
op.compose(XZ, qargs=[0, 2])
# Compose YX in front of the previous operator
op = Operator(np.eye(2 ** 3))
YX = Operator(Pauli('YX'))
op.compose(XZ, qargs=[0, 2], front=True)
XX = Operator(Pauli('XX'))
YY = Operator(Pauli('YY'))
ZZ = Operator(Pauli('ZZ'))
op = 0.5 * (XX + YY - 3 * ZZ)
op
op.is_unitary()
# Compose with a matrix passed as a list
Operator(np.eye(2)).compose([[0, 1], [1, 0]])
Operator(Pauli('X')) == Operator(XGate())
Operator(XGate()) == np.exp(1j * 0.5) * Operator(XGate())
# Two operators which differ only by phase
op_a = Operator(XGate())
op_b = np.exp(1j * 0.5) * Operator(XGate())
# Compute process fidelity
F = process_fidelity(op_a, op_b)
print('Process fidelity =', F)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/ionq-samples/qiskit-getting-started
|
ionq-samples
|
#import Aer here, before calling qiskit_ionq_provider
from qiskit import Aer
from qiskit_ionq import IonQProvider
#Call provider and set token value
provider = IonQProvider(token='my token')
provider.backends()
from qiskit import *
#import qiskit.aqua as aqua
#from qiskit.quantum_info import Pauli
#from qiskit.aqua.operators.primitive_ops import PauliOp
from qiskit.circuit.library import PhaseEstimation
from qiskit import QuantumCircuit
from matplotlib import pyplot as plt
import numpy as np
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.chemistry.drivers import Molecule
def buildControlledT(p, m):
    """Build one iteration circuit for iterative phase estimation of the T gate.

    Args:
        p: Phase-correction angle in radians, accumulated from previously
            measured bits (applied as an Rz on the ancilla).
        m: Iteration index; the controlled-T is applied 2**m times so this
            iteration extracts the corresponding bit of the eigenphase.

    Returns:
        QuantumCircuit with 2 qubits (qubit 0 is the ancilla) and 1 classical bit.
    """
    # initialize the circuit
    qc = QuantumCircuit(2, 1)
    # Hardmard on ancilla, now in |+>
    qc.h(0)
    # initialize to |1>  (eigenstate of T with eigenvalue e^{i*pi/4})
    qc.x(1)
    # applying T gate to qubit 1, 2**m times; cp(pi/4) is controlled-T
    for i in range(2**m):
        qc.cp(np.pi/4, 0, 1)
    # phase correction
    qc.rz(p, 0)
    # inverse QFT (in other words, just measuring in the x-basis)
    qc.h(0)
    qc.measure([0],[0])
    return qc
def IPEA(k, backend_string):
    """Run iterative phase estimation (IPEA) to extract k bits of the T-gate phase.

    Args:
        k: Number of phase bits to estimate (one circuit execution per bit).
        backend_string: 'qpu' for the IonQ hardware backend (polls until the
            job completes), 'qasm' for the local Aer simulator.

    Returns:
        List of measured bits in the order they were obtained, i.e. from
        iteration i = k-1 down to i = 0.
    """
    # get backend
    if backend_string == 'qpu':
        backend = provider.get_backend('ionq_qpu')
    elif backend_string == 'qasm':
        backend = Aer.get_backend('qasm_simulator')
    # bits
    bits = []
    # phase correction
    phase = 0.0
    # loop over iterations
    for i in range(k-1, -1, -1):
        # construct the circuit
        qc = buildControlledT(phase, i)
        # run the circuit
        job = execute(qc, backend)
        if backend_string == 'qpu':
            from qiskit.providers.jobstatus import JobStatus
            import time
            # Check if job is done
            while job.status() is not JobStatus.DONE:
                print("Job status is", job.status() )
                time.sleep(60)
                # grab a coffee! This can take up to a few minutes.
            # once we break out of that while loop, we know our job is finished
            print("Job status is", job.status() )
            print(job.get_counts()) # these counts are the “true” counts from the actual QPU Run
        # get result
        result = job.result()
        # get current bit: the most frequent measurement outcome
        this_bit = int(max(result.get_counts(), key=result.get_counts().get))
        print(result.get_counts())
        bits.append(this_bit)
        # update phase correction: feed the bit just measured back into
        # the Rz angle for the next (more significant) iteration
        phase /= 2
        phase -= (2 * np.pi * this_bit / 4.0)
    return bits
def eig_from_bits(bits):
eig = 0.
m = len(bits)
# loop over all bits
for k in range(len(bits)):
eig += bits[k] / (2**(m-k))
#eig *= 2*np.pi
return eig
# perform IPEA
backend = 'qasm'
bits = IPEA(5, backend)
print(bits)
# re-construct energy
eig = eig_from_bits(bits)
print(eig)
#perform IPEA with different values of n
n_values = []
eig_values = []
for i in range(1, 8):
n_values.append(i)
# perform IPEA
backend = 'qasm'
bits = IPEA(i, backend)
# re-construct energy
eig = eig_from_bits(bits)
eig_values.append(eig)
n_values, eig_values = np.array(n_values), np.array(eig_values)
plt.plot(n_values, eig_values)
plt.xlabel('n (bits)', fontsize=15)
plt.ylabel(r'$\phi$', fontsize=15)
plt.title(r'$\phi$ vs. n', fontsize=15)
# perform IPEA
backend = 'qpu'
bits = IPEA(5, backend)
print(bits)
# re-construct energy
eig = eig_from_bits(bits)
print(eig)
#perform IPEA with different values of n
n_values = []
eig_values = []
for i in range(1, 8):
n_values.append(i)
# perform IPEA
backend = 'qpu'
bits = IPEA(i, backend)
# re-construct energy
eig = eig_from_bits(bits)
eig_values.append(eig)
n_values, eig_values = np.array(n_values), np.array(eig_values)
plt.plot(n_values, eig_values)
plt.xlabel('n (bits)', fontsize=15)
plt.ylabel(r'$\phi$', fontsize=15)
plt.title(r'$\phi$ vs. n', fontsize=15)
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit
top = QuantumCircuit(1)
top.x(0);
bottom = QuantumCircuit(2)
bottom.cry(0.2, 0, 1);
tensored = bottom.tensor(top)
tensored.draw('mpl')
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from sklearn.datasets import make_blobs
# example dataset
features, labels = make_blobs(n_samples=20, n_features=2, centers=2, random_state=3, shuffle=True)
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
features = MinMaxScaler(feature_range=(0, np.pi)).fit_transform(features)
train_features, test_features, train_labels, test_labels = train_test_split(
features, labels, train_size=15, shuffle=False
)
# number of qubits is equal to the number of features
num_qubits = 2
# number of steps performed during the training procedure
tau = 100
# regularization parameter
C = 1000
from qiskit import BasicAer
from qiskit.circuit.library import ZFeatureMap
from qiskit.utils import algorithm_globals
from qiskit_machine_learning.kernels import FidelityQuantumKernel
algorithm_globals.random_seed = 12345
feature_map = ZFeatureMap(feature_dimension=num_qubits, reps=1)
qkernel = FidelityQuantumKernel(feature_map=feature_map)
from qiskit_machine_learning.algorithms import PegasosQSVC
pegasos_qsvc = PegasosQSVC(quantum_kernel=qkernel, C=C, num_steps=tau)
# training
pegasos_qsvc.fit(train_features, train_labels)
# testing
pegasos_score = pegasos_qsvc.score(test_features, test_labels)
print(f"PegasosQSVC classification test score: {pegasos_score}")
grid_step = 0.2
margin = 0.2
grid_x, grid_y = np.meshgrid(
np.arange(-margin, np.pi + margin, grid_step), np.arange(-margin, np.pi + margin, grid_step)
)
meshgrid_features = np.column_stack((grid_x.ravel(), grid_y.ravel()))
meshgrid_colors = pegasos_qsvc.predict(meshgrid_features)
import matplotlib.pyplot as plt
plt.figure(figsize=(5, 5))
meshgrid_colors = meshgrid_colors.reshape(grid_x.shape)
plt.pcolormesh(grid_x, grid_y, meshgrid_colors, cmap="RdBu", shading="auto")
plt.scatter(
train_features[:, 0][train_labels == 0],
train_features[:, 1][train_labels == 0],
marker="s",
facecolors="w",
edgecolors="r",
label="A train",
)
plt.scatter(
train_features[:, 0][train_labels == 1],
train_features[:, 1][train_labels == 1],
marker="o",
facecolors="w",
edgecolors="b",
label="B train",
)
plt.scatter(
test_features[:, 0][test_labels == 0],
test_features[:, 1][test_labels == 0],
marker="s",
facecolors="r",
edgecolors="r",
label="A test",
)
plt.scatter(
test_features[:, 0][test_labels == 1],
test_features[:, 1][test_labels == 1],
marker="o",
facecolors="b",
edgecolors="b",
label="B test",
)
plt.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0)
plt.title("Pegasos Classification")
plt.show()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/swe-train/qiskit__qiskit
|
swe-train
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
r"""
Channel event manager for pulse schedules.
This module provides a `ChannelEvents` class that manages a series of instructions for a
pulse channel. Channel-wise filtering of the pulse program makes
the arrangement of channels easier in the core drawer function.
The `ChannelEvents` class is expected to be called by other programs (not by end-users).
The `ChannelEvents` class instance is created with the class method ``load_program``:
.. code-block:: python
event = ChannelEvents.load_program(sched, DriveChannel(0))
The `ChannelEvents` is created for a specific pulse channel and loosely assorts pulse
instructions within the channel with different visualization purposes.
Phase and frequency related instructions are loosely grouped as frame changes.
The instantaneous value of those operands are combined and provided as ``PhaseFreqTuple``.
Instructions that have finite duration are grouped as waveforms.
The grouped instructions are returned as an iterator by the corresponding method call:
.. code-block:: python
for t0, frame, instruction in event.get_waveforms():
...
for t0, frame_change, instructions in event.get_frame_changes():
...
The class method ``get_waveforms`` returns the iterator of waveform type instructions with
the ``PhaseFreqTuple`` (frame) at the time when instruction is issued.
This is because a pulse envelope of ``Waveform`` may be modulated with a
phase factor $exp(-i \omega t - \phi)$ with frequency $\omega$ and phase $\phi$ and
appear on the canvas. Thus, it is better to tell users in which phase and frequency
the pulse envelope is modulated from a viewpoint of program debugging.
On the other hand, the class method ``get_frame_changes`` returns a ``PhaseFreqTuple`` that
represents a total amount of change at that time because it is convenient to know
the operand value itself when we debug a program.
Because frame change type instructions are usually zero duration, multiple instructions
can be issued at the same time and those operand values should be appropriately
combined. In Qiskit Pulse we have set and shift type instructions for the frame control,
the set type instruction will be converted into the relevant shift amount for visualization.
Note that these instructions are not interchangeable and the order should be kept.
For example:
.. code-block:: python
sched1 = Schedule()
sched1 = sched1.insert(0, ShiftPhase(-1.57, DriveChannel(0))
sched1 = sched1.insert(0, SetPhase(3.14, DriveChannel(0))
sched2 = Schedule()
sched2 = sched2.insert(0, SetPhase(3.14, DriveChannel(0))
sched2 = sched2.insert(0, ShiftPhase(-1.57, DriveChannel(0))
In this example, ``sched1`` and ``sched2`` will have different frames.
On the drawer canvas, the total frame change amount of +3.14 should be shown for ``sched1``,
while ``sched2`` is +1.57. Since the `SetPhase` and the `ShiftPhase` instruction behave
differently, we cannot simply sum up the operand values in visualization output.
It should be also noted that zero duration instructions issued at the same time will be
overlapped on the canvas. Thus it is convenient to plot a total frame change amount rather
than plotting each operand value bound to the instruction.
"""
from __future__ import annotations
from collections import defaultdict
from collections.abc import Iterator
from qiskit import pulse, circuit
from qiskit.visualization.pulse_v2.types import PhaseFreqTuple, PulseInstruction
class ChannelEvents:
    """Channel event manager."""

    # Instructions with finite duration, rendered as waveforms on the canvas.
    _waveform_group = (
        pulse.instructions.Play,
        pulse.instructions.Delay,
        pulse.instructions.Acquire,
    )
    # Zero-duration frame-control instructions (phase/frequency updates).
    _frame_group = (
        pulse.instructions.SetFrequency,
        pulse.instructions.ShiftFrequency,
        pulse.instructions.SetPhase,
        pulse.instructions.ShiftPhase,
    )

    def __init__(
        self,
        waveforms: dict[int, pulse.Instruction],
        frames: dict[int, list[pulse.Instruction]],
        channel: pulse.channels.Channel,
    ):
        """Create new event manager.

        Args:
            waveforms: List of waveforms shown in this channel.
            frames: List of frame change type instructions shown in this channel.
            channel: Channel object associated with this manager.
        """
        self._waveforms = waveforms
        self._frames = frames
        self.channel = channel

        # initial frame (overridden via set_config)
        self._init_phase = 0.0
        self._init_frequency = 0.0

        # time resolution (overridden via set_config)
        self._dt = 0.0

    @classmethod
    def load_program(cls, program: pulse.Schedule, channel: pulse.channels.Channel):
        """Load a pulse program represented by ``Schedule``.

        Args:
            program: Target ``Schedule`` to visualize.
            channel: The channel managed by this instance.

        Returns:
            ChannelEvents: The channel event manager for the specified channel.
        """
        waveforms = {}
        frames = defaultdict(list)

        # parse instructions, keyed by start time t0
        for t0, inst in program.filter(channels=[channel]).instructions:
            if isinstance(inst, cls._waveform_group):
                if inst.duration == 0:
                    # special case, duration of delay can be zero
                    continue
                waveforms[t0] = inst
            elif isinstance(inst, cls._frame_group):
                frames[t0].append(inst)

        return ChannelEvents(waveforms, frames, channel)

    def set_config(self, dt: float, init_frequency: float, init_phase: float):
        """Setup system status.

        Args:
            dt: Time resolution in sec.
            init_frequency: Modulation frequency in Hz.
            init_phase: Initial phase in rad.
        """
        # Fall back to safe defaults when None/0 is supplied.
        self._dt = dt or 1.0
        self._init_frequency = init_frequency or 0.0
        self._init_phase = init_phase or 0.0

    def get_waveforms(self) -> Iterator[PulseInstruction]:
        """Return waveform type instructions with frame."""
        # Frame changes are reverse-sorted so the earliest event sits at the
        # end of the list and can be popped off as time advances.
        sorted_frame_changes = sorted(self._frames.items(), key=lambda x: x[0], reverse=True)
        sorted_waveforms = sorted(self._waveforms.items(), key=lambda x: x[0])

        # bind phase and frequency with instruction
        phase = self._init_phase
        frequency = self._init_frequency
        for t0, inst in sorted_waveforms:
            is_opaque = False

            # Accumulate every frame change issued at or before this waveform.
            while len(sorted_frame_changes) > 0 and sorted_frame_changes[-1][0] <= t0:
                _, frame_changes = sorted_frame_changes.pop()
                phase, frequency = ChannelEvents._calculate_current_frame(
                    frame_changes=frame_changes, phase=phase, frequency=frequency
                )

            # Convert parameter expression into float (unbound params -> 0)
            if isinstance(phase, circuit.ParameterExpression):
                phase = float(phase.bind({param: 0 for param in phase.parameters}))
            if isinstance(frequency, circuit.ParameterExpression):
                frequency = float(frequency.bind({param: 0 for param in frequency.parameters}))

            frame = PhaseFreqTuple(phase, frequency)

            # Check if pulse has unbound parameters
            if isinstance(inst, pulse.Play):
                is_opaque = inst.pulse.is_parameterized()

            yield PulseInstruction(t0, self._dt, frame, inst, is_opaque)

    def get_frame_changes(self) -> Iterator[PulseInstruction]:
        """Return frame change type instructions with total frame change amount."""
        # TODO parse parametrised FCs correctly

        sorted_frame_changes = sorted(self._frames.items(), key=lambda x: x[0])

        phase = self._init_phase
        frequency = self._init_frequency
        for t0, frame_changes in sorted_frame_changes:
            is_opaque = False

            pre_phase = phase
            pre_frequency = frequency
            phase, frequency = ChannelEvents._calculate_current_frame(
                frame_changes=frame_changes, phase=phase, frequency=frequency
            )

            # keep parameter expression to check either phase or frequency is parameterized
            frame = PhaseFreqTuple(phase - pre_phase, frequency - pre_frequency)

            # remove parameter expressions to find if next frame is parameterized
            if isinstance(phase, circuit.ParameterExpression):
                phase = float(phase.bind({param: 0 for param in phase.parameters}))
                is_opaque = True
            if isinstance(frequency, circuit.ParameterExpression):
                frequency = float(frequency.bind({param: 0 for param in frequency.parameters}))
                is_opaque = True

            yield PulseInstruction(t0, self._dt, frame, frame_changes, is_opaque)

    @classmethod
    def _calculate_current_frame(
        cls, frame_changes: list[pulse.instructions.Instruction], phase: float, frequency: float
    ) -> tuple[float, float]:
        """Calculate the current frame from the previous frame.

        If parameter is unbound phase or frequency accumulation with this instruction is skipped.

        Args:
            frame_changes: List of frame change instructions at a specific time.
            phase: Phase of previous frame.
            frequency: Frequency of previous frame.

        Returns:
            Phase and frequency of new frame.
        """
        # Set-type instructions overwrite; shift-type instructions accumulate.
        # Order matters, so apply the instructions exactly as given.
        for frame_change in frame_changes:
            if isinstance(frame_change, pulse.instructions.SetFrequency):
                frequency = frame_change.frequency
            elif isinstance(frame_change, pulse.instructions.ShiftFrequency):
                frequency += frame_change.frequency
            elif isinstance(frame_change, pulse.instructions.SetPhase):
                phase = frame_change.phase
            elif isinstance(frame_change, pulse.instructions.ShiftPhase):
                phase += frame_change.phase

        return phase, frequency
|
https://github.com/hritiksauw199/Optimized-Shor-Algorithm-for-factoring
|
hritiksauw199
|
import matplotlib.pyplot as plt
import numpy as np
from qiskit import IBMQ, QuantumCircuit, Aer, transpile, assemble
from qiskit.visualization import plot_histogram
from math import gcd
from numpy.random import randint
import pandas as pd
from qiskit.providers.ibmq import least_busy
from fractions import Fraction
def qft_inv(n):
    """Return an n-qubit inverse quantum Fourier transform circuit."""
    circuit = QuantumCircuit(n)
    # Undo the final swaps of the forward QFT first.
    for low in range(n // 2):
        circuit.swap(low, n - low - 1)
    # Reversed rotation ladder: negative controlled phases, then a Hadamard.
    for target in range(n):
        for control in range(target):
            circuit.cp(-np.pi / float(2 ** (target - control)), control, target)
        circuit.h(target)
    circuit.name = "QFT_INV"
    return circuit
n_count = 4
n = 5
qc = QuantumCircuit(n_count+4, n_count)
for q in range(n_count):
qc.h(q)
qc.cx(0,4)
qc.cx(1,5)
qc.cx(2,6)
qc.cx(3,7)
qc.append(qft_inv(n_count), range(n_count))
qc.measure(range(n_count), range(n_count))
qc.draw()
aer_sim = Aer.get_backend('aer_simulator')
t_qc = transpile(qc, aer_sim)
qobj = assemble(t_qc)
results = aer_sim.run(qobj).result()
counts = results.get_counts()
plot_histogram(counts)
rows, measured_phases = [], []
for output in counts:
decimal = int(output, 2) # Convert (base 2) string to decimal
phase = decimal/(2**n_count) # Find corresponding eigenvalue
measured_phases.append(phase)
# Add these values to the rows in our table:
rows.append([f"{output}(bin) = {decimal:>3}(dec)",
f"{decimal}/{2**n_count} = {phase:.2f}"])
# Print the rows in a table
headers=["Register Output", "Phase"]
df = pd.DataFrame(rows, columns=headers)
print(df)
rows = []
for phase in measured_phases:
frac = Fraction(phase).limit_denominator(15)
rows.append([phase, f"{frac.numerator}/{frac.denominator}", frac.denominator])
# Print as a table
headers=["Phase", "Fraction", "Guess for r"]
df = pd.DataFrame(rows, columns=headers)
print(df)
from math import gcd # greatest common divisor
gcd(255,21)
|
https://github.com/Advanced-Research-Centre/QPULBA
|
Advanced-Research-Centre
|
from qiskit import QuantumCircuit
import numpy as np
import random
from qiskit import Aer, execute
from math import log2, ceil, pi
circ = QuantumCircuit(8)
simulator = Aer.get_backend('statevector_simulator')
def disp_isv(circ, msg="", all=True, precision=1e-8):
    """Print the statevector of ``circ`` using the module-level ``simulator``.

    Args:
        circ: QuantumCircuit to simulate.
        msg: Label appended to the header line.
        all: If True, print every amplitude; otherwise only those whose
            magnitude exceeds ``precision``. (NOTE(review): shadows the
            builtin ``all`` — rename would change the keyword interface.)
        precision: Magnitude threshold applied when ``all`` is False.
    """
    sim_res = execute(circ, simulator).result()
    statevector = sim_res.get_statevector(circ)
    # Number of qubits follows from the statevector length (2**qb amplitudes).
    qb = int(log2(len(statevector)))
    print("\n============ State Vector ============", msg)
    # s doubles as the basis-state index for the |...> label.
    s = 0
    for i in statevector:
        if (all == True): print(' ({:.5f}) |{:0{}b}>'.format(i,s,qb))
        else:
            if (abs(i) > precision): print(' ({:+.5f}) |{:0{}b}>'.format(i,s,qb))
        s = s+1
    print("============..............============")
    return
# Step 1
circ.h(1)#ry(round(pi * random.random(),2),1)
circ.h(2)#ry(round(pi * random.random(),2),2)
circ.barrier()
disp_isv(circ,"Step 1",False,1e-4)
# Step 2
aU0 = round(pi * random.random(),2)
circ.ry(aU0,0)
circ.barrier()
disp_isv(circ,"Step 2",False,1e-4)
# Step 3
#circ.cx(0,4)
#circ.cx(1,5)
#circ.cx(2,6)
circ.barrier()
disp_isv(circ,"Step 3",False,1e-4)
# Step 4
circ.x(0)
circ.toffoli(0,1,3)
circ.x(0)
circ.toffoli(0,2,3)
circ.barrier()
disp_isv(circ,"Step 4",False,1e-4)
# Step 5
circ.cx(3,7)
circ.barrier()
disp_isv(circ,"Step 4",False,1e-4)
# Step 4
circ.toffoli(0,2,3)
circ.x(0)
circ.toffoli(0,1,3)
circ.x(0)
circ.barrier()
disp_isv(circ,"Step 4",False,1e-4)
## Step 5
#circ.ry(-aU0,0)
#circ.h(1)
#circ.h(2)
#circ.barrier()
#disp_isv(circ,"Step 5",False,1e-4)
## Step 6
#circ.swap(0,3)
#disp_isv(circ,"Step 6",False,1e-4)
print()
print(circ.draw())
|
https://github.com/MonitSharma/Qiskit-Summer-School-and-Quantum-Challenges
|
MonitSharma
|
!pip install -U -r grading_tools/requirements.txt
from IPython.display import clear_output
clear_output()
import numpy as np; pi = np.pi
from qiskit import QuantumCircuit, Aer, execute
from qiskit.visualization import plot_histogram
from copy import deepcopy as make_copy
def prepare_hets_circuit(depth, angle1, angle2):
    """Prepare a hardware-efficient trial state (HETS ansatz) on two qubits.

    Args:
        depth: Number of entangling layers. NOTE(review): also passed as the
            qubit count to ``QuantumCircuit(depth)``, so the circuit is only
            2 qubits wide when depth == 2 — confirm intent for other depths.
        angle1: Rotation angle shared by all initial Ry/Rz rotations.
        angle2: Rotation angle shared by the Ry/Rz rotations after each CX.

    Returns:
        QuantumCircuit implementing the ansatz.
    """
    hets_circ = QuantumCircuit(depth)
    # Initial single-qubit layer: Ry then Rz on both qubits, all with angle1.
    hets_circ.ry(angle1, 0)
    hets_circ.rz(angle1, 0)
    hets_circ.ry(angle1, 1)
    hets_circ.rz(angle1, 1)
    # Each layer: entangle, then rotate both qubits by angle2.
    for ii in range(depth):
        hets_circ.cx(0,1)
        hets_circ.ry(angle2,0)
        hets_circ.rz(angle2,0)
        hets_circ.ry(angle2,1)
        hets_circ.rz(angle2,1)
    return hets_circ
hets_circuit = prepare_hets_circuit(2, pi/2, pi/2)
hets_circuit.draw()
def measure_zz_circuit(given_circuit):
    """Return a copy of the circuit with all qubits measured in the Z basis."""
    circuit_with_meas = make_copy(given_circuit)
    circuit_with_meas.measure_all()
    return circuit_with_meas
zz_meas = measure_zz_circuit(hets_circuit)
zz_meas.draw()
simulator = Aer.get_backend('qasm_simulator')
result = execute(zz_meas, backend = simulator, shots=10000).result()
counts = result.get_counts(zz_meas)
plot_histogram(counts)
def measure_zz(given_circuit, num_shots = 10000):
    """Estimate the <ZZ> expectation value of the two-qubit trial state.

    Args:
        given_circuit: Two-qubit state-preparation circuit.
        num_shots: Number of measurement shots for the estimate.

    Returns:
        float: Sample estimate of <ZZ>, in [-1, 1].
    """
    zz_meas = measure_zz_circuit(given_circuit)
    result = execute(zz_meas, backend = simulator, shots = num_shots).result()
    counts = result.get_counts(zz_meas)
    # Outcomes absent from the counts dict simply contribute zero
    # (replaces the four repetitive missing-key if-blocks).
    n00 = counts.get('00', 0)
    n01 = counts.get('01', 0)
    n10 = counts.get('10', 0)
    n11 = counts.get('11', 0)
    total_counts = n00 + n01 + n10 + n11
    # ZZ eigenvalue is +1 for even-parity outcomes (00, 11), -1 for odd.
    zz = (n00 + n11 - n01 - n10) / total_counts
    return zz
zz = measure_zz(hets_circuit)
print("<ZZ> =", str(zz))
def measure_zi(given_circuit, num_shots = 10000):
    """Estimate the <ZI> expectation value of the two-qubit trial state.

    Args:
        given_circuit: Two-qubit state-preparation circuit.
        num_shots: Number of measurement shots for the estimate.

    Returns:
        float: Sample estimate of <ZI>, in [-1, 1].
    """
    zz_meas = measure_zz_circuit(given_circuit)
    result = execute(zz_meas, backend = simulator, shots = num_shots).result()
    counts = result.get_counts(zz_meas)
    # Outcomes absent from the counts dict simply contribute zero
    # (replaces the four repetitive missing-key if-blocks).
    n00 = counts.get('00', 0)
    n01 = counts.get('01', 0)
    n10 = counts.get('10', 0)
    n11 = counts.get('11', 0)
    total_counts = n00 + n01 + n10 + n11
    # Sign is +1 when the first bit of the outcome string is 0.
    zi = (n00 - n11 + n01 - n10) / total_counts
    return zi
def measure_iz(given_circuit, num_shots = 10000):
    """Estimate the <IZ> expectation value of the two-qubit trial state.

    Args:
        given_circuit: Two-qubit state-preparation circuit.
        num_shots: Number of measurement shots for the estimate.

    Returns:
        float: Sample estimate of <IZ>, in [-1, 1].
    """
    zz_meas = measure_zz_circuit(given_circuit)
    result = execute(zz_meas, backend = simulator, shots = num_shots).result()
    counts = result.get_counts(zz_meas)
    # Outcomes absent from the counts dict simply contribute zero
    # (replaces the four repetitive missing-key if-blocks).
    n00 = counts.get('00', 0)
    n01 = counts.get('01', 0)
    n10 = counts.get('10', 0)
    n11 = counts.get('11', 0)
    total_counts = n00 + n01 + n10 + n11
    # Sign is +1 when the last bit of the outcome string is 0.
    iz = (n00 - n11 - n01 + n10) / total_counts
    return iz
zi = measure_zi(hets_circuit)
print("<ZI> =", str(zi))
iz = measure_iz(hets_circuit)
print("<IZ> =", str(iz))
def measure_xx_circuit(given_circuit):
    """Return a copy of the circuit rotated into the X basis and measured.

    A Hadamard on each qubit maps X eigenstates onto Z eigenstates, so the
    subsequent computational-basis measurement reads out X x X.
    """
    basis_rotated = make_copy(given_circuit)
    for qubit in (0, 1):
        basis_rotated.h(qubit)
    basis_rotated.measure_all()
    return basis_rotated
xx_meas = measure_xx_circuit(hets_circuit)
xx_meas.draw()
def measure_xx(given_circuit, num_shots = 10000):
    """Estimate the <XX> expectation value of the two-qubit trial state.

    Args:
        given_circuit: Two-qubit state-preparation circuit.
        num_shots: Number of measurement shots for the estimate.

    Returns:
        float: Sample estimate of <XX>, in [-1, 1].
    """
    xx_meas = measure_xx_circuit(given_circuit)
    result = execute(xx_meas, backend = simulator, shots = num_shots).result()
    counts = result.get_counts(xx_meas)
    # Outcomes absent from the counts dict simply contribute zero
    # (replaces the four repetitive missing-key if-blocks).
    n00 = counts.get('00', 0)
    n01 = counts.get('01', 0)
    n10 = counts.get('10', 0)
    n11 = counts.get('11', 0)
    total_counts = n00 + n01 + n10 + n11
    # In the rotated basis, even-parity outcomes carry eigenvalue +1.
    xx = (n00 + n11 - n01 - n10) / total_counts
    return xx
xx = measure_xx(hets_circuit)
print("<XX> =", str(xx))
def get_energy(given_circuit, num_shots = 10000):
    """Estimate the H2 Hamiltonian energy <H> for the given trial state.

    Combines the measured Pauli expectation values with the fixed
    coefficients of the two-qubit H2 Hamiltonian (II, IZ, ZI, ZZ, XX terms).
    """
    expectations = {
        "zz": measure_zz(given_circuit, num_shots=num_shots),
        "iz": measure_iz(given_circuit, num_shots=num_shots),
        "zi": measure_zi(given_circuit, num_shots=num_shots),
        "xx": measure_xx(given_circuit, num_shots=num_shots),
    }
    return (
        -1.0523732
        + 0.39793742 * expectations["iz"]
        - 0.3979374 * expectations["zi"]
        - 0.0112801 * expectations["zz"]
        + 0.18093119 * expectations["xx"]
    )
energy = get_energy(hets_circuit)
print("The energy of the trial state is", str(energy))
hets_circuit_plus = None
hets_circuit_minus = None
### WRITE YOUR CODE BETWEEN THESE LINES - START
hets_circuit_plus = prepare_hets_circuit(2, pi/2 + 0.1*pi/2, pi/2)
hets_circuit_minus = prepare_hets_circuit(2, pi/2 - 0.1*pi/2, pi/2)
### WRITE YOUR CODE BETWEEN THESE LINES - END
energy_plus = get_energy(hets_circuit_plus, num_shots=100000)
energy_minus = get_energy(hets_circuit_minus, num_shots=100000)
print(energy_plus, energy_minus)
name = 'Pon Rahul M'
email = 'ponrahul.21it@licet.ac.in'
### Do not change the lines below
from grading_tools import grade
grade(answer=measure_xx_circuit(hets_circuit), name=name, email=email, labid='lab9', exerciseid='ex1')
grade(answer=hets_circuit_plus, name=name, email=email, labid='lab9', exerciseid='ex2')
grade(answer=hets_circuit_minus, name=name, email=email, labid='lab9', exerciseid='ex3')
energy_plus_100, energy_plus_1000, energy_plus_10000 = 0, 0, 0
energy_minus_100, energy_minus_1000, energy_minus_10000 = 0, 0, 0
### WRITE YOUR CODE BETWEEN THESE LINES - START
energy_plus_100 = get_energy(hets_circuit_plus, num_shots = 100)
energy_minus_100 = get_energy(hets_circuit_minus, num_shots = 100)
energy_plus_1000 = get_energy(hets_circuit_plus, num_shots = 1000)
energy_minus_1000 = get_energy(hets_circuit_minus, num_shots = 1000)
energy_plus_10000 = get_energy(hets_circuit_plus, num_shots = 10000)
energy_minus_10000 = get_energy(hets_circuit_minus, num_shots = 10000)
### WRITE YOUR CODE BETWEEN THESE LINES - END
print(energy_plus_100, energy_minus_100, "difference = ", energy_minus_100 - energy_plus_100)
print(energy_plus_1000, energy_minus_1000, "difference = ", energy_minus_1000 - energy_plus_1000)
print(energy_plus_10000, energy_minus_10000, "difference = ", energy_minus_10000 - energy_plus_10000)
### WRITE YOUR CODE BETWEEN THESE LINES - START
I = np.array([
[1, 0],
[0, 1]
])
X = np.array([
[0, 1],
[1, 0]
])
Z = np.array([
[1, 0],
[0, -1]
])
h2_hamiltonian = (-1.0523732) * np.kron(I, I) + \
(0.39793742) * np.kron(I, Z) + \
(-0.3979374) * np.kron(Z, I) + \
(-0.0112801) * np.kron(Z, Z) + \
(0.18093119) * np.kron(X, X)
from numpy import linalg as LA
eigenvalues, eigenvectors = LA.eig(h2_hamiltonian)
for ii, eigenvalue in enumerate(eigenvalues):
print(f"Eigenvector {eigenvectors[:,ii]} has energy {eigenvalue}")
exact_eigenvector = eigenvectors[:,np.argmin(eigenvalues)]
exact_eigenvalue = np.min(eigenvalues)
print()
print("Minimum energy is", exact_eigenvalue)
### WRITE YOUR CODE BETWEEN THESE LINES - END
|
https://github.com/WhenTheyCry96/qiskitHackathon2022
|
WhenTheyCry96
|
import warnings
warnings.filterwarnings('ignore')
# Qiskit Metal planar design: a 12 x 10 mm chip carrying four transmon
# pocket qubits (each with a charge line) arranged in a ring.
from qiskit_metal import designs, MetalGUI
design = designs.DesignPlanar()
design.overwrite_enabled = True
design.chips.main.size_x = '12mm'
design.chips.main.size_y = '10mm'
gui = MetalGUI(design)
from qiskit_metal.qlibrary.qubits.transmon_pocket_cl import TransmonPocketCL
design.delete_all_components()
# Qubit placement spans (mm): qubits sit at the corners of this box.
design_span_x = 5
design_span_y = 3
half_chip_width = design_span_x / 2
half_chip_height = design_span_y / 2
# Q1/Q4 get three connection pads (a, b, c); Q2/Q3 reuse the reduced set
# below and receive an extra pad `d` explicitly.
connection_pads_options = dict(
    a = dict(loc_W=1, loc_H=-1),
    b = dict(loc_W=1, loc_H=1),
    c = dict(loc_W=-1, loc_H=-1)
)
connection23_pads_options = dict(
    a = dict(loc_W=1, loc_H=-1),
    c = dict(loc_W=-1, loc_H=-1)
)
transmons = []
transmons.append(TransmonPocketCL(design, 'Q1',
    options=dict(pos_x=f'-{half_chip_width}mm',
                 pos_y=f'{-half_chip_height}mm',
                 connection_pads=dict(**connection_pads_options))))
# Q2/Q3 are rotated so their pads face the neighbouring qubits.
transmons.append(TransmonPocketCL(design, 'Q2',
    options=dict(pos_x=f'0mm',
                 pos_y=f'{half_chip_height}mm',
                 orientation=-90,
                 connection_pads=dict(d=dict(loc_W=-1, loc_H=1), **connection23_pads_options))))
transmons.append(TransmonPocketCL(design, 'Q3',
    options=dict(pos_x=f'0mm',
                 pos_y=f'{-half_chip_height}mm',
                 orientation=90,
                 connection_pads=dict(d=dict(loc_W=-1, loc_H=1), **connection23_pads_options))))
transmons.append(TransmonPocketCL(design, 'Q4',
    options=dict(pos_x=f'{half_chip_width}mm',
                 pos_y=f'{half_chip_height}mm',
                 orientation=180,
                 connection_pads=dict(**connection_pads_options))))
gui.rebuild()
gui.autoscale()
from qiskit_metal.qlibrary.tlines.meandered import RouteMeander
from qiskit_metal import Dict
# Shared defaults merged into every meandered route created by connect().
fillet='99.99um'
options = Dict(
    meander=Dict(
        lead_start='0.1mm',
        lead_end='0.1mm',
        asymmetry='0 um')
)
def connect(component_name: str, component1: str, pin1: str, component2: str, pin2: str,
            length: str,
            asymmetry='0 um', start_strght='0 um', end_strght='0 um', flip=False):
    """Connect two pins with a CPW."""
    # NOTE(review): this function merges the module-level mutable `options`
    # dict into each route, and `options` is re-assigned later in the
    # notebook -- connect()'s behaviour therefore depends on call order.
    myoptions = Dict(
        pin_inputs=Dict(
            start_pin=Dict(
                component=component1,
                pin=pin1),
            end_pin=Dict(
                component=component2,
                pin=pin2)),
        lead=Dict(
            start_straight=start_strght,
            end_straight=end_strght
        ),
        total_length=length,
        fillet = '99.9um')
    myoptions.update(options)
    myoptions.meander.asymmetry = asymmetry
    myoptions.meander.lead_direction_inverted = 'true' if flip else 'false'
    return RouteMeander(design, component_name, myoptions)
# Bus resonators coupling neighbouring qubits in a ring:
# Q1-Q2, Q4-Q3, Q3-Q1, Q2-Q4.  The asymmetry sign alternates so the
# meanders bow in opposite directions.
# NOTE(review): asym_v is never used.
asym_h = 100
asym_v = 100
cpw = []
cpw.append(connect('cpw1', 'Q1', 'b', 'Q2', 'a', '8 mm', f'+{asym_h}um', '0.1mm', '0.1mm'))
cpw.append(connect('cpw3', 'Q4', 'b', 'Q3', 'a', '8 mm', f'+{asym_h}um', '0.1mm', '0.1mm'))
cpw.append(connect('cpw4', 'Q3', 'd', 'Q1', 'a', '8 mm', f'-{asym_h}um', '0.1mm', '0.1mm'))
cpw.append(connect('cpw5', 'Q2', 'd', 'Q4', 'a', '8 mm', f'-{asym_h}um', '0.1mm', '0.1mm'))
gui.rebuild()
gui.autoscale()
from qiskit_metal.qlibrary.terminations.launchpad_wb_coupled import LaunchpadWirebondCoupled
# Wirebond launchpads: four readout ports (R1-R4) and four charge-control
# ports (CL1-CL4) around the chip perimeter.
# NOTE(review): offset_x is never used.
readouts_lwc = []
control_lwc = []
offset_x = 0
offset_y = 1
#Readouts
readouts_lwc.append(LaunchpadWirebondCoupled(design, 'R1',
    options = dict(
        pos_x = '-5mm',
        pos_y = f'-{half_chip_height+offset_y}mm',
        lead_length = '30um')))
readouts_lwc.append(LaunchpadWirebondCoupled(design, 'R2',
    options = dict(
        pos_x = '-1mm',
        pos_y = '4mm',
        orientation = -90,
        lead_length = '30um')))
readouts_lwc.append(LaunchpadWirebondCoupled(design, 'R3',
    options = dict(
        pos_x = '1mm',
        pos_y = '-4mm',
        orientation = 90,
        lead_length = '30um')))
readouts_lwc.append(LaunchpadWirebondCoupled(design, 'R4',
    options = dict(
        pos_x = '5mm',
        pos_y = f'{half_chip_height+offset_y}mm',
        orientation = 180,
        lead_length = '30um')))
#Controls
control_lwc.append(LaunchpadWirebondCoupled(design, 'CL1',
    options = dict(
        pos_x = '-5mm',
        pos_y = '2mm',
        lead_length = '30um')))
control_lwc.append(LaunchpadWirebondCoupled(design, 'CL2',
    options = dict(
        pos_x = '4mm',
        pos_y = '4mm',
        orientation = -90,
        lead_length = '30um')))
control_lwc.append(LaunchpadWirebondCoupled(design, 'CL3',
    options = dict(
        pos_x = '-4mm',
        pos_y = '-4mm',
        orientation = 90,
        lead_length = '30um')))
control_lwc.append(LaunchpadWirebondCoupled(design, 'CL4',
    options = dict(
        pos_x = '5mm',
        pos_y = '-2mm',
        orientation = 180,
        lead_length = '30um')))
gui.rebuild()
gui.autoscale()
# Meandered readout lines from each qubit's `c` pad to its launchpad.
readout_lines = []
asym_14 = 700
asym_23 = 700
# NOTE(review): re-assigning the module-level `options` here changes the
# lead settings merged into every subsequent connect() call; ol1 gets a
# 330um start lead, ol2-ol4 get 430um.
options = Dict(
    lead=Dict(
        start_straight='330um',
        end_straight='0um'),
    fillet='99.99um')
readout_lines.append(connect('ol1', 'Q1', 'c', 'R1', 'tie', '8 mm', f'{asym_14}um'))
options = Dict(
    lead=Dict(
        start_straight='430um',
        end_straight='0um'),
    fillet='99.99um')
readout_lines.append(connect('ol2', 'Q2', 'c', 'R2', 'tie', '8 mm', f'{asym_23}um'))
readout_lines.append(connect('ol3', 'Q3', 'c', 'R3', 'tie', '8 mm', f'{asym_23}um'))
readout_lines.append(connect('ol4', 'Q4', 'c', 'R4', 'tie', '8 mm', f'{asym_14}um'))
gui.rebuild()
gui.autoscale()
from qiskit_metal.qlibrary.tlines.anchored_path import RouteAnchors
from collections import OrderedDict
import numpy as np
control_lines = []
def connectRouteAnchor(name: str,
                       component1: str, pin1: str, component2: str, pin2: str,
                       anchor_points: OrderedDict) -> RouteAnchors:
    # Route a charge line between two pins, forcing the path through the
    # given anchor coordinates (in mm).  Uses the module-level `fillet`.
    options_line_cl = dict(
        pin_inputs = dict(start_pin = dict(component = component1, pin = pin1),
                          end_pin = dict(component = component2, pin = pin2)),
        anchors = anchor_points,
        lead = dict(start_straight = '200um',
                    end_straight = '225um'),
        fillet = fillet
        )
    return RouteAnchors(design, name, options_line_cl)
# Anchor waypoints steering each charge line from its qubit's Charge_Line
# pin to the matching launchpad, avoiding the bus/readout routes.
anchors1c = OrderedDict()
anchors1c[0] = np.array([-4, -1.42])
anchors1c[1] = np.array([-4, 2])
control_lines.append(connectRouteAnchor('line_cl1', 'Q1', 'Charge_Line', 'CL1', 'tie', anchors1c))
anchors2c = OrderedDict()
anchors2c[0] = np.array([0.08, 3.25])
anchors2c[1] = np.array([4, 3.25])
control_lines.append(connectRouteAnchor('line_cl2', 'Q2', 'Charge_Line', 'CL2', 'tie', anchors2c))
anchors3c = OrderedDict()
anchors3c[0] = np.array([-0.08, -3.25])
anchors3c[1] = np.array([-4, -3.25])
control_lines.append(connectRouteAnchor('line_cl3', 'Q3', 'Charge_Line', 'CL3', 'tie', anchors3c))
anchors4c = OrderedDict()
anchors4c[0] = np.array([4, 1.42])
anchors4c[1] = np.array([4, -2])
control_lines.append(connectRouteAnchor('line_cl4', 'Q4', 'Charge_Line', 'CL4', 'tie', anchors4c))
gui.rebuild()
gui.autoscale()
# NOTE(review): duplicated autoscale call below is harmless but redundant.
gui.autoscale()
gui.screenshot(name="full_design")
import numpy as np
from scipy.constants import c, h, pi, hbar, e
from qiskit_metal.analyses.em.cpw_calculations import guided_wavelength
# constants:
phi0 = h/(2*e)  # magnetic flux quantum (Wb)
varphi0 = phi0/(2*pi)  # reduced flux quantum
# project target parameters
f_qList = np.around(np.linspace(5.25, 5.75, 4),2) # GHz
f_rList = f_qList + 1.8 # GHz
# Josephson inductances from the transmon dispersion (anharmonicity taken
# as 300 MHz), converted to nH.
L_JJList = np.around(varphi0**2/((f_qList*1e9+300e6)**2/(8*300e6))/h*1e9, 2) # nH
# initial CPW readout lengths
def find_resonator_length(frequency, line_width, line_gap, N):
    """Return the physical CPW resonator length, as a string in mm.

    :param frequency: target resonance frequency in GHz (scalar or array)
    :param line_width: CPW center-trace width in um
    :param line_gap: CPW gap in um
    :param N: mode divisor -- 2 for a lambda/2 resonator, 4 for lambda/4
    """
    # guided_wavelength expects SI units; substrate thickness 750 um and
    # metal film thickness 200 nm match the rest of this design.
    [lambdaG, etfSqrt, q] = guided_wavelength(frequency*10**9, line_width*10**-6,
                                              line_gap*10**-6, 750*10**-6, 200*10**-9)
    return f"{lambdaG / N * 10**3} mm"
# Required lengths for the readout resonators (f_rList) and the bus
# resonators (8-9.2 GHz targets).
find_resonator_length(f_rList, 10, 6, 2)
find_resonator_length(np.around(np.linspace(8, 9.2, 4), 2), 10, 6, 2)
# Per-qubit pad geometry tuning (pad width sets the charging energy and
# hence the qubit frequency).
transmons[0].options.pad_gap = '40um'
transmons[0].options.pad_width = '550um' # 405
transmons[0].options.pad_height = '120um'
transmons[1].options.pad_gap = '40um'
transmons[1].options.pad_width = '390um' # 405
transmons[1].options.pad_height = '120um'
transmons[1].options.connection_pads.d.pad_gap='8um'
transmons[1].options.connection_pads.a.pad_gap='8um'
transmons[1].options.connection_pads.c.pad_gap='8um'
transmons[1].options.connection_pads.d.pad_width='140um'
transmons[1].options.connection_pads.a.pad_width='140um'
transmons[1].options.connection_pads.c.pad_width='150um'
transmons[2].options.pad_gap = '40um'
transmons[2].options.pad_width = '460um' # 405
transmons[2].options.pad_height = '120um'
transmons[3].options.pad_gap = '40um'
transmons[3].options.pad_width = '440um' # 405
transmons[3].options.pad_height = '120um'
# Stagger the readout lengths so each resonator gets a distinct frequency.
readout_lines[0].options.total_length = '8.63mm'
readout_lines[1].options.total_length = '8.42mm'
readout_lines[2].options.total_length = '8.24mm'
readout_lines[3].options.total_length = '8.06mm'
cpw[0].options.total_length = '7mm'
cpw[1].options.total_length = '7mm'
cpw[2].options.total_length = '7mm'
cpw[3].options.total_length = '7mm'
gui.rebuild()
gui.autoscale()
# Name HFSS variables so junction inductance/capacitance can be swept in
# the electromagnetic simulation.
qcomps = design.components # short handle (alias)
qcomps['Q1'].options['hfss_inductance'] = 'Lj1'
qcomps['Q1'].options['hfss_capacitance'] = 'Cj1'
qcomps['Q2'].options['hfss_inductance'] = 'Lj2'
qcomps['Q2'].options['hfss_capacitance'] = 'Cj2'
qcomps['Q3'].options['hfss_inductance'] = 'Lj3'
qcomps['Q3'].options['hfss_capacitance'] = 'Cj3'
qcomps['Q4'].options['hfss_inductance'] = 'Lj4'
qcomps['Q4'].options['hfss_capacitance'] = 'Cj4'
from qiskit_metal.analyses.quantization import EPRanalysis, LOManalysis
# Lumped-oscillator-model analysis of Q2 and its attached lines via an
# Ansys Q3D capacitance extraction (requires a running Ansys instance).
c1 = LOManalysis(design, "q3d")
q3d1 = c1.sim.renderer
q3d1.start()
q3d1.activate_ansys_design("Q2only_busopen", 'capacitive')
# Render Q2 plus its readout, the two attached buses and the charge line;
# the listed pins are left open-ended.
q3d1.render_design(['Q2', 'R2', 'cpw1', 'cpw5', 'ol2', 'line_cl2', 'CL2'], [('cpw5', 'end'),('cpw1', 'start')])
q3d1.add_q3d_setup(name="Setup", max_passes=15, min_converged_passes=5,percent_error=0.05)
q3d1.analyze_setup("Setup")
c1.sim.capacitance_matrix, c1.sim.units = q3d1.get_capacitance_matrix()
c1.sim.capacitance_all_passes, _ = q3d1.get_capacitance_all_passes()
c1.sim.capacitance_matrix
import scqubits as scq
from qiskit_metal.analyses.quantization.lumped_capacitive import load_q3d_capacitance_matrix
from qiskit_metal.analyses.quantization.lom_core_analysis import CompositeSystem, Cell, Subsystem, QuantumSystemRegistry
from scipy.constants import speed_of_light as c_light
import matplotlib.pyplot as plt
%matplotlib inline
# Cell 2: Transmon-2
# Build the LOM cell for Q2 from the extracted capacitance matrix plus the
# junction's lumped inductance/capacitance.
opt2 = dict(
    cap_mat = c1.sim.capacitance_matrix,
    ind_dict = {('pad_top_Q2', 'pad_bot_Q2'): 12}, # junction inductance in nH
    jj_dict = {('pad_top_Q2', 'pad_bot_Q2'): 'j2'},
    cj_dict = {('pad_top_Q2', 'pad_bot_Q2'): 1}, # junction capacitance in fF
)
cell_2 = Cell(opt2)
# subsystem 1: Transmon-2
transmon2 = Subsystem(name='transmon2', sys_type='TRANSMON', nodes=['j2'])
# Resonator Subsystems
q_opts = dict(
    Z0 = 50, # characteristic impedance in Ohm
    vp = 0.404314 * c_light # phase velocity
)
# One transmission-line resonator per connected pad: readout (c) plus the
# two bus couplers (a, d).
ro2 = Subsystem(name='c_connector_pad_Q2', sys_type='TL_RESONATOR', nodes=['c_connector_pad_Q2'], q_opts=dict(f_res = 7.22, **q_opts))
coup12 = Subsystem(name='a_connector_pad_Q2', sys_type='TL_RESONATOR', nodes=['a_connector_pad_Q2'], q_opts=dict(f_res = 7.5, **q_opts))
coup24 = Subsystem(name='d_connector_pad_Q2', sys_type='TL_RESONATOR', nodes=['d_connector_pad_Q2'], q_opts=dict(f_res = 7.5, **q_opts))
composite_sys = CompositeSystem(
    subsystems=[transmon2, ro2, coup12, coup24],
    cells=[cell_2],
    grd_node='ground_main_plane',
    nodes_force_keep=['c_connector_pad_Q2', 'a_connector_pad_Q2', 'd_connector_pad_Q2']
)
cg = composite_sys.circuitGraph()
print(cg)
# Assemble and diagonalize the coupled Hamiltonian, then extract the
# dispersive shifts (chi matrix) via compute_gs().
hilbertspace = composite_sys.create_hilbertspace()
print(hilbertspace)
hilbertspace = composite_sys.add_interaction()
hilbertspace.hamiltonian()
hamiltonian_results = composite_sys.hamiltonian_results(hilbertspace, evals_count=30)
composite_sys.compute_gs()
transmons[1].options.connection_pads.d.pad_gap
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit

# Prepare the two-qubit Bell state (|00> + |11>)/sqrt(2) and measure it.
qc = QuantumCircuit(2, 2)
qc.h(0)      # put qubit 0 into an equal superposition
qc.cx(0, 1)  # entangle qubit 1 with qubit 0
qc.measure(0, 0)
qc.measure(1, 1)
qc.draw('mpl')
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import pulse
from qiskit.providers.fake_provider import FakeArmonk
# Single-qubit mock backend, used only to supply channel/timing metadata.
backend = FakeArmonk()
# Pulse-builder tour: play/delay, phase and frequency updates, calling a
# sub-schedule, then an acquire.  The statement order inside the builder
# context defines the schedule, so do not reorder.
with pulse.build(backend) as drive_sched:
    d0 = pulse.drive_channel(0)
    a0 = pulse.acquire_channel(0)
    pulse.play(pulse.library.Constant(10, 1.0), d0)  # 10-sample constant drive
    pulse.delay(20, d0)
    pulse.shift_phase(3.14/2, d0)   # relative phase bump (~pi/2)
    pulse.set_phase(3.14, d0)       # absolute phase (~pi)
    pulse.shift_frequency(1e7, d0)  # +10 MHz
    pulse.set_frequency(5e9, d0)    # absolute 5 GHz
    # Build a reusable sub-schedule and splice it in via call().
    with pulse.build() as temp_sched:
        pulse.play(pulse.library.Gaussian(20, 1.0, 3.0), d0)
        pulse.play(pulse.library.Gaussian(20, -1.0, 3.0), d0)
    pulse.call(temp_sched)
    pulse.acquire(30, a0, pulse.MemorySlot(0))
drive_sched.draw()
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import numpy as np
from qiskit import pulse
d0 = pulse.DriveChannel(0)
# pi/2 and pi pulses: Gaussians with duration 10 dt, sigma 3 dt, and
# amplitudes 0.1 / 0.2 respectively.
x90 = pulse.Gaussian(10, 0.1, 3)
x180 = pulse.Gaussian(10, 0.2, 3)
def udd10_pos(j, n=10):
    """Fractional position of the j-th pi pulse in an n-pulse UDD sequence.

    Uhrig dynamical decoupling places pulse j (1-indexed) at
    sin^2(pi * j / (2n + 2)) of the total sequence duration.  The default
    n=10 matches the 10-pulse schedule built below, so existing
    single-argument callers are unaffected.
    """
    return np.sin(np.pi * j / (2 * n + 2)) ** 2
# Uhrig dynamical-decoupling sequence: opening x90, ten x180 pulses placed
# at the UDD fractional positions within a 300 dt window, closing x90.
with pulse.build() as udd_sched:
    pulse.play(x90, d0)
    # align_func positions each of the 10 plays at udd10_pos(1..10).
    with pulse.align_func(duration=300, func=udd10_pos):
        for _ in range(10):
            pulse.play(x180, d0)
    pulse.play(x90, d0)
udd_sched.draw()
|
https://github.com/qiskit-community/qiskit-cold-atom
|
qiskit-community
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Classes for remote cold atom backends."""
import json
from typing import List, Dict, Union
import requests
from qiskit.providers.models import BackendConfiguration
from qiskit.providers import Options
from qiskit import QuantumCircuit
from qiskit.providers import ProviderV1 as Provider
from qiskit.providers import BackendV1 as Backend
from qiskit.providers import JobStatus
from qiskit_cold_atom.spins.base_spin_backend import BaseSpinBackend
from qiskit_cold_atom.fermions.base_fermion_backend import BaseFermionBackend
from qiskit_cold_atom.circuit_tools import CircuitTools
from qiskit_cold_atom.providers.cold_atom_job import ColdAtomJob
from qiskit_cold_atom.exceptions import QiskitColdAtomError
class RemoteBackend(Backend):
    """Remote cold atom backend.

    Fetches its configuration from a REST server at construction time and
    submits/retrieves jobs over HTTP using the provider's credentials.
    """
    def __init__(self, provider: Provider, url: str):
        """
        Initialize the backend by querying the server for the backend configuration dictionary.
        Args:
            provider: The provider which need to have the correct credentials attributes in place
            url: The url of the backend server
        Raises:
            QiskitColdAtomError: If the connection to the backend server can not be established.
        """
        self.url = url
        self.username = provider.credentials["username"]
        self.token = provider.credentials["token"]
        # Get the config file from the remote server
        try:
            r = requests.get(
                self.url + "/get_config",
                params={
                    "username": self.username,
                    "token": self.token,
                },
            )
        except requests.exceptions.ConnectionError as err:
            raise QiskitColdAtomError(
                "connection to the backend server can not be established."
            ) from err
        # NOTE(review): an HTTP error status (e.g. rejected credentials) is
        # not checked here, so r.json() may fail with an unrelated error --
        # consider calling r.raise_for_status() first.
        super().__init__(configuration=BackendConfiguration.from_dict(r.json()), provider=provider)
    @classmethod
    def _default_options(cls) -> Options:
        """Return the default options.
        Returns:
            qiskit.providers.Options: A options object with default values set
        """
        return Options(shots=1)
    @property
    def credentials(self) -> Dict[str, Union[str, List[str]]]:
        """Returns: the access credentials used."""
        return self.provider().credentials
    # pylint: disable=arguments-differ, unused-argument
    def run(
        self,
        circuit: Union[QuantumCircuit, List[QuantumCircuit]],
        shots: int = 1,
        convert_wires: bool = True,
        **run_kwargs,
    ) -> ColdAtomJob:
        """
        Run a quantum circuit or list of quantum circuits.
        Args:
            circuit: The quantum circuits to be executed on the device backend
            shots: The number of measurement shots to be measured for each given circuit
            convert_wires: If True (the default), the circuits are converted to the wiring convention
                of the backend.
            run_kwargs: Additional keyword arguments that might be passed down when calling
                qiskit.execute() which will have no effect on this backend.
        Raises:
            QiskitColdAtomError: If the response from the backend does not have a job_id.
        Returns:
            A Job object through which the backend can be queried for status, result etc.
        """
        # Serialize the circuit(s) into the provider's JSON wire format.
        job_payload = CircuitTools.circuit_to_cold_atom(circuit, self, shots=shots)
        res = requests.post(
            self.url + "/post_job",
            json={
                "job": json.dumps(job_payload),
                "username": self.username,
                "token": self.token,
            },
        )
        # Surface HTTP-level failures (4xx/5xx) before parsing the body.
        res.raise_for_status()
        response = res.json()
        if "job_id" not in response:
            raise QiskitColdAtomError("The response has no job_id.")
        return ColdAtomJob(self, response["job_id"])
    def retrieve_job(self, job_id: str) -> ColdAtomJob:
        """Return a single job submitted to this backend.
        Args:
            job_id: The ID of the job to retrieve.
        Returns:
            The job with the given ID.
        Raises:
            QiskitColdAtomError: If the job retrieval failed.
        """
        retrieved_job = ColdAtomJob(backend=self, job_id=job_id)
        # Probe the job's status once to confirm it exists and is reachable.
        try:
            job_status = retrieved_job.status()
        except requests.exceptions.RequestException as request_error:
            raise QiskitColdAtomError(
                "connection to the remote backend could not be established"
            ) from request_error
        if job_status == JobStatus.ERROR:
            raise QiskitColdAtomError(f"Job with id {job_id} could not be retrieved")
        return retrieved_job
# Thin subclasses that combine the HTTP transport of RemoteBackend with the
# circuit-type-specific mixins for spin and fermionic hardware.
class RemoteSpinBackend(RemoteBackend, BaseSpinBackend):
    """Remote backend which runs spin circuits."""
class RemoteFermionBackend(RemoteBackend, BaseFermionBackend):
    """Remote backend which runs fermionic circuits."""
|
https://github.com/Tojarieh97/VQE
|
Tojarieh97
|
import numpy as np
from qiskit.opflow import X, Z, I
# Random coefficients for a two-qubit transverse-field Ising Hamiltonian:
# H = a1 (I x X) + a2 (X x I) + J21 (Z x Z).
a_1 = np.random.random_sample()
a_2 = np.random.random_sample()
J_21 = np.random.random_sample()
# In qiskit.opflow, ^ is the tensor product of Pauli operators.
transverse_ising_2_qubits = a_1*(I^X) + a_2*(X^I) + J_21*(Z^Z)
print("========== Transverse Ising Model Hamiltonian for Two Qubits ==========\n")
print(transverse_ising_2_qubits)
print()
print(transverse_ising_2_qubits.to_matrix())
print()
|
https://github.com/usamisaori/Grover
|
usamisaori
|
import qiskit
from qiskit import QuantumCircuit
from qiskit.visualization import plot_histogram
import numpy as np
from qiskit import execute, Aer
# Local QASM simulator used for all the Grover runs below.
simulator = Aer.get_backend('qasm_simulator')
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from qiskit.quantum_info import DensityMatrix, Operator
# Quantum circuit to unitary matrix
def getDensityMatrix(circuit):
    # Dense density-matrix array of the state prepared by `circuit`.
    return DensityMatrix(circuit).data
state_0 = np.array([1, 0]) # |0>
state_1 = np.array([0, 1]) # |1>
from functools import reduce


# PEP 8 discourages binding lambdas to names (E731); plain defs also give
# these helpers docstrings and useful tracebacks.
def Dag(matrix):
    """Return the conjugate transpose (dagger): Dag(M) = M†."""
    return matrix.conj().T


def Kron(*matrices):
    """Kronecker product of all arguments, e.g. Kron(state_0, state_0) is |00>."""
    return reduce(np.kron, matrices)
def pm(matrix):
    """Pretty-print a matrix, each entry formatted to three decimals."""
    for row_values in matrix:
        for entry in row_values:
            print(f"{entry:.3f}", end = " ")
        print()
# https://qiskit.org/textbook/ch-algorithms/grover.html
def initCircuit(n):
    # Prepare the uniform superposition over all n-qubit basis states
    # (Hadamard on every qubit), ending with a barrier for readability.
    circuit = QuantumCircuit(n, n)
    for i in range(n):
        circuit.h(i)
    circuit.barrier()
    return circuit
inputCircuit_3q = initCircuit(3)
inputCircuit_3q.draw(output='mpl')
def createOracle_6():
    # Phase oracle flipping the sign of |110> = decimal 6 (qubit order
    # q2 q1 q0): X on q0 maps the target to |111>, H-CCX-H on q2 acts as a
    # CCZ, and the final X undoes the basis flip.
    circuit = QuantumCircuit(3, 3)
    # Oracle for find 6
    # U_f
    circuit.x(0)
    circuit.h(2)
    circuit.ccx(0, 1, 2)
    circuit.h(2)
    circuit.x(0)
    circuit.barrier()
    return circuit
oracleCircuit_6 = createOracle_6()
oracleCircuit_6.draw(output='mpl')
# where the entry that correspond to the marked item will have a negative phase
pm(Operator(oracleCircuit_6).data)
Operator(oracleCircuit_6).data
# H R H, where R = 2|0><0| - I
def createR_3q():
    # Reflection about |000> (up to a global phase): conjugate a CCZ
    # (H-CCX-H on q2) with X on every qubit.
    circuit = QuantumCircuit(3, 3)
    circuit.x(2)
    circuit.x(0)
    circuit.x(1)
    circuit.h(2)
    circuit.ccx(0, 1, 2)
    circuit.barrier(0)
    circuit.barrier(1)
    circuit.h(2)
    circuit.x(2)
    circuit.x(0)
    circuit.x(1)
    return circuit
R_3q = createR_3q()
R_3q.draw(output='mpl')
pm(Operator(R_3q).data)
# Spot-check R's action on a few basis states.
print('|000>', Kron(state_0, state_0, state_0))
print('R|000>',Operator(R_3q).data @ Kron(state_0, state_0, state_0) )
print('|010>', Kron(state_0, state_1, state_0))
print('R|010>',Operator(R_3q).data @ Kron(state_0, state_1, state_0) )
print('|110>', Kron(state_1, state_1, state_0))
print('R|110>',Operator(R_3q).data @ Kron(state_1, state_1, state_0) )
# H R H
def createDiffuser_3q():
    # Grover diffuser (inversion about the mean): H on all qubits, the
    # reflection R about |000>, then H on all qubits again.
    circuit = QuantumCircuit(3, 3)
    circuit.h(0)
    circuit.h(1)
    circuit.h(2)
    circuit = circuit.compose(createR_3q())
    circuit.h(0)
    circuit.h(1)
    circuit.h(2)
    circuit.barrier()
    return circuit
diffuserCircuit_3q = createDiffuser_3q()
diffuserCircuit_3q.draw(output='mpl')
pm(Operator(diffuserCircuit_3q).data)
def createGroverIteration(oracle, diffuser):
    # One Grover iteration = oracle followed by the diffuser.
    return oracle.compose(diffuser)
groverIteration_3q = createGroverIteration(createOracle_6(), createDiffuser_3q())
groverIteration_3q.draw(output='mpl')
groverIteration_3q = createGroverIteration(createOracle_6(), createDiffuser_3q())
inputCircuit_3q = initCircuit(3)
inputCircuit_3q.draw(output='mpl')
# Baseline: measuring the bare superposition gives all 8 outcomes with
# roughly equal frequency.
inputCircuit_3q.measure([0, 1, 2], [0, 1, 2])
job = execute(inputCircuit_3q, simulator, shots = 10000)
results = job.result()
counts = results.get_counts(inputCircuit_3q)
print(counts)
plot_histogram(counts, figsize=(10, 5), color="#FFFFCC", title="superposition state - 3 qubits")
# Run Grover with 1, 2 and 3 iterations.  For one marked item out of
# N = 8, the success probability peaks near the optimal iteration count
# (~pi/4 * sqrt(N) ~ 2) and declines if you keep iterating.
grover_3q_1 = initCircuit(3).compose(groverIteration_3q.copy())
grover_3q_1.draw(output='mpl')
grover_3q_1.measure([0, 1, 2], [0, 1, 2])
job = execute(grover_3q_1, simulator, shots = 10000)
results = job.result()
counts = results.get_counts(grover_3q_1)
print(counts)
plot_histogram(counts, figsize=(10, 5), color="#FFBB00", title="one iteration - find 6")
grover_3q_2 = initCircuit(3).compose(groverIteration_3q.copy()).compose(groverIteration_3q.copy())
# grover_3q_2.draw(output='mpl')
grover_3q_2.measure([0, 1, 2], [0, 1, 2])
job = execute(grover_3q_2, simulator, shots = 10000)
results = job.result()
counts = results.get_counts(grover_3q_2)
print(counts)
plot_histogram(counts, figsize=(10, 5), color="#FF8822", title="two iteration - find 6")
grover_3q_3 = initCircuit(3).compose(groverIteration_3q.copy()).compose(groverIteration_3q.copy()).compose(groverIteration_3q.copy())
# grover_3q_3.draw(output='mpl')
grover_3q_3.measure([0, 1, 2], [0, 1, 2])
job = execute(grover_3q_3, simulator, shots = 10000)
results = job.result()
counts = results.get_counts(grover_3q_3)
print(counts)
plot_histogram(counts, figsize=(10, 5), color="#FFFF3F", title="three iteration - find 6")
|
https://github.com/indian-institute-of-science-qc/qiskit-aakash
|
indian-institute-of-science-qc
|
from qiskit import *
import numpy as np
%matplotlib inline
# Demonstrations of the dm_simulator's extended measure() bases:
# X, arbitrary axis N, Bell, Ensemble and Expect.
# Measure qubit 0 in the X basis after an S gate.
qc = QuantumCircuit(1,1)
qc.s(0)
qc.measure(0,0,basis='X')
backend = BasicAer.get_backend('dm_simulator')
run = execute(qc,backend)
result = run.result()
result['results'][0]['data']['densitymatrix']
# Measurement along an arbitrary axis given by the add_param vector.
qc1 = QuantumCircuit(1,1)
qc1.s(0)
qc1.measure(0,0, basis='N', add_param=np.array([1,2,3]))
backend = BasicAer.get_backend('dm_simulator')
run1 = execute(qc1,backend)
result1 = run1.result()
result1['results'][0]['data']['densitymatrix']
# Bell-basis measurement on the qubit pair selected by add_param='01'.
qc2 = QuantumCircuit(3,2)
qc2.measure(0,0,basis='Bell',add_param='01')
options2 = {
    'plot': True
}
backend = BasicAer.get_backend('dm_simulator')
run2 = execute(qc2,backend,**options2)
result2 = run2.result()
result2['results'][0]['data']['bell_probabilities01']
# Ensemble probabilities in the X basis on a GHZ state.
qc3 = QuantumCircuit(3,3)
qc3.h(0)
qc3.cx(0,1)
qc3.cx(1,2)
qc3.measure(0,0,basis='Ensemble',add_param='X')
backend = BasicAer.get_backend('dm_simulator')
options3 = {
    'plot': True
}
run3 = execute(qc3,backend,**options3)
result3 = run3.result()
result3['results'][0]['data']['ensemble_probability']
# Expectation value of the Pauli string ZIZ on a GHZ state.
qc4 = QuantumCircuit(3,3)
qc4.h(0)
qc4.cx(0,1)
qc4.cx(1,2)
qc4.measure(0,0,basis='Expect',add_param='ZIZ')
backend = BasicAer.get_backend('dm_simulator')
run4 = execute(qc4,backend)
result4 = run4.result()
result4['results'][0]['data']['Pauli_string_expectation']
|
https://github.com/carstenblank/dc-qiskit-qml
|
carstenblank
|
# -*- coding: utf-8 -*-
# Copyright 2018 Carsten Blank
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
QmlGenericStateCircuitBuilder
==============================
.. currentmodule:: dc_qiskit_qml.distance_based.hadamard.state._QmlGenericStateCircuitBuilder
The generic state circuit builder classically computes the necessary quantum state vector (sparse) and will use a
state preparing quantum routine that takes the state vector and creates a circuit. This state preparing quantum routine
must implement :py:class:`QmlSparseVectorFactory`.
.. autosummary::
:nosignatures:
QmlGenericStateCircuitBuilder
QmlGenericStateCircuitBuilder
##############################
.. autoclass:: QmlGenericStateCircuitBuilder
:members:
"""
import logging
from typing import List, Optional
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from scipy import sparse
from ._QmlStateCircuitBuilder import QmlStateCircuitBuilder
from .sparsevector import QmlSparseVectorStatePreparation
log = logging.getLogger('QmlGenericStateCircuitBuilder')
class RegisterSizes:
    """Record of the qubit count required by each register of the classifier circuit."""

    count_of_samples: int
    index_of_samples_qubits: int
    sample_space_dimensions_qubits: int
    ancilla_qubits: int
    label_qubits: int
    total_qubits: int

    def __init__(self, count_of_samples, index_of_samples_qubits, sample_space_dimensions_qubits,
                 ancilla_qubits, label_qubits, total_qubits):
        # type: (int, int, int, int, int, int) -> None
        """Store the supplied register sizes on the instance, one attribute each."""
        field_names = ('count_of_samples', 'index_of_samples_qubits',
                       'sample_space_dimensions_qubits', 'ancilla_qubits',
                       'label_qubits', 'total_qubits')
        field_values = (count_of_samples, index_of_samples_qubits,
                        sample_space_dimensions_qubits, ancilla_qubits,
                        label_qubits, total_qubits)
        for attribute, value in zip(field_names, field_values):
            setattr(self, attribute, value)
class QmlGenericStateCircuitBuilder(QmlStateCircuitBuilder):
"""
From generic training and testing data creates the quantum state vector and applies a quantum algorithm to create
a circuit.
"""
def __init__(self, state_preparation):
# type: (QmlGenericStateCircuitBuilder, QmlSparseVectorStatePreparation) -> None
"""
Create a new object, use the state preparation routine
:param state_preparation: The quantum state preparation routine to encode the quantum state
"""
self.state_preparation = state_preparation
self._last_state_vector = None # type: sparse.dok_matrix
@staticmethod
def get_binary_representation(sample_index, sample_label, entry_index, is_input, register_sizes):
# type: (int, int, int, bool, RegisterSizes) -> str
"""
Computes the binary representation of the quantum state as `str` given indices and qubit lengths
:param sample_index: the training data sample index
:param sample_label: the training data label
:param entry_index: the data sample vector index
:param is_input: True if the we encode the input instead of the training vector
:param register_sizes: qubits needed for the all registers
:return: binary representation of which the quantum state being addressed
"""
sample_index_b = "{0:b}".format(sample_index).zfill(register_sizes.index_of_samples_qubits)
sample_label_b = "{0:b}".format(sample_label).zfill(register_sizes.label_qubits)
ancillary_b = '0' if is_input else '1'
entry_index_b = "{0:b}".format(entry_index).zfill(register_sizes.sample_space_dimensions_qubits)
# Here we compose the qubit, the ordering will be essential
# However keep in mind, that the order is LSB
qubit_composition = [
sample_label_b,
entry_index_b,
sample_index_b,
ancillary_b
]
return "".join(qubit_composition)
@staticmethod
def _get_register_sizes(X_train, y_train):
# type: (List[sparse.dok_matrix], np.ndarray) -> RegisterSizes
count_of_samples, sample_space_dimension = len(X_train), X_train[0].get_shape()[0]
count_of_distinct_classes = len(set(y_train))
index_of_samples_qubits_needed = (count_of_samples - 1).bit_length()
sample_space_dimensions_qubits_needed = (sample_space_dimension - 1).bit_length()
ancilla_qubits_needed = 1
label_qubits_needed = (count_of_distinct_classes - 1).bit_length() if count_of_distinct_classes > 1 else 1
total_qubits_needed = (index_of_samples_qubits_needed + ancilla_qubits_needed
+ sample_space_dimensions_qubits_needed + label_qubits_needed)
return RegisterSizes(
count_of_samples,
index_of_samples_qubits_needed,
sample_space_dimensions_qubits_needed,
ancilla_qubits_needed,
label_qubits_needed,
total_qubits_needed
)
@staticmethod
def assemble_state_vector(X_train, y_train, X_input):
# type: (List[sparse.dok_matrix], any, sparse.dok_matrix) -> sparse.dok_matrix
"""
This method assembles the state vector for given data. The X data for training (which is a list of sparse
vectors) is encoded into a sub-space, in the other orthogonal sub-space the same input is encoded everywhere.
Also, the label is encoded.
A note: this method is used in conjunction with a generic state preparation, and this may not be the most
efficient way.
:param X_train: The training data set
:param y_train: the training class label data set
:param X_input: the unclassified input data vector
:return: The
"""
register_sizes = QmlGenericStateCircuitBuilder._get_register_sizes(X_train, y_train)
state_vector = sparse.dok_matrix((2 ** register_sizes.total_qubits, 1), dtype=complex) # type: sparse.dok_matrix
factor = 2 * register_sizes.count_of_samples * 1.0
for index_sample, (sample, label) in enumerate(zip(X_train, y_train)):
for (i, j), sample_i in sample.items():
qubit_state = QmlGenericStateCircuitBuilder.get_binary_representation(
index_sample, label, i, is_input=False, register_sizes=register_sizes
)
state_index = int(qubit_state, 2)
log.debug("Sample Entry: %s (%d): %.2f.", qubit_state, state_index, sample_i)
state_vector[state_index, 0] = sample_i
for (i, j), input_i in X_input.items():
qubit_state = QmlGenericStateCircuitBuilder.get_binary_representation(
index_sample, label, i, is_input=True, register_sizes=register_sizes
)
state_index = int(qubit_state, 2)
log.debug("Input Entry: %s (%d): %.2f.", qubit_state, state_index, input_i)
state_vector[state_index, 0] = input_i
state_vector = state_vector * (1 / np.sqrt(factor))
return state_vector
def build_circuit(self, circuit_name, X_train, y_train, X_input):
# type: (QmlGenericStateCircuitBuilder, str, List[sparse.dok_matrix], any, sparse.dok_matrix) -> QuantumCircuit
"""
Build a circuit that encodes the training (samples/labels) and input data sets into a quantum circuit.
It does so by iterating through the training data set with labels and constructs upon sample index and
vector position the to be modified amplitude. The state vector is stored into a sparse matrix of
shape (n,1) which is stored and can be accessed through :py:func:`get_last_state_vector` for
debugging purposes.
Then the routine uses a :py:class:`QmlSparseVectorStatePreparation` routine to encode the calculated state
vector into a quantum circuit.
:param circuit_name: The name of the quantum circuit
:param X_train: The training data set
:param y_train: the training class label data set
:param X_input: the unclassified input data vector
:return: The circuit containing the gates to encode the input data
"""
log.debug("Preparing state.")
log.debug("Raw Input Vector: %s" % X_input)
register_sizes = QmlGenericStateCircuitBuilder._get_register_sizes(X_train, y_train)
log.info("Qubit map: index=%d, ancillary=%d, feature=%d, label=%d", register_sizes.index_of_samples_qubits,
register_sizes.ancilla_qubits, register_sizes.sample_space_dimensions_qubits,
register_sizes.label_qubits)
ancilla = QuantumRegister(register_sizes.ancilla_qubits, "a")
index = QuantumRegister(register_sizes.index_of_samples_qubits, "i")
data = QuantumRegister(register_sizes.sample_space_dimensions_qubits, "f^S")
qlabel = QuantumRegister(register_sizes.label_qubits, "l^q")
clabel = ClassicalRegister(register_sizes.label_qubits, "l^c")
branch = ClassicalRegister(1, "b")
qc = QuantumCircuit(ancilla, index, data, qlabel, clabel, branch, name=circuit_name) # type: QuantumCircuit
self._last_state_vector = QmlGenericStateCircuitBuilder.assemble_state_vector(X_train, y_train, X_input)
return self.state_preparation.prepare_state(qc, self._last_state_vector)
def is_classifier_branch(self, branch_value):
    # type: (QmlGenericStateCircuitBuilder, int) -> bool
    """
    Decide whether a measured branch value selects the classification branch.

    Each state-preparation algorithm lays out its qubits differently, so
    the decision is delegated to the configured state-preparation object.

    :param branch_value: The measurement of the branch
    :return: True if the branch contains the classification, False if not
    """
    preparation = self.state_preparation
    return preparation.is_classifier_branch(branch_value)
def get_last_state_vector(self):
    # type: (QmlGenericStateCircuitBuilder) -> Optional[sparse.dok_matrix]
    """
    Return the sparse state vector computed by the most recent call to
    :py:func:`build_circuit`; mainly useful for debugging.

    :return: a sparse vector (shape (n, 1)) if present, otherwise None
    """
    return self._last_state_vector
|
https://github.com/TRSasasusu/qiskit-quantum-zoo
|
TRSasasusu
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This module contains the definition of a base class for quantum fourier transforms.
"""
from abc import abstractmethod
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.aqua import Pluggable, AquaError
class QFT(Pluggable):
    """Base class for quantum Fourier transforms.

    Concrete subclasses provide the matrix and circuit construction; this
    base class handles instantiation from a Pluggable configuration
    section and dispatches on the requested output mode.
    """

    @abstractmethod
    def __init__(self, *args, **kwargs):
        super().__init__()

    @classmethod
    def init_params(cls, params):
        """Instantiate the pluggable from its QFT configuration section."""
        section = params.get(Pluggable.SECTION_KEY_QFT)
        # The 'name' entry identifies the pluggable itself; everything else
        # is forwarded to the subclass constructor.
        kwargs = {key: value for key, value in section.items() if key != 'name'}
        return cls(**kwargs)

    @abstractmethod
    def _build_matrix(self):
        raise NotImplementedError

    @abstractmethod
    def _build_circuit(self, qubits=None, circuit=None, do_swaps=True):
        raise NotImplementedError

    def construct_circuit(self, mode='circuit', qubits=None, circuit=None, do_swaps=True):
        """Construct the QFT in the requested representation.

        Args:
            mode (str): 'matrix' or 'circuit'
            qubits (QuantumRegister or qubits): register or qubits to build the circuit on.
            circuit (QuantumCircuit): circuit for construction.
            do_swaps (bool): include the swaps.

        Returns:
            The matrix or circuit depending on the specified mode.

        Raises:
            AquaError: if the mode is not recognized.
        """
        if mode == 'matrix':
            return self._build_matrix()
        if mode == 'circuit':
            return self._build_circuit(qubits=qubits, circuit=circuit, do_swaps=do_swaps)
        raise AquaError('Unrecognized mode: {}.'.format(mode))
|
https://github.com/hephaex/Quantum-Computing-Awesome-List
|
hephaex
|
from qiskit import QuantumCircuit, execute, Aer
from qiskit.visualization import plot_histogram, plot_bloch_vector
from math import sqrt, pi
%config InlineBackend.figure_format = 'svg' # Makes the images look nice

# --- Qubit prepared in |1>: inspect the state vector, then measure ----
qc = QuantumCircuit(1)
initial_state = [0,1]  # amplitudes (|0>, |1>) -> the |1> state
qc.initialize(initial_state, 0)
qc.draw()
backend = Aer.get_backend('statevector_simulator')
result = execute(qc,backend).result()
state_vector = result.get_statevector()
print(state_vector)
qc.measure_all()
qc.draw()
result = execute(qc, backend).result()
counts = result.get_counts()
plot_histogram(counts)

# --- Superposition with a relative phase: (|0> + i|1>)/sqrt(2) --------
# Both outcomes are equally likely; the phase i does not affect counts.
initial_state = [1/sqrt(2), complex(0,1/sqrt(2))]
qc = QuantumCircuit(1)
qc.initialize(initial_state, 0)
qc.draw()
backend = Aer.get_backend('statevector_simulator')
result = execute(qc, backend).result()
statevector = result.get_statevector()
print(statevector)
counts = result.get_counts()
plot_histogram(counts)

# --- Biased superposition: P(0) = 1/3, P(1) = 2/3 ---------------------
initialize_state = [1/sqrt(3), sqrt(2/3)]
qc = QuantumCircuit(1)
qc.initialize(initialize_state, 0)
qc.draw()
result = execute(qc, backend).result()
counts = result.get_counts()
plot_histogram(counts)
|
https://github.com/swe-bench/Qiskit__qiskit
|
swe-bench
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=wrong-import-order
"""Main Qiskit public functionality."""
import pkgutil
# First, check for required Python and API version
from . import util
# qiskit errors operator
from .exceptions import QiskitError
# The main qiskit operators
from qiskit.circuit import ClassicalRegister
from qiskit.circuit import QuantumRegister
from qiskit.circuit import QuantumCircuit
# pylint: disable=redefined-builtin
from qiskit.tools.compiler import compile # TODO remove after 0.8
from qiskit.execute import execute
# The qiskit.extensions.x imports needs to be placed here due to the
# mechanism for adding gates dynamically.
import qiskit.extensions
import qiskit.circuit.measure
import qiskit.circuit.reset
# Allow extending this namespace. Please note that currently this line needs
# to be placed *before* the wrapper imports or any non-import code AND *before*
# importing the package you want to allow extensions for (in this case `backends`).
__path__ = pkgutil.extend_path(__path__, __name__)
# Please note these are global instances, not modules.
from qiskit.providers.basicaer import BasicAer
# Try to import the Aer provider if installed.
try:
from qiskit.providers.aer import Aer
except ImportError:
pass
# Try to import the IBMQ provider if installed.
try:
from qiskit.providers.ibmq import IBMQ
except ImportError:
pass
from .version import __version__
from .version import __qiskit_version__
|
https://github.com/swe-bench/Qiskit__qiskit
|
swe-bench
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for BackendSampler."""
import math
import unittest
from unittest.mock import patch
from test import combine
from test.python.transpiler._dummy_passes import DummyTP
import numpy as np
from ddt import ddt
from qiskit import QuantumCircuit
from qiskit.circuit.library import RealAmplitudes
from qiskit.primitives import BackendSampler, SamplerResult
from qiskit.providers import JobStatus, JobV1
from qiskit.providers.fake_provider import FakeNairobi, FakeNairobiV2
from qiskit.providers.basicaer import QasmSimulatorPy
from qiskit.test import QiskitTestCase
from qiskit.transpiler import PassManager
from qiskit.utils import optionals
BACKENDS = [FakeNairobi(), FakeNairobiV2()]
@ddt
class TestBackendSampler(QiskitTestCase):
"""Test BackendSampler"""
def setUp(self):
super().setUp()
hadamard = QuantumCircuit(1, 1)
hadamard.h(0)
hadamard.measure(0, 0)
bell = QuantumCircuit(2)
bell.h(0)
bell.cx(0, 1)
bell.measure_all()
self._circuit = [hadamard, bell]
self._target = [
{0: 0.5, 1: 0.5},
{0: 0.5, 3: 0.5, 1: 0, 2: 0},
]
self._pqc = RealAmplitudes(num_qubits=2, reps=2)
self._pqc.measure_all()
self._pqc2 = RealAmplitudes(num_qubits=2, reps=3)
self._pqc2.measure_all()
self._pqc_params = [[0.0] * 6, [1.0] * 6]
self._pqc_target = [{0: 1}, {0: 0.0148, 1: 0.3449, 2: 0.0531, 3: 0.5872}]
self._theta = [
[0, 1, 1, 2, 3, 5],
[1, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6, 7],
]
def _generate_circuits_target(self, indices):
if isinstance(indices, list):
circuits = [self._circuit[j] for j in indices]
target = [self._target[j] for j in indices]
else:
raise ValueError(f"invalid index {indices}")
return circuits, target
def _generate_params_target(self, indices):
if isinstance(indices, int):
params = self._pqc_params[indices]
target = self._pqc_target[indices]
elif isinstance(indices, list):
params = [self._pqc_params[j] for j in indices]
target = [self._pqc_target[j] for j in indices]
else:
raise ValueError(f"invalid index {indices}")
return params, target
def _compare_probs(self, prob, target):
if not isinstance(prob, list):
prob = [prob]
if not isinstance(target, list):
target = [target]
self.assertEqual(len(prob), len(target))
for p, targ in zip(prob, target):
for key, t_val in targ.items():
if key in p:
self.assertAlmostEqual(p[key], t_val, delta=0.1)
else:
self.assertAlmostEqual(t_val, 0, delta=0.1)
@combine(backend=BACKENDS)
def test_sampler_run(self, backend):
"""Test Sampler.run()."""
bell = self._circuit[1]
sampler = BackendSampler(backend=backend)
job = sampler.run(circuits=[bell], shots=1000)
self.assertIsInstance(job, JobV1)
result = job.result()
self.assertIsInstance(result, SamplerResult)
self.assertEqual(result.quasi_dists[0].shots, 1000)
self.assertEqual(result.quasi_dists[0].stddev_upper_bound, math.sqrt(1 / 1000))
self._compare_probs(result.quasi_dists, self._target[1])
@combine(backend=BACKENDS)
def test_sample_run_multiple_circuits(self, backend):
"""Test Sampler.run() with multiple circuits."""
# executes three Bell circuits
# Argument `parameters` is optional.
bell = self._circuit[1]
sampler = BackendSampler(backend=backend)
result = sampler.run([bell, bell, bell]).result()
# print([q.binary_probabilities() for q in result.quasi_dists])
self._compare_probs(result.quasi_dists[0], self._target[1])
self._compare_probs(result.quasi_dists[1], self._target[1])
self._compare_probs(result.quasi_dists[2], self._target[1])
@combine(backend=BACKENDS)
def test_sampler_run_with_parameterized_circuits(self, backend):
"""Test Sampler.run() with parameterized circuits."""
# parameterized circuit
pqc = self._pqc
pqc2 = self._pqc2
theta1, theta2, theta3 = self._theta
sampler = BackendSampler(backend=backend)
result = sampler.run([pqc, pqc, pqc2], [theta1, theta2, theta3]).result()
# result of pqc(theta1)
prob1 = {
"00": 0.1309248462975777,
"01": 0.3608720796028448,
"10": 0.09324865232050054,
"11": 0.41495442177907715,
}
self.assertDictAlmostEqual(result.quasi_dists[0].binary_probabilities(), prob1, delta=0.1)
# result of pqc(theta2)
prob2 = {
"00": 0.06282290651933871,
"01": 0.02877144385576705,
"10": 0.606654494132085,
"11": 0.3017511554928094,
}
self.assertDictAlmostEqual(result.quasi_dists[1].binary_probabilities(), prob2, delta=0.1)
# result of pqc2(theta3)
prob3 = {
"00": 0.1880263994380416,
"01": 0.6881971261189544,
"10": 0.09326232720582443,
"11": 0.030514147237179892,
}
self.assertDictAlmostEqual(result.quasi_dists[2].binary_probabilities(), prob3, delta=0.1)
@combine(backend=BACKENDS)
def test_run_1qubit(self, backend):
"""test for 1-qubit cases"""
qc = QuantumCircuit(1)
qc.measure_all()
qc2 = QuantumCircuit(1)
qc2.x(0)
qc2.measure_all()
sampler = BackendSampler(backend=backend)
result = sampler.run([qc, qc2]).result()
self.assertIsInstance(result, SamplerResult)
self.assertEqual(len(result.quasi_dists), 2)
self.assertDictAlmostEqual(result.quasi_dists[0], {0: 1}, 0.1)
self.assertDictAlmostEqual(result.quasi_dists[1], {1: 1}, 0.1)
@combine(backend=BACKENDS)
def test_run_2qubit(self, backend):
"""test for 2-qubit cases"""
qc0 = QuantumCircuit(2)
qc0.measure_all()
qc1 = QuantumCircuit(2)
qc1.x(0)
qc1.measure_all()
qc2 = QuantumCircuit(2)
qc2.x(1)
qc2.measure_all()
qc3 = QuantumCircuit(2)
qc3.x([0, 1])
qc3.measure_all()
sampler = BackendSampler(backend=backend)
result = sampler.run([qc0, qc1, qc2, qc3]).result()
self.assertIsInstance(result, SamplerResult)
self.assertEqual(len(result.quasi_dists), 4)
self.assertDictAlmostEqual(result.quasi_dists[0], {0: 1}, 0.1)
self.assertDictAlmostEqual(result.quasi_dists[1], {1: 1}, 0.1)
self.assertDictAlmostEqual(result.quasi_dists[2], {2: 1}, 0.1)
self.assertDictAlmostEqual(result.quasi_dists[3], {3: 1}, 0.1)
@combine(backend=BACKENDS)
def test_run_errors(self, backend):
"""Test for errors"""
qc1 = QuantumCircuit(1)
qc1.measure_all()
qc2 = RealAmplitudes(num_qubits=1, reps=1)
qc2.measure_all()
sampler = BackendSampler(backend=backend)
with self.assertRaises(ValueError):
sampler.run([qc1], [[1e2]]).result()
with self.assertRaises(ValueError):
sampler.run([qc2], [[]]).result()
with self.assertRaises(ValueError):
sampler.run([qc2], [[1e2]]).result()
@combine(backend=BACKENDS)
def test_run_empty_parameter(self, backend):
"""Test for empty parameter"""
n = 5
qc = QuantumCircuit(n, n - 1)
qc.measure(range(n - 1), range(n - 1))
sampler = BackendSampler(backend=backend)
with self.subTest("one circuit"):
result = sampler.run([qc], shots=1000).result()
self.assertEqual(len(result.quasi_dists), 1)
for q_d in result.quasi_dists:
quasi_dist = {k: v for k, v in q_d.items() if v != 0.0}
self.assertDictAlmostEqual(quasi_dist, {0: 1.0}, delta=0.1)
self.assertEqual(len(result.metadata), 1)
with self.subTest("two circuits"):
result = sampler.run([qc, qc], shots=1000).result()
self.assertEqual(len(result.quasi_dists), 2)
for q_d in result.quasi_dists:
quasi_dist = {k: v for k, v in q_d.items() if v != 0.0}
self.assertDictAlmostEqual(quasi_dist, {0: 1.0}, delta=0.1)
self.assertEqual(len(result.metadata), 2)
@combine(backend=BACKENDS)
def test_run_numpy_params(self, backend):
"""Test for numpy array as parameter values"""
qc = RealAmplitudes(num_qubits=2, reps=2)
qc.measure_all()
k = 5
params_array = np.random.rand(k, qc.num_parameters)
params_list = params_array.tolist()
params_list_array = list(params_array)
sampler = BackendSampler(backend=backend)
target = sampler.run([qc] * k, params_list).result()
with self.subTest("ndarrary"):
result = sampler.run([qc] * k, params_array).result()
self.assertEqual(len(result.metadata), k)
for i in range(k):
self.assertDictAlmostEqual(result.quasi_dists[i], target.quasi_dists[i], delta=0.1)
with self.subTest("list of ndarray"):
result = sampler.run([qc] * k, params_list_array).result()
self.assertEqual(len(result.metadata), k)
for i in range(k):
self.assertDictAlmostEqual(result.quasi_dists[i], target.quasi_dists[i], delta=0.1)
@combine(backend=BACKENDS)
def test_run_with_shots_option(self, backend):
"""test with shots option."""
params, target = self._generate_params_target([1])
sampler = BackendSampler(backend=backend)
result = sampler.run(
circuits=[self._pqc], parameter_values=params, shots=1024, seed=15
).result()
self._compare_probs(result.quasi_dists, target)
@combine(backend=BACKENDS)
def test_primitive_job_status_done(self, backend):
"""test primitive job's status"""
bell = self._circuit[1]
sampler = BackendSampler(backend=backend)
job = sampler.run(circuits=[bell])
_ = job.result()
self.assertEqual(job.status(), JobStatus.DONE)
def test_primitive_job_size_limit_backend_v2(self):
"""Test primitive respects backend's job size limit."""
class FakeNairobiLimitedCircuits(FakeNairobiV2):
"""FakeNairobiV2 with job size limit."""
@property
def max_circuits(self):
return 1
qc = QuantumCircuit(1)
qc.measure_all()
qc2 = QuantumCircuit(1)
qc2.x(0)
qc2.measure_all()
sampler = BackendSampler(backend=FakeNairobiLimitedCircuits())
result = sampler.run([qc, qc2]).result()
self.assertIsInstance(result, SamplerResult)
self.assertEqual(len(result.quasi_dists), 2)
self.assertDictAlmostEqual(result.quasi_dists[0], {0: 1}, 0.1)
self.assertDictAlmostEqual(result.quasi_dists[1], {1: 1}, 0.1)
def test_primitive_job_size_limit_backend_v1(self):
"""Test primitive respects backend's job size limit."""
backend = FakeNairobi()
config = backend.configuration()
config.max_experiments = 1
backend._configuration = config
qc = QuantumCircuit(1)
qc.measure_all()
qc2 = QuantumCircuit(1)
qc2.x(0)
qc2.measure_all()
sampler = BackendSampler(backend=backend)
result = sampler.run([qc, qc2]).result()
self.assertIsInstance(result, SamplerResult)
self.assertEqual(len(result.quasi_dists), 2)
self.assertDictAlmostEqual(result.quasi_dists[0], {0: 1}, 0.1)
self.assertDictAlmostEqual(result.quasi_dists[1], {1: 1}, 0.1)
@unittest.skipUnless(optionals.HAS_AER, "qiskit-aer is required to run this test")
def test_circuit_with_dynamic_circuit(self):
"""Test BackendSampler with QuantumCircuit with a dynamic circuit"""
from qiskit_aer import Aer
qc = QuantumCircuit(2, 1)
with qc.for_loop(range(5)):
qc.h(0)
qc.cx(0, 1)
qc.measure(0, 0)
qc.break_loop().c_if(0, True)
backend = Aer.get_backend("aer_simulator")
backend.set_options(seed_simulator=15)
sampler = BackendSampler(backend, skip_transpilation=True)
sampler.set_transpile_options(seed_transpiler=15)
result = sampler.run(qc).result()
self.assertDictAlmostEqual(result.quasi_dists[0], {0: 0.5029296875, 1: 0.4970703125})
def test_sequential_run(self):
"""Test sequential run."""
qc = QuantumCircuit(1)
qc.measure_all()
qc2 = QuantumCircuit(1)
qc2.x(0)
qc2.measure_all()
sampler = BackendSampler(backend=FakeNairobi())
result = sampler.run([qc]).result()
self.assertDictAlmostEqual(result.quasi_dists[0], {0: 1}, 0.1)
result2 = sampler.run([qc2]).result()
self.assertDictAlmostEqual(result2.quasi_dists[0], {1: 1}, 0.1)
result3 = sampler.run([qc, qc2]).result()
self.assertDictAlmostEqual(result3.quasi_dists[0], {0: 1}, 0.1)
self.assertDictAlmostEqual(result3.quasi_dists[1], {1: 1}, 0.1)
def test_outcome_bitstring_size(self):
"""Test that the result bitstrings are properly padded.
E.g. measuring '0001' should not get truncated to '1'.
"""
qc = QuantumCircuit(4)
qc.x(0)
qc.measure_all()
# We need a noise-free backend here (shot noise is fine) to ensure that
# the only bit string measured is "0001". With device noise, it could happen that
# strings with a leading 1 are measured and then the truncation cannot be tested.
sampler = BackendSampler(backend=QasmSimulatorPy())
result = sampler.run(qc).result()
probs = result.quasi_dists[0].binary_probabilities()
self.assertIn("0001", probs.keys())
self.assertEqual(len(probs), 1)
def test_bound_pass_manager(self):
"""Test bound pass manager."""
with self.subTest("Test single circuit"):
dummy_pass = DummyTP()
with patch.object(DummyTP, "run", wraps=dummy_pass.run) as mock_pass:
bound_pass = PassManager(dummy_pass)
sampler = BackendSampler(backend=FakeNairobi(), bound_pass_manager=bound_pass)
_ = sampler.run(self._circuit[0]).result()
self.assertEqual(mock_pass.call_count, 1)
with self.subTest("Test circuit batch"):
dummy_pass = DummyTP()
with patch.object(DummyTP, "run", wraps=dummy_pass.run) as mock_pass:
bound_pass = PassManager(dummy_pass)
sampler = BackendSampler(backend=FakeNairobi(), bound_pass_manager=bound_pass)
_ = sampler.run([self._circuit[0], self._circuit[0]]).result()
self.assertEqual(mock_pass.call_count, 2)
if __name__ == "__main__":
unittest.main()
|
https://github.com/hidemita/QiskitExamples
|
hidemita
|
# Useful additional packages
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import math
import random
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
from qiskit.tools.visualization import circuit_drawer
from qiskit import BasicAer, Aer
qc = QuantumCircuit(65, 32)
# Z-type Stabilizers
Zstab = [
[47, [0,3,5]],
[48, [1,3,4,6]],
[49, [2,4,7]],
[50, [5,8,10]],
[51, [6,8,9,11]],
[52, [7,9,12]],
[53, [10,13,15]],
[54, [11,13,14,16]],
[55, [12,14,17]],
[56, [15,18,20]],
[57, [16,18,19,21]],
[58, [17,19,22]],
[59, [20,23,25]],
[60, [21,23,24,26]],
[61, [22,24,27]],
[62, [25,28,30]],
[63, [26,28,29,31]],
[64, [27,29,32]]
]
# X-type Stabilizers
Xstab = [
[33, [0,1,3]],
[34, [1,2,4]],
[35, [3,5,6,8]],
[36, [4,6,7,9]],
[37, [8,10,11,13]],
[38, [9,11,12,14]],
[39, [13,15,16,18]],
[40, [14,16,17,19]],
[41, [18,20,21,23]],
[42, [19,21,22,24]],
[43, [23,25,26,28]],
[44, [24,26,27,29]],
[45, [28,30,31]],
[46, [29,31,32]]
]
# Make stabilizer state
Xinitialize = [
[0, 1, 3],
[2, 1, 4],
[5, 3, 6, 8],
[7, 4, 6, 9],
[10, 8, 11, 13],
[12, 9, 11, 14],
[15, 13, 16, 18],
[17, 14, 16, 19],
[20, 18, 21, 23],
[22, 19, 21, 24],
[25, 23, 26, 28],
[27, 24, 26, 29],
[30, 28, 31],
[32, 29, 31],
]
for x in Xinitialize:
q0 = x[0]
qc.h(q0)
for q1 in x[1:]:
qc.cx(q0, q1)
# Place stabilizers
for z in Zstab:
qc.h(z[0])
for q in z[1]:
qc.cz(z[0], q)
qc.h(z[0])
for x in Xstab:
qc.h(x[0])
for q in x[1]:
qc.cx(x[0], q)
qc.h(x[0])
# Measurement stabilizer errors
for m, q in enumerate(Zstab + Xstab):
qc.measure(q[0], m)
backend = Aer.get_backend('qasm_simulator')
job = execute(qc, backend, shots=1000)
job.result().get_counts(qc)
qc = QuantumCircuit(65, 32+1) # classical bit-32 for logical bit test
# Z-type Stabilizers
Zstab = [
[47, [0,3,5]],
[48, [1,3,4,6]],
[49, [2,4,7]],
[50, [5,8,10]],
# [51, [6,8,9,11]], # defect Z-cut hole 1
[52, [7,9,12]],
[53, [10,13,15]],
[54, [11,13,14,16]],
[55, [12,14,17]],
[56, [15,18,20]],
[57, [16,18,19,21]],
[58, [17,19,22]],
[59, [20,23,25]],
# [60, [21,23,24,26]], # defect Z-cut hole 2
[61, [22,24,27]],
[62, [25,28,30]],
[63, [26,28,29,31]],
[64, [27,29,32]]
]
# X-type Stabilizers
Xstab = [
[33, [0,1,3]],
[34, [1,2,4]],
[35, [3,5,6,8]],
[36, [4,6,7,9]],
[37, [8,10,11,13]],
[38, [9,11,12,14]],
[39, [13,15,16,18]],
[40, [14,16,17,19]],
[41, [18,20,21,23]],
[42, [19,21,22,24]],
[43, [23,25,26,28]],
[44, [24,26,27,29]],
[45, [28,30,31]],
[46, [29,31,32]]
]
# Make stabilizer state
Xinitialize = [
[0, 1, 3],
[2, 1, 4],
[5, 3, 6, 8],
[7, 4, 6, 9],
[10, 8, 11, 13],
[12, 9, 11, 14],
[15, 13, 16, 18],
[17, 14, 16, 19],
[20, 18, 21, 23],
[22, 19, 21, 24],
[25, 23, 26, 28],
[27, 24, 26, 29],
[30, 28, 31],
[32, 29, 31],
]
for x in Xinitialize:
q0 = x[0]
qc.h(q0)
for q1 in x[1:]:
qc.cx(q0, q1)
# Place stabilizers
for z in Zstab:
qc.h(z[0])
for q in z[1]:
qc.cz(z[0], q)
qc.h(z[0])
for x in Xstab:
qc.h(x[0])
for q in x[1]:
qc.cx(x[0], q)
qc.h(x[0])
# logical X-operator
logical_x = True
if logical_x:
qc.x(11)
qc.x(16)
qc.x(21)
# Measurement for logical bit of # [51, [6,8,9,11]], # defect Z-cut hole 1
qc.h(51)
qc.cz(51, 6)
qc.cz(51, 8)
qc.cz(51, 9)
qc.cz(51, 11)
qc.h(51)
# Measurement stabilizer errors
for m, q in enumerate(Zstab + Xstab):
qc.measure(q[0], m)
# Z-Measurement of logical bit (Observe bit-32 change, with no stabilizer errors)
qc.measure(51, 32)
backend = Aer.get_backend('qasm_simulator')
job = execute(qc, backend, shots=1000)
job.result().get_counts(qc)
|
https://github.com/mathelatics/QGSS-2023-From-Theory-to-Implementations
|
mathelatics
|
my_list = [1,2,3 ,4,5, 6,8,8,3,23,7,75,54,3]
def my_oracle(my_input, winner=7):
    """Classical oracle: report whether *my_input* is the winning entry.

    Args:
        my_input: candidate value to test.
        winner: the value the oracle recognizes; defaults to 7 to stay
            backward compatible with the original demo.

    Returns:
        bool: True if ``my_input`` equals ``winner``, False otherwise.
    """
    # Fixed: compare by value with ``==``, not identity with ``is``.
    # ``x is 7`` only happens to work because CPython caches small ints;
    # it is undefined behavior for general values.
    return my_input == winner
for index, trial_number in enumerate(my_list):
if my_oracle(trial_number) is True:
print('Winner found at index :%i'%index)
print('%i calls to the Oracle used'%(index+1))
break
from qiskit import *
# Fixed: the Aer simulator lives in the separate ``qiskit_aer`` package;
# the original ``from qiskit.qiskit_aer import Aer`` is not a real module path.
from qiskit_aer import Aer
from qiskit import QuantumCircuit
import matplotlib.pyplot as plt
import numpy as np

# Grover oracle marking |11> with a phase flip.
oracle = QuantumCircuit(2, name='oracle')
oracle.cz(0, 1)
oracle.to_gate()
oracle.draw(output='mpl')

backend = Aer.get_backend('statevector_simulator')

# Prepare a (partial) superposition and query the oracle.
grover_circ = QuantumCircuit(2, 2)
grover_circ.h(0)
grover_circ.append(oracle, [0, 1])
grover_circ.draw(output='mpl')

job = execute(grover_circ, backend)
result = job.result()
sv = result.get_statevector()
# Fixed typo: ``np.around`` (was ``np.arount``) rounds amplitudes for display.
np.around(sv, 2)
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit
top = QuantumCircuit(1)
top.x(0);
bottom = QuantumCircuit(2)
bottom.cry(0.2, 0, 1);
tensored = bottom.tensor(top)
tensored.draw('mpl')
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit_nature.mappers.second_quantization import LogarithmicMapper
mapper = LogarithmicMapper(2)
from qiskit_nature.second_q.mappers import LogarithmicMapper
mapper = LogarithmicMapper(2)
from qiskit_nature.second_q.mappers import LogarithmicMapper
mapper = LogarithmicMapper(padding=2)
from qiskit_nature.circuit.library import HartreeFock
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
converter = QubitConverter(JordanWignerMapper())
init_state = HartreeFock(num_spin_orbitals=6, num_particles=(2, 1), qubit_converter=converter)
print(init_state.draw())
from qiskit_nature.second_q.circuit.library import HartreeFock
from qiskit_nature.second_q.mappers import JordanWignerMapper, QubitConverter
converter = QubitConverter(JordanWignerMapper())
init_state = HartreeFock(num_spatial_orbitals=3, num_particles=(2, 1), qubit_converter=converter)
print(init_state.draw())
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/hephaex/Quantum-Computing-Awesome-List
|
hephaex
|
import cirq
## Data qubit and target qubit
q0, q1 = cirq.LineQubit.range(2)
## Dictionary of oracles
oracles = {'0' : [], '1' : [cirq.X(q1)], 'x' : [cirq.CNOT(q0, q1)], 'notx' : [cirq.CNOT(q0, q1), cirq.X(q1)]}
def deutsch_algorithm(oracle):
    """Yield the operations of Deutsch's algorithm for the given oracle.

    The oracle is a sequence of cirq operations acting on the module-level
    qubits q0 (data) and q1 (target). Measuring q0 at the end distinguishes
    the two oracle classes.
    """
    yield cirq.X(q1)
    yield cirq.H(q0), cirq.H(q1)
    yield oracle
    yield cirq.H(q0)
    yield cirq.measure(q0)
for key, oracle in oracles.items():
print('Circuit for {} :'. format(key))
print(cirq.Circuit.from_ops(deutsch_algorithm(oracle)), end = "\n\n")
simulator = cirq.Simulator()
for key, oracle in oracles.items():
result = simulator.run(cirq.Circuit.from_ops(deutsch_algorithm(oracle)), repetitions=20)
print('Oracle : {:<4} results: {}'. format(key, result))
import cirq
q0, q1, q2 = cirq.LineQubit.range(3)
## Oracles for constant functions
constant = ([], [cirq.X(q2)])
## Oracles for balanced functions
balanced = ([cirq.CNOT(q0, q2)],
[cirq.CNOT(q1, q2)],
[cirq.CNOT(q0, q2), cirq.CNOT(q1, q2)],
[cirq.CNOT(q0, q2), cirq.X(q2)],
[cirq.CNOT(q1, q2), cirq.X(q2)],
[cirq.CNOT(q0, q2), cirq.CNOT(q1, q2), cirq.X(q2)]
)
def your_circuit(oracle):
    """Yield a Deutsch-Jozsa-style circuit over the two input qubits q0, q1.

    Uses the phase-kickback trick on q2, queries the oracle in equal
    superposition, then combines the per-qubit results into q2 so that a
    single measurement of q2 gives the answer.
    """
    ## Phase kickback trick: put the target qubit q2 into the |-> state.
    yield cirq.X(q2), cirq.H(q2)
    ## Equal superposition over input bits
    yield cirq.H(q0), cirq.H(q1)
    ## Query the function
    yield oracle
    ## interface to get result, put last qubit into |1>
    yield cirq.H(q0), cirq.H(q1), cirq.H(q2)
    ## a final OR gate to put result in final qubit
    yield cirq.X(q0), cirq.X(q1), cirq.CCX(q0, q1, q2)
    yield cirq.measure(q2)
simulator = cirq.Simulator()
print("your result on constant functions:")
for oracle in constant:
result = simulator.run(cirq.Circuit.from_ops(your_circuit(oracle)), repetitions=10)
print(result)
print("your result on balanced functions:")
for oracle in balanced:
result = simulator.run(cirq.Circuit.from_ops(your_circuit(oracle)), repetitions=10)
print(result)
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# If you introduce a list with less colors than bars, the color of the bars will
# alternate following the sequence from the list.
import numpy as np
from qiskit.quantum_info import DensityMatrix
from qiskit import QuantumCircuit
from qiskit.visualization import plot_state_paulivec
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc = QuantumCircuit(2)
qc.h([0, 1])
qc.cz(0, 1)
qc.ry(np.pi/3, 0)
qc.rx(np.pi/5, 1)
matrix = DensityMatrix(qc)
plot_state_paulivec(matrix, color=['crimson', 'midnightblue', 'seagreen'])
|
https://github.com/ranaarp/Qiskit-Meetup-content
|
ranaarp
|
#initialization
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
# importing Qiskit
from qiskit import IBMQ, BasicAer
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
def phase_oracle(circuit, register):
    """Grover oracle: flip the phase of the |11> state via a controlled-Z."""
    circuit.cz(register[0], register[1])
qr = QuantumRegister(2)
oracleCircuit = QuantumCircuit(qr)
phase_oracle(oracleCircuit, qr)
oracleCircuit.draw(output="mpl")
def inversion_about_average(circuit, register):
    """Apply inversion about the average (diffusion) step of Grover's algorithm."""
    circuit.h(register)
    circuit.x(register)
    # Controlled-Z on the two qubits, built as H - CX - H on the target.
    circuit.h(register[1])
    circuit.cx(register[0], register[1])
    circuit.h(register[1])
    circuit.x(register)
    circuit.h(register)
qAverage = QuantumCircuit(qr)
inversion_about_average(qAverage, qr)
qAverage.draw(output='mpl')
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
groverCircuit = QuantumCircuit(qr,cr)
groverCircuit.h(qr)
phase_oracle(groverCircuit, qr)
inversion_about_average(groverCircuit, qr)
groverCircuit.measure(qr,cr)
groverCircuit.draw(output="mpl")
backend = BasicAer.get_backend('qasm_simulator')
shots = 1024
results = execute(groverCircuit, backend=backend, shots=shots).result()
answer = results.get_counts()
plot_histogram(answer)
# Load our saved IBMQ accounts and get the least busy backend device
# These are compatible with QisKit v0.11.x
# IBMQ.load_accounts()
# IBMQ.backends()
# backend_lb = least_busy(IBMQ.backends(simulator=False))
# print("Least busy backend: ", backend_lb)
# Updated to run on QisKit v0.12.0
IBMQ.load_account()
provider = IBMQ.get_provider(group='open')
backend_lb = least_busy(provider.backends(simulator=False, operational=True))
print("Least busy backend: ", backend_lb)
# Run our circuit on the least busy backend. Monitor the execution of the job in the queue
from qiskit.tools.monitor import job_monitor
backend = backend_lb
shots = 1024
job_exp = execute(groverCircuit, backend=backend, shots=shots)
job_monitor(job_exp, interval = 2)
# get the results from the computation
results = job_exp.result()
answer = results.get_counts(groverCircuit)
plot_histogram(answer)
# Initialization
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
# Importing Qiskit
from qiskit import IBMQ, BasicAer
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# Import basic plot tools
from qiskit.tools.visualization import plot_histogram
def phase_oracle(circuit, register,oracle_register):
circuit.h(oracle_register)
circuit.ccx(register[0], register[1],oracle_register)
circuit.h(oracle_register)
qr = QuantumRegister(3)
oracleCircuit = QuantumCircuit(qr)
oracleCircuit.x(qr[2])
phase_oracle(oracleCircuit, qr,qr[2])
oracleCircuit.draw(output="mpl")
def inversion_about_average(circuit, register):
"""Apply inversion about the average step of Grover's algorithm."""
circuit.h(register)
circuit.x(register)
circuit.h(register[1])
circuit.cx(register[0], register[1])
circuit.h(register[1])
circuit.x(register)
circuit.h(register)
qAverage = QuantumCircuit(qr)
inversion_about_average(qAverage, qr[0:2])
qAverage.draw(output='mpl')
qr = QuantumRegister(3)
cr = ClassicalRegister(3)
groverCircuit = QuantumCircuit(qr,cr)
groverCircuit.h(qr[0:2])
groverCircuit.x(qr[2])
phase_oracle(groverCircuit, qr,qr[2])
inversion_about_average(groverCircuit, qr[0:2])
groverCircuit.measure(qr,cr)
groverCircuit.draw(output="mpl")
backend = BasicAer.get_backend('qasm_simulator')
shots = 1024
results = execute(groverCircuit, backend=backend, shots=shots).result()
answer = results.get_counts()
plot_histogram(answer)
#initialization
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
# importing Qiskit
from qiskit import IBMQ, BasicAer
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
def phase_oracle(circuit, register):
circuit.x(register[0])
circuit.cz(register[0], register[1])
circuit.x(register[0])
qr = QuantumRegister(2)
oracleCircuit = QuantumCircuit(qr)
phase_oracle(oracleCircuit, qr)
oracleCircuit.draw(output="mpl")
def inversion_about_average(circuit, register):
"""Apply inversion about the average step of Grover's algorithm."""
circuit.h(register)
circuit.x(register)
circuit.h(register[1])
circuit.cx(register[0], register[1])
circuit.h(register[1])
circuit.x(register)
circuit.h(register)
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
groverCircuit = QuantumCircuit(qr,cr)
groverCircuit.h(qr)
phase_oracle(groverCircuit, qr)
inversion_about_average(groverCircuit, qr)
groverCircuit.measure(qr,cr)
groverCircuit.draw(output="mpl")
backend = BasicAer.get_backend('qasm_simulator')
shots = 1024
results = execute(groverCircuit, backend=backend, shots=shots).result()
answer = results.get_counts()
plot_histogram(answer)
#initialization
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
# importing Qiskit
from qiskit import IBMQ, BasicAer
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
def phase_oracle(circuit, register, oracle_register):
    """Mark the target state via phase kickback on the oracle qubit."""
    a, b = register[0], register[1]
    circuit.h(oracle_register)          # put the ancilla into a kickback basis
    circuit.x(a)                        # map the marked state onto |11>
    circuit.ccx(a, b, oracle_register)  # kick the phase back onto the register
    circuit.x(a)                        # undo the mapping
    circuit.h(oracle_register)
# Demonstrate the ancilla-based oracle on its own (ancilla prepared in |1>).
qr = QuantumRegister(3)
oracleCircuit = QuantumCircuit(qr)
oracleCircuit.x(qr[2])
phase_oracle(oracleCircuit, qr,qr[2])
oracleCircuit.draw(output="mpl")
def inversion_about_average(circuit, register):
    """Apply the Grover diffusion (inversion-about-average) operator."""
    first, second = register[0], register[1]
    circuit.h(register)
    circuit.x(register)
    # controlled-Z decomposed as H-CX-H on the second qubit
    circuit.h(second)
    circuit.cx(first, second)
    circuit.h(second)
    circuit.x(register)
    circuit.h(register)
# Full Grover circuit with an explicit oracle ancilla (qubit 2).
qr = QuantumRegister(3)
cr = ClassicalRegister(3)
groverCircuit = QuantumCircuit(qr,cr)
groverCircuit.h(qr[0:2])
groverCircuit.x(qr[2])
phase_oracle(groverCircuit, qr,qr[2])
inversion_about_average(groverCircuit, qr[0:2])
groverCircuit.measure(qr,cr)
groverCircuit.draw(output="mpl")
# Run on the simulator and plot the measurement statistics.
backend = BasicAer.get_backend('qasm_simulator')
shots = 1024
results = execute(groverCircuit, backend=backend, shots=shots).result()
answer = results.get_counts()
plot_histogram(answer)
|
https://github.com/magn5452/QiskitQaoa
|
magn5452
|
from abc import abstractmethod, ABC
from qiskit import Aer
from qiskit.algorithms import NumPyMinimumEigensolver, QAOA
from qiskit.algorithms.optimizers import optimizer, COBYLA
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit_optimization.problems import quadratic_program
class MinimumEigenSolver(ABC):
    """Abstract interface for minimum-eigenvalue solvers of a quadratic program."""
    @abstractmethod
    def solve(self, quadratic_program):
        """Solve the given quadratic program; concrete subclasses implement this."""
        pass
|
https://github.com/carstenblank/dc-qiskit-stochastics
|
carstenblank
|
# Copyright 2018-2022 Carsten Blank
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Union
import numpy as np
import qiskit
import scipy
from dc_qiskit_algorithms.MöttönenStatePreparation import get_alpha_y
from qiskit.circuit import Parameter
from scipy import sparse
from .benchmark import brute_force_correlated_walk, monte_carlo_correlated_walk
from .discrete_stochastic_process import DiscreteStochasticProcess
LOG = logging.getLogger(__name__)
def index_binary_correlated_walk(level: int, level_p: np.ndarray, **kwargs) -> qiskit.QuantumCircuit:
    """Build the index-register state-preparation circuit for one level of a
    binary correlated walk.

    For level 0 the level qubit is rotated unconditionally; for later levels
    the rotation is conditioned on the previous level's qubit, so this step's
    distribution depends on the previous step (the "correlated" part).

    :param level: 0-based index of the walk step being prepared
    :param level_p: two probabilities for this level; level_p[0] drives the
        branch taken when the previous qubit is 0, level_p[1] when it is 1
    :param kwargs: unused; accepted for interface compatibility
    :return: circuit preparing the level qubit (controlled on the previous
        level's qubit when level > 0)
    """
    # Amplitudes (square roots of probabilities) for the two conditional branches.
    probs = []
    probs.append([np.sqrt(level_p[0]), np.sqrt(1 - level_p[0])])
    probs.append([np.sqrt(1 - level_p[1]), np.sqrt(level_p[1])])
    qc = qiskit.QuantumCircuit(name='index_state_prep')
    qreg = qiskit.QuantumRegister(1, f'level_{level}')
    qc.add_register(qreg)
    if level == 0:
        # First step: plain (uncontrolled) y-rotation from the Möttönen angle.
        vector = sparse.dok_matrix([probs[0]]).transpose()
        alpha = get_alpha_y(vector, 1, 1)
        qc.ry(alpha[0, 0], qreg)
    else:
        previous_level_qreg = qiskit.QuantumRegister(1, f'level_{level - 1}')
        qc.add_register(previous_level_qreg)
        # Activating the 1 path
        vector = sparse.dok_matrix([probs[1]]).transpose()
        alpha = get_alpha_y(vector, 1, 1)
        qc.cry(alpha[0, 0], previous_level_qreg, qreg)
        # Activating the 0 path (sandwich the controlled rotation in X gates)
        vector = sparse.dok_matrix([probs[0]]).transpose()
        alpha = get_alpha_y(vector, 1, 1)
        qc.x(previous_level_qreg)
        qc.cry(alpha[0, 0], previous_level_qreg, qreg)
        qc.x(previous_level_qreg)
    return qc
def benchmark_brute_force(probabilities,
                          realizations,
                          evaluations: Union[List[float], np.ndarray, scipy.sparse.dok_matrix],
                          func=None) -> np.ndarray:
    """Benchmark the correlated walk by brute-force enumeration.

    :param probabilities: per-level step probabilities of the walk
    :param realizations: per-level step sizes of the walk
    :param evaluations: scaling values at which to evaluate
    :param func: function applied to the walk's end value; defaults to
        x -> exp(i*x) (the characteristic function)
    :return: one brute-force result per evaluation point
    """
    logging.basicConfig(format=logging.BASIC_FORMAT)
    # BUG FIX: the previous `func=lambda x: np.exp(1.0j * x) if func is None else func`
    # parsed as `lambda x: (... if func is None else func)`, so a user-supplied
    # `func` was *returned* by the lambda instead of being applied to x.
    applied_func = (lambda x: np.exp(1.0j * x)) if func is None else func
    output: List[complex] = []
    for e in list(evaluations):
        c_func_eval = brute_force_correlated_walk(
            probabilities=probabilities,
            realizations=realizations,
            initial_value=0.0,
            scaling=e,
            func=applied_func
        )
        output.append(c_func_eval)
    return np.asarray(output)
def benchmark_monte_carlo(
        probabilities,
        realizations,
        evaluations: Union[List[float], np.ndarray, scipy.sparse.dok_matrix],
        initial_value: float = 0.0,
        samples: int = 100,
        func=None) -> np.ndarray:
    """Monte-Carlo benchmark of the correlated walk.

    :param probabilities: per-level step probabilities of the walk
    :param realizations: per-level step sizes of the walk
    :param evaluations: scaling values at which to evaluate
    :param initial_value: starting value of the walk
    :param samples: number of Monte-Carlo samples
    :param func: function applied to the walk's end value; defaults to
        v -> exp(i*v) (the characteristic function)
    :return: array of Monte-Carlo estimates
    """
    # BUG FIX: the previous `func=lambda v: np.exp(1.0j * v) if func is None else func`
    # parsed as `lambda v: (... if func is None else func)`, so a user-supplied
    # `func` was *returned* by the lambda instead of being applied to v.
    applied_func = (lambda v: np.exp(1.0j * v)) if func is None else func
    c_func_eval = monte_carlo_correlated_walk(
        probabilities=probabilities,
        realizations=realizations,
        initial_value=initial_value,
        samples=samples,
        scaling=evaluations,
        func=applied_func
    )
    return np.asarray(c_func_eval)
class CorrelatedWalk(DiscreteStochasticProcess):
    """Discrete stochastic process whose per-step distribution is conditioned
    on the previous step (a correlated random walk)."""
    def __init__(self, initial_value: float, probabilities: np.ndarray, realizations: np.ndarray):
        # One probability per realization at every level, hence equal shapes.
        assert probabilities.shape == realizations.shape
        super().__init__(initial_value, probabilities, realizations)
    def _proposition_one_circuit(self, scaling: Parameter, level_func=None, index_state_prep=None, **kwargs):
        """Build the proposition-1 circuit, defaulting the index-register
        preparation to the correlated-walk variant when none is supplied."""
        index_state_prep = index_binary_correlated_walk if index_state_prep is None else index_state_prep
        return super(CorrelatedWalk, self)._proposition_one_circuit(scaling, level_func, index_state_prep, **kwargs)
    def benchmark(self, evaluations: Union[List[float], np.ndarray, scipy.sparse.dok_matrix],
                  func=None, samples: int = 100) -> np.ndarray:
        """Classical Monte-Carlo benchmark of this walk at the given evaluation points."""
        return benchmark_monte_carlo(
            probabilities=self.probabilities,
            realizations=self.realizations,
            evaluations=evaluations,
            initial_value=self.initial_value,
            samples=samples,
            func=func
        )
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import numpy as np
import matplotlib.pyplot as plt
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.circuit.library import IntegerComparator
from qiskit.algorithms import IterativeAmplitudeEstimation, EstimationProblem
from qiskit_aer.primitives import Sampler
# set problem parameters
n_z = 2
z_max = 2
z_values = np.linspace(-z_max, z_max, 2**n_z)
p_zeros = [0.15, 0.25]
rhos = [0.1, 0.05]
lgd = [1, 2]
K = len(p_zeros)
alpha = 0.05
from qiskit_finance.circuit.library import GaussianConditionalIndependenceModel as GCI
u = GCI(n_z, z_max, p_zeros, rhos)
u.draw()
u_measure = u.measure_all(inplace=False)
sampler = Sampler()
job = sampler.run(u_measure)
binary_probabilities = job.result().quasi_dists[0].binary_probabilities()
# analyze uncertainty circuit and determine exact solutions
p_z = np.zeros(2**n_z)
p_default = np.zeros(K)
values = []
probabilities = []
num_qubits = u.num_qubits
for i, prob in binary_probabilities.items():
# extract value of Z and corresponding probability
i_normal = int(i[-n_z:], 2)
p_z[i_normal] += prob
# determine overall default probability for k
loss = 0
for k in range(K):
if i[K - k - 1] == "1":
p_default[k] += prob
loss += lgd[k]
values += [loss]
probabilities += [prob]
values = np.array(values)
probabilities = np.array(probabilities)
expected_loss = np.dot(values, probabilities)
losses = np.sort(np.unique(values))
pdf = np.zeros(len(losses))
for i, v in enumerate(losses):
pdf[i] += sum(probabilities[values == v])
cdf = np.cumsum(pdf)
i_var = np.argmax(cdf >= 1 - alpha)
exact_var = losses[i_var]
exact_cvar = np.dot(pdf[(i_var + 1) :], losses[(i_var + 1) :]) / sum(pdf[(i_var + 1) :])
print("Expected Loss E[L]: %.4f" % expected_loss)
print("Value at Risk VaR[L]: %.4f" % exact_var)
print("P[L <= VaR[L]]: %.4f" % cdf[exact_var])
print("Conditional Value at Risk CVaR[L]: %.4f" % exact_cvar)
# plot loss PDF, expected loss, var, and cvar
plt.bar(losses, pdf)
plt.axvline(expected_loss, color="green", linestyle="--", label="E[L]")
plt.axvline(exact_var, color="orange", linestyle="--", label="VaR(L)")
plt.axvline(exact_cvar, color="red", linestyle="--", label="CVaR(L)")
plt.legend(fontsize=15)
plt.xlabel("Loss L ($)", size=15)
plt.ylabel("probability (%)", size=15)
plt.title("Loss Distribution", size=20)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()
# plot results for Z
plt.plot(z_values, p_z, "o-", linewidth=3, markersize=8)
plt.grid()
plt.xlabel("Z value", size=15)
plt.ylabel("probability (%)", size=15)
plt.title("Z Distribution", size=20)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()
# plot results for default probabilities
plt.bar(range(K), p_default)
plt.xlabel("Asset", size=15)
plt.ylabel("probability (%)", size=15)
plt.title("Individual Default Probabilities", size=20)
plt.xticks(range(K), size=15)
plt.yticks(size=15)
plt.grid()
plt.show()
# add Z qubits with weight/loss 0
from qiskit.circuit.library import WeightedAdder
agg = WeightedAdder(n_z + K, [0] * n_z + lgd)
from qiskit.circuit.library import LinearAmplitudeFunction
# define linear objective function
breakpoints = [0]
slopes = [1]
offsets = [0]
f_min = 0
f_max = sum(lgd)
c_approx = 0.25
objective = LinearAmplitudeFunction(
agg.num_sum_qubits,
slope=slopes,
offset=offsets,
# max value that can be reached by the qubit register (will not always be reached)
domain=(0, 2**agg.num_sum_qubits - 1),
image=(f_min, f_max),
rescaling_factor=c_approx,
breakpoints=breakpoints,
)
# define the registers for convenience and readability
qr_state = QuantumRegister(u.num_qubits, "state")
qr_sum = QuantumRegister(agg.num_sum_qubits, "sum")
qr_carry = QuantumRegister(agg.num_carry_qubits, "carry")
qr_obj = QuantumRegister(1, "objective")
# define the circuit
state_preparation = QuantumCircuit(qr_state, qr_obj, qr_sum, qr_carry, name="A")
# load the random variable
state_preparation.append(u.to_gate(), qr_state)
# aggregate
state_preparation.append(agg.to_gate(), qr_state[:] + qr_sum[:] + qr_carry[:])
# linear objective function
state_preparation.append(objective.to_gate(), qr_sum[:] + qr_obj[:])
# uncompute aggregation
state_preparation.append(agg.to_gate().inverse(), qr_state[:] + qr_sum[:] + qr_carry[:])
# draw the circuit
state_preparation.draw()
state_preparation_measure = state_preparation.measure_all(inplace=False)
sampler = Sampler()
job = sampler.run(state_preparation_measure)
binary_probabilities = job.result().quasi_dists[0].binary_probabilities()
# evaluate the result
value = 0
for i, prob in binary_probabilities.items():
if prob > 1e-6 and i[-(len(qr_state) + 1) :][0] == "1":
value += prob
print("Exact Expected Loss: %.4f" % expected_loss)
print("Exact Operator Value: %.4f" % value)
print("Mapped Operator value: %.4f" % objective.post_processing(value))
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
problem = EstimationProblem(
state_preparation=state_preparation,
objective_qubits=[len(qr_state)],
post_processing=objective.post_processing,
)
# construct amplitude estimation
ae = IterativeAmplitudeEstimation(
epsilon_target=epsilon, alpha=alpha, sampler=Sampler(run_options={"shots": 100})
)
result = ae.estimate(problem)
# print results
conf_int = np.array(result.confidence_interval_processed)
print("Exact value: \t%.4f" % expected_loss)
print("Estimated value:\t%.4f" % result.estimation_processed)
print("Confidence interval: \t[%.4f, %.4f]" % tuple(conf_int))
# set x value to estimate the CDF
x_eval = 2
comparator = IntegerComparator(agg.num_sum_qubits, x_eval + 1, geq=False)
comparator.draw()
def get_cdf_circuit(x_eval):
    """Build the state-preparation circuit whose 'objective' qubit encodes
    CDF(x_eval) = P[total loss <= x_eval].

    Relies on the module-level uncertainty model ``u`` and aggregator ``agg``.
    """
    # define the registers for convenience and readability
    qr_state = QuantumRegister(u.num_qubits, "state")
    qr_sum = QuantumRegister(agg.num_sum_qubits, "sum")
    qr_carry = QuantumRegister(agg.num_carry_qubits, "carry")
    qr_obj = QuantumRegister(1, "objective")
    qr_compare = QuantumRegister(1, "compare")  # NOTE(review): created but never added to the circuit
    # define the circuit
    state_preparation = QuantumCircuit(qr_state, qr_obj, qr_sum, qr_carry, name="A")
    # load the random variable
    state_preparation.append(u, qr_state)
    # aggregate
    state_preparation.append(agg, qr_state[:] + qr_sum[:] + qr_carry[:])
    # comparator objective function: geq=False flags sum <= x_eval
    comparator = IntegerComparator(agg.num_sum_qubits, x_eval + 1, geq=False)
    state_preparation.append(comparator, qr_sum[:] + qr_obj[:] + qr_carry[:])
    # uncompute aggregation
    state_preparation.append(agg.inverse(), qr_state[:] + qr_sum[:] + qr_carry[:])
    return state_preparation
state_preparation = get_cdf_circuit(x_eval)
state_preparation.draw()
state_preparation_measure = state_preparation.measure_all(inplace=False)
sampler = Sampler()
job = sampler.run(state_preparation_measure)
binary_probabilities = job.result().quasi_dists[0].binary_probabilities()
# evaluate the result
var_prob = 0
for i, prob in binary_probabilities.items():
if prob > 1e-6 and i[-(len(qr_state) + 1) :][0] == "1":
var_prob += prob
print("Operator CDF(%s)" % x_eval + " = %.4f" % var_prob)
print("Exact CDF(%s)" % x_eval + " = %.4f" % cdf[x_eval])
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
problem = EstimationProblem(state_preparation=state_preparation, objective_qubits=[len(qr_state)])
# construct amplitude estimation
ae_cdf = IterativeAmplitudeEstimation(
epsilon_target=epsilon, alpha=alpha, sampler=Sampler(run_options={"shots": 100})
)
result_cdf = ae_cdf.estimate(problem)
# print results
conf_int = np.array(result_cdf.confidence_interval)
print("Exact value: \t%.4f" % cdf[x_eval])
print("Estimated value:\t%.4f" % result_cdf.estimation)
print("Confidence interval: \t[%.4f, %.4f]" % tuple(conf_int))
def run_ae_for_cdf(x_eval, epsilon=0.01, alpha=0.05, simulator="aer_simulator"):
    """Estimate CDF(x_eval) with iterative amplitude estimation.

    :param x_eval: loss level at which to evaluate the CDF
    :param epsilon: target estimation precision
    :param alpha: confidence level for the estimate
    :param simulator: unused here; kept for backward compatibility
    :return: the amplitude estimate of P[loss <= x_eval]
    """
    # construct amplitude estimation
    state_preparation = get_cdf_circuit(x_eval)
    problem = EstimationProblem(
        state_preparation=state_preparation, objective_qubits=[len(qr_state)]
    )
    ae_var = IterativeAmplitudeEstimation(
        epsilon_target=epsilon, alpha=alpha, sampler=Sampler(run_options={"shots": 100})
    )
    result_var = ae_var.estimate(problem)
    return result_var.estimation
def bisection_search(
    objective, target_value, low_level, high_level, low_value=None, high_value=None
):
    """
    Determine the smallest level such that the objective value reaches the target.

    :param objective: objective function, called as objective(level)
    :param target_value: target value the objective should reach
    :param low_level: lowest level to be considered
    :param high_level: highest level to be considered
    :param low_value: value at low_level (evaluated if set to None)
    :param high_value: value at high_level (evaluated if set to None)
    :return: dictionary with keys level, value, num_eval, comment
    """
    print("--------------------------------------------------------------------")
    print("start bisection search for target value %.3f" % target_value)
    print("--------------------------------------------------------------------")
    # Evaluate the bracket endpoints unless the caller already knows them.
    num_eval = 0
    if low_value is None:
        low_value = objective(low_level)
        num_eval += 1
    if high_value is None:
        high_value = objective(high_level)
        num_eval += 1
    # Lower endpoint already meets (or exceeds) the target: nothing to search.
    if low_value > target_value:
        return {
            "level": low_level,
            "value": low_value,
            "num_eval": num_eval,
            "comment": "returned low value",
        }
    elif low_value == target_value:
        return {"level": low_level, "value": low_value, "num_eval": num_eval, "comment": "success"}
    # Upper endpoint never reaches the target: the search cannot succeed.
    if high_value < target_value:
        return {
            "level": high_level,
            "value": high_value,
            "num_eval": num_eval,
            # BUG FIX: this branch previously reported "returned low value"
            # even though it returns the HIGH endpoint.
            "comment": "returned high value",
        }
    elif high_value == target_value:
        return {
            "level": high_level,
            "value": high_value,
            "num_eval": num_eval,
            "comment": "success",
        }
    # Bisection loop; invariant: low_value < target_value <= high_value.
    print("low_level low_value level value high_level high_value")
    print("--------------------------------------------------------------------")
    while high_level - low_level > 1:
        # np.round uses round-half-to-even; kept for bit-identical level choices.
        level = int(np.round((high_level + low_level) / 2.0))
        num_eval += 1
        value = objective(level)
        print(
            "%2d %.3f %2d %.3f %2d %.3f"
            % (low_level, low_value, level, value, high_level, high_value)
        )
        if value >= target_value:
            high_level = level
            high_value = value
        else:
            low_level = level
            low_value = value
    # The high endpoint is the smallest level satisfying the target.
    print("--------------------------------------------------------------------")
    print("finished bisection search")
    print("--------------------------------------------------------------------")
    return {"level": high_level, "value": high_value, "num_eval": num_eval, "comment": "success"}
# run bisection search to determine VaR
objective = lambda x: run_ae_for_cdf(x)
bisection_result = bisection_search(
objective, 1 - alpha, min(losses) - 1, max(losses), low_value=0, high_value=1
)
var = bisection_result["level"]
print("Estimated Value at Risk: %2d" % var)
print("Exact Value at Risk: %2d" % exact_var)
print("Estimated Probability: %.3f" % bisection_result["value"])
print("Exact Probability: %.3f" % cdf[exact_var])
# define linear objective
breakpoints = [0, var]
slopes = [0, 1]
offsets = [0, 0] # subtract VaR and add it later to the estimate
f_min = 0
f_max = 3 - var
c_approx = 0.25
cvar_objective = LinearAmplitudeFunction(
agg.num_sum_qubits,
slopes,
offsets,
domain=(0, 2**agg.num_sum_qubits - 1),
image=(f_min, f_max),
rescaling_factor=c_approx,
breakpoints=breakpoints,
)
cvar_objective.draw()
# define the registers for convenience and readability
qr_state = QuantumRegister(u.num_qubits, "state")
qr_sum = QuantumRegister(agg.num_sum_qubits, "sum")
qr_carry = QuantumRegister(agg.num_carry_qubits, "carry")
qr_obj = QuantumRegister(1, "objective")
qr_work = QuantumRegister(cvar_objective.num_ancillas - len(qr_carry), "work")
# define the circuit
state_preparation = QuantumCircuit(qr_state, qr_obj, qr_sum, qr_carry, qr_work, name="A")
# load the random variable
state_preparation.append(u, qr_state)
# aggregate
state_preparation.append(agg, qr_state[:] + qr_sum[:] + qr_carry[:])
# linear objective function
state_preparation.append(cvar_objective, qr_sum[:] + qr_obj[:] + qr_carry[:] + qr_work[:])
# uncompute aggregation
state_preparation.append(agg.inverse(), qr_state[:] + qr_sum[:] + qr_carry[:])
state_preparation_measure = state_preparation.measure_all(inplace=False)
sampler = Sampler()
job = sampler.run(state_preparation_measure)
binary_probabilities = job.result().quasi_dists[0].binary_probabilities()
# evaluate the result
value = 0
for i, prob in binary_probabilities.items():
if prob > 1e-6 and i[-(len(qr_state) + 1)] == "1":
value += prob
# normalize and add VaR to estimate
value = cvar_objective.post_processing(value)
d = 1.0 - bisection_result["value"]
v = value / d if d != 0 else 0
normalized_value = v + var
print("Estimated CVaR: %.4f" % normalized_value)
print("Exact CVaR: %.4f" % exact_cvar)
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
problem = EstimationProblem(
state_preparation=state_preparation,
objective_qubits=[len(qr_state)],
post_processing=cvar_objective.post_processing,
)
# construct amplitude estimation
ae_cvar = IterativeAmplitudeEstimation(
epsilon_target=epsilon, alpha=alpha, sampler=Sampler(run_options={"shots": 100})
)
result_cvar = ae_cvar.estimate(problem)
# print results
d = 1.0 - bisection_result["value"]
v = result_cvar.estimation_processed / d if d != 0 else 0
print("Exact CVaR: \t%.4f" % exact_cvar)
print("Estimated CVaR:\t%.4f" % (v + var))
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit, transpile, schedule
from qiskit.visualization.timeline import draw, IQXSimple
from qiskit.providers.fake_provider import FakeBoeblingen
# Schedule a Bell-pair circuit on a fake backend and draw its hardware timeline.
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0,1)
# ALAP scheduling; trivial layout keeps virtual->physical qubit mapping 1:1.
qc = transpile(qc, FakeBoeblingen(), scheduling_method='alap', layout_method='trivial')
draw(qc, style=IQXSimple())
|
https://github.com/shesha-raghunathan/DATE2019-qiskit-tutorial
|
shesha-raghunathan
|
print("Hello! I'm a code cell")
print('Set up started...')
# Notebook magic and path setup so the bundled game engine can be imported.
%matplotlib notebook
import sys
sys.path.append('game_engines')
import hello_quantum
print('Set up complete!')
initialize = []
success_condition = {}
allowed_gates = {'0': {'NOT': 3}, '1': {}, 'both': {}}
vi = [[1], False, False]
qubit_names = {'0':'the only bit', '1':None}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = []
success_condition = {}
allowed_gates = {'0': {}, '1': {'NOT': 0}, 'both': {}}
vi = [[], False, False]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x', '0']]
success_condition = {'IZ': -1.0}
allowed_gates = {'0': {'CNOT': 0}, '1': {'CNOT': 0}, 'both': {}}
vi = [[], False, False]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x', '0']]
success_condition = {'ZI': 1.0, 'IZ': -1.0}
allowed_gates = {'0': {'CNOT': 0}, '1': {'CNOT': 0}, 'both': {}}
vi = [[], False, False]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['h', '0']]
success_condition = {'IZ': 0.0}
allowed_gates = {'0': {'CNOT': 0}, '1': {'CNOT': 0}, 'both': {}}
vi = [[], False, False]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['h', '0']]
success_condition = {'ZZ': -1.0}
allowed_gates = {'0': {'NOT': 0, 'CNOT': 0}, '1': {'NOT': 0, 'CNOT': 0}, 'both': {}}
vi = [[], False, True]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['h', '1']]
success_condition = {'IZ': -1.0}
allowed_gates = {'0': {'NOT': 0, 'CNOT': 0}, '1': {'NOT': 0, 'CNOT': 0}, 'both': {}}
vi = [[], False, True]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [ ["x","0"] ]
success_condition = {"ZI":1.0}
allowed_gates = { "0":{"x":3}, "1":{}, "both":{} }
vi = [[1],True,True]
qubit_names = {'0':'the only qubit', '1':None}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x', '0']]
success_condition = {'ZI': 1.0}
allowed_gates = {'0': {'x': 0}, '1': {}, 'both': {}}
vi = [[1], True, True]
qubit_names = {'0':'the only qubit', '1':None}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x', '1']]
success_condition = {'IZ': 1.0}
allowed_gates = {'0': {}, '1': {'x': 0}, 'both': {}}
vi = [[0], True, True]
qubit_names = {'0':None, '1':'the other qubit'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = []
success_condition = {'ZI': 0.0}
allowed_gates = {'0': {'h': 3}, '1': {}, 'both': {}}
vi = [[1], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['h', '1']]
success_condition = {'IZ': 1.0}
allowed_gates = {'0': {}, '1': {'x': 3, 'h': 0}, 'both': {}}
vi = [[0], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['h', '0'], ['z', '0']]
success_condition = {'XI': 1.0}
allowed_gates = {'0': {'z': 0, 'h': 0}, '1': {}, 'both': {}}
vi = [[1], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = []
success_condition = {'ZI': -1.0}
allowed_gates = {'0': {'z': 0, 'h': 0}, '1': {}, 'both': {}}
vi = [[1], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['h', '0']]
success_condition = {'IX': 1.0}
allowed_gates = {'0': {}, '1': {'z': 0, 'h': 0}, 'both': {}}
vi = [[0], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['ry(pi/4)', '1']]
success_condition = {'IZ': -0.7071, 'IX': -0.7071}
allowed_gates = {'0': {}, '1': {'z': 0, 'h': 0}, 'both': {}}
vi = [[0], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x', '1']]
success_condition = {'ZI': 0.0, 'IZ': 0.0}
allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, False]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['h','0'],['h','1']]
success_condition = {'ZZ': -1.0}
allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x','0']]
success_condition = {'XX': 1.0}
allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = []
success_condition = {'XZ': -1.0}
allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['ry(-pi/4)', '1'], ['ry(-pi/4)','0']]
success_condition = {'ZI': -0.7071, 'IZ': -0.7071}
allowed_gates = {'0': {'x': 0}, '1': {'x': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x', '1'], ['x','0']]
success_condition = {'XI':1, 'IX':1}
allowed_gates = {'0': {'z': 0, 'h': 0}, '1': {'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x', '0']]
success_condition = {'ZI': 1.0, 'IZ': -1.0}
allowed_gates = {'0': {'cx': 0}, '1': {'cx': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['h', '0'],['x', '1']]
success_condition = {'XI': -1.0, 'IZ': 1.0}
allowed_gates = {'0': {'h': 0, 'cz': 0}, '1': {'cx': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['h', '0'],['x', '1'],['h', '1']]
success_condition = { }
allowed_gates = {'0':{'cz': 2}, '1':{'cz': 2}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x', '0']]
success_condition = {'IZ': -1.0}
allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz': 0}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['h', '0'],['h', '1']]
success_condition = {'XI': -1.0, 'IX': -1.0}
allowed_gates = {'0': {}, '1': {'z':0,'cx': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = []
success_condition = {'IZ': -1.0}
allowed_gates = {'0': {'x':0,'h':0,'cx':0}, '1': {'h':0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['ry(-pi/4)','0'],['ry(-pi/4)','0'],['ry(-pi/4)','0'],['x','0'],['x','1']]
success_condition = {'ZI': -1.0,'XI':0,'IZ':0.7071,'IX':-0.7071}
allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz': 0}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x','0'],['h','1']]
success_condition = {'IX':1,'ZI':-1}
allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz':3}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names,shots=2000)
initialize = [['x','1']]
success_condition = {'IZ':1.0,'ZI':-1.0}
allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz':0}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names,shots=2000)
initialize = []
success_condition = {}
allowed_gates = {'0': {'ry(pi/4)': 4}, '1': {}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
initialize = [['x','0']]
success_condition = {'XX': 1.0}
allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
initialize = []
success_condition = {'ZI': -1.0}
allowed_gates = {'0': {'bloch':1, 'ry(pi/4)': 0}, '1':{}, 'both': {'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
initialize = [['h','0'],['h','1']]
success_condition = {'ZI': -1.0,'IZ': -1.0}
allowed_gates = {'0': {'bloch':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, '1': {'bloch':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
initialize = [['h','0']]
success_condition = {'ZZ': 1.0}
allowed_gates = {'0': {}, '1': {'bloch':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'unbloch':0,'cz':0}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
initialize = [['ry(pi/4)','0'],['ry(pi/4)','1']]
success_condition = {'ZI': 1.0,'IZ': 1.0}
allowed_gates = {'0': {'bloch':0, 'z':0, 'ry(pi/4)': 1}, '1': {'bloch':0, 'x':0, 'ry(pi/4)': 1}, 'both': {'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
initialize = [['x','0'],['h','1']]
success_condition = {'IZ': 1.0}
allowed_gates = {'0': {}, '1': {'bloch':0, 'cx':0, 'ry(pi/4)': 1, 'ry(-pi/4)': 1}, 'both': {'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
initialize = []
success_condition = {'IZ': 1.0,'IX': 1.0}
allowed_gates = {'0': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, '1': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'cz':0, 'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'qubit 0', '1':'qubit 1'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
import random
def setup_variables ():
    """Draw one uniform random number and derive the two hidden variables.

    Returns:
        tuple: (A, B) where A is two thirds and B is one third of the
        same uniform draw from [0, 1).
    """
    ### Replace this section with anything you want ###
    draw = random.random()
    ### End of section ###
    return draw * (2 / 3), draw * (1 / 3)
def hash2bit ( variable, hash ):
    """Hash a continuous variable in [0, 1] down to a single bit.

    Args:
        variable (float): The hidden variable to hash.
        hash (str): Which hash to apply: 'V' thresholds at 0.5,
            'H' thresholds at 0.25.

    Returns:
        str: '1' if the variable falls below the threshold, else '0'.

    Raises:
        ValueError: If ``hash`` is neither 'V' nor 'H'. (The original
        code would have raised an UnboundLocalError here instead.)
    """
    ### Replace this section with anything you want ###
    if hash=='V':
        bit = (variable<0.5)
    elif hash=='H':
        bit = (variable<0.25)
    else:
        raise ValueError("hash must be 'V' or 'H', got %r" % (hash,))
    bit = str(int(bit))
    ### End of section ###
    return bit
# Number of samples used to estimate each probability.
shots = 8192
def calculate_P ( ):
    """Estimate, for each hash combination, the chance the two bits differ.

    For every combination in ('VV', 'VH', 'HV', 'HH'), fresh variables
    are drawn `shots` times; the recorded value is the fraction of draws
    whose hashed bits disagree.

    Returns:
        dict: Mapping from hash combination to estimated probability.
    """
    P = {}
    for hashes in ['VV','VH','HV','HH']:
        # Count disagreements over `shots` fresh samples, then normalize.
        disagreements = 0
        for _ in range(shots):
            A, B = setup_variables()
            bit_a = hash2bit(A, hashes[0])  # first hash character is for A
            bit_b = hash2bit(B, hashes[1])  # second hash character is for B
            if bit_a != bit_b:
                disagreements += 1
        P[hashes] = disagreements / shots
    return P
P = calculate_P()
print(P)
def bell_test (P):
    """Check each probability in P against its Bell-type upper bound.

    For every hash combination, the bound is the sum of the other three
    probabilities. Prints whether each value obeys its bound; violations
    below 0.1 are attributed to rounding or statistical noise.

    Args:
        P (dict): Mapping from hash combination to probability.
    """
    total = sum(P.values())
    for hashes in P:
        # The bound on one probability is the sum of the other three.
        bound = total - P[hashes]
        print(f"The upper bound for P['{hashes}'] is {bound}")
        print(f"The value of P['{hashes}'] is {P[hashes]}")
        if P[hashes] <= bound:
            print("The upper bound is obeyed :)\n")
        elif P[hashes] - bound < 0.1:
            print("This seems to have gone over the upper bound, but only by a little bit :S\nProbably just rounding errors or statistical noise.\n")
        else:
            print("!!!!! This has gone well over the upper bound :O !!!!!\n")
bell_test(P)
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
def initialize_program ():
    """Create a fresh two-qubit program for the quantum Bell test.

    Returns:
        tuple: (A, B, a, b, qc) - the two qubits, their two classical
        readout bits, and the QuantumCircuit containing both registers.
    """
    qubits = QuantumRegister(2)
    bits = ClassicalRegister(2)
    qc = QuantumCircuit(qubits, bits)
    A, B = qubits[0], qubits[1]
    a, b = bits[0], bits[1]
    return A, B, a, b, qc
def hash2bit ( variable, hash, bit, qc ):
    """Measure a qubit in the basis selected by `hash`.

    'H' rotates into the X basis with a Hadamard before measuring; any
    other value measures directly in the computational (Z) basis.

    Args:
        variable: Qubit to measure.
        hash (str): Basis selector ('H' for X basis, anything else for Z).
        bit: Classical bit that receives the outcome.
        qc: Circuit to which the operations are appended.
    """
    measure_in_x_basis = (hash == 'H')
    if measure_in_x_basis:
        qc.h(variable)
    qc.measure(variable, bit)
# Final "Hello Quantum" level: build a state whose correlations
# (ZZ, ZX, XZ, XX all equal to -1/sqrt(2) ~ -0.7071) maximally violate
# the Bell bound tested below. The player's moves are recorded in
# puzzle.program for replay onto a real circuit.
initialize = []
success_condition = {'ZZ':-0.7071,'ZX':-0.7071,'XZ':-0.7071,'XX':-0.7071}
allowed_gates = {'0': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, '1': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'cz':0, 'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'A', '1':'B'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
import numpy as np
def setup_variables ( A, B, qc ):
    """Replay the player's puzzle solution onto the circuit.

    Each entry of ``puzzle.program`` is a line of Python source recorded
    by the game; evaluating it appends the corresponding gate to ``qc``
    acting on qubits ``A`` and ``B``.

    NOTE(review): ``eval`` on ``puzzle.program`` executes arbitrary code
    from the game state - acceptable in this tutorial, but do not reuse
    the pattern on untrusted input.
    """
    for line in puzzle.program:
        eval(line)
# Number of measurement shots per circuit.
shots = 8192
from qiskit import execute
def calculate_P ( backend ):
    """Run the four hashed-measurement circuits and estimate P[hashes].

    For each hash combination, a fresh circuit is built, the player's
    puzzle solution is applied, and each qubit is measured in the basis
    chosen by its hash character. All four circuits are submitted to
    ``backend`` as one job.

    Args:
        backend: Qiskit backend (simulator or device) to execute on.

    Returns:
        dict: For each hash combination, the fraction of shots in which
        the two measured bits disagreed.
    """
    P = {}
    program = {}
    for hashes in ['VV','VH','HV','HH']:
        A, B, a, b, program[hashes] = initialize_program ()
        setup_variables( A, B, program[hashes] )
        hash2bit ( A, hashes[0], a, program[hashes])
        hash2bit ( B, hashes[1], b, program[hashes])
    # submit jobs (all four circuits in a single batch)
    job = execute( list(program.values()), backend, shots=shots )
    # get the results
    for hashes in ['VV','VH','HV','HH']:
        stats = job.result().get_counts(program[hashes])
        P[hashes] = 0
        for string in stats.keys():
            # Qiskit count strings are little-endian: the last character
            # is classical bit 0 (qubit A), the one before it is bit 1 (B).
            a = string[-1]
            b = string[-2]
            if a!=b:
                P[hashes] += stats[string] / shots
    return P
# Choose where to run: try a local Aer simulator first, and fall back to
# an IBMQ device of the same name if Aer does not provide it.
device = 'qasm_simulator'
from qiskit import Aer, IBMQ
try:
    IBMQ.load_accounts()
except:
    # No saved IBMQ credentials - local simulation still works.
    pass
try:
    backend = Aer.get_backend(device)
except:
    backend = IBMQ.get_backend(device)
print(backend.status())
# Estimate the probabilities on the chosen backend and run the Bell test.
P = calculate_P( backend )
print(P)
bell_test( P )
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# Build a 5-qubit GHZ circuit, schedule it on a fake Boeblingen backend
# using as-soon-as-possible scheduling, and draw the hardware timeline.
from qiskit.visualization.timeline import draw as timeline_draw
from qiskit import QuantumCircuit, transpile
from qiskit.providers.fake_provider import FakeBoeblingen
backend = FakeBoeblingen()
ghz = QuantumCircuit(5)
ghz.h(0)
ghz.cx(0,range(1,5))
circ = transpile(ghz, backend, scheduling_method="asap")
timeline_draw(circ)
|
https://github.com/Juan-Varela11/BNL_2020_Summer_Internship
|
Juan-Varela11
|
# Importing useful packages
import sys
import time
import numpy as np
import pandas as pd
import seaborn as sns
from tabulate import tabulate
import matplotlib.pyplot as plt
# Importing QISKit
from qiskit import Aer, execute, BasicAer
from qiskit.aqua.components.initial_states import Zero
from qiskit.aqua.algorithms.adaptive import VQE
from qiskit.aqua import QuantumInstance, aqua_globals
from qiskit.aqua.operators import WeightedPauliOperator
from qiskit.aqua.algorithms import ExactEigensolver
from qiskit.aqua.components.initial_states import Custom
from qiskit.aqua.components.variational_forms import RY, RYRZ
from qiskit.aqua.operators.op_converter import to_weighted_pauli_operator
from qiskit.aqua.components.optimizers import COBYLA, SPSA, L_BFGS_B, SLSQP
from qiskit.aqua.operators import (TPBGroupedWeightedPauliOperator, WeightedPauliOperator, MatrixOperator)
import qiskit.tools.jupyter
%qiskit_version_table
def run_VQE(**kwargs):
    """Runs the Variational Quantum Eigensolver (VQE)

    Args:
        file_path (str): path to txt file containing Hamiltonian
        init_state_name (class): Aqua initial-state class (e.g. Zero)
        vf_name (class): Aqua variational-form class (e.g. RY, RYRZ)
        depth (int): depth (number of repetitions) of the variational form
        shots (int): number of shots per circuit evaluation
        simulator (str): [statevector_simulator, qasm_simulator] The type of simulator
            you wish to use (statevector is noiseless, while qasm contains a noise model)

    Returns:
        return_dict (dict): Dictionary with the following Keys:
            file_name (str): Name of file used for simulation
            result_df (DataFrame): Pandas DataFrame containing the
                convergence count (df index), convergence_vals, and percent_error
            reference_val (float): GS Energy found using the Exact Eigen Solver
            algos (QISKit VQE Object): Object returned by the VQE algorithm class
            algo_results (dict): Results from VQE run (one algorithim result dict
                for each optimizer)
    """
    file_path = kwargs['file_path']
    init_state_name = kwargs['init_state_name']
    vf_name = kwargs['vf_name']
    depth = kwargs['depth']
    shots = kwargs['shots']
    simulator = kwargs['simulator']
    # Loading matrix representation of Hamiltonian txt file
    H = np.loadtxt(file_path)
    # Converting Hamiltonian to Matrix Operator
    qubit_op = MatrixOperator(matrix=H)
    # Converting to Pauli Operator
    qubit_op = to_weighted_pauli_operator(qubit_op)
    start = time.time()
    num_qubits = qubit_op.num_qubits
    num_paulis = len(qubit_op.paulis)
    num_qubits = qubit_op.num_qubits  # NOTE(review): duplicate of the assignment two lines up
    print(f"""{num_paulis} Pauli factors \n{round(time.time()-start)} s to process""")
    # Solving system exactly for reference value
    ee = ExactEigensolver(qubit_op)
    result = ee.run()
    ref = result['energy']
    # Setting initial state, variational form, and backend
    init_state = init_state_name(num_qubits)
    var_form = vf_name(num_qubits,
                       depth=depth,
                       entanglement='linear',
                       initial_state=init_state)
    backend = BasicAer.get_backend(simulator)
    # Don't use SPSA if using a noiseless simulator
    if simulator == 'statevector_simulator':
        optimizers = [COBYLA, L_BFGS_B, SLSQP]
    else:
        optimizers = [SPSA]
    # Initializing empty lists & dicts for storage
    dfs = []
    algos = {}
    algo_results = {}
    for optimizer in optimizers:
        # For reproducibility
        aqua_globals.random_seed = 250
        print(f'\rOptimizer: {optimizer.__name__} ', end='')
        counts = []
        values = []
        params = []
        # Callback invoked by VQE after each objective evaluation; records
        # the evaluation count, energy estimate, and parameter vector.
        def store_intermediate_result(eval_count, parameters, mean, std):
            counts.append(eval_count)
            values.append(mean)
            params.append(parameters)
        # Running VQE
        algo = VQE(qubit_op, var_form, optimizer(), callback=store_intermediate_result)
        quantum_instance = QuantumInstance(backend=backend, shots=shots)
        algo_result = algo.run(quantum_instance)
        # Appending each optimizer data frame to one list
        dfs.append(pd.DataFrame(values, columns=['convergence_vals'], index=counts))
        algos[optimizer.__name__] = algo
        algo_results[optimizer.__name__] = algo_result
    print('\rOptimization complete')
    # Formatting return data frame: one sub-frame per optimizer, keyed by name.
    results = pd.concat([df for df in dfs], keys=[optimizer.__name__ for optimizer in optimizers])
    results['energy_diff'] = results['convergence_vals'].apply(lambda x: abs(x - ref))
    results['percent_error'] = results['convergence_vals'].apply(lambda x: (abs(x - ref)/ref)*100)
    return_dict = {}
    return_dict['file_name'] = file_path
    return_dict['result_df'] = results
    return_dict['reference_val'] = ref
    return_dict['algos'] = algos
    return_dict['algo_results'] = algo_results
    return return_dict
def print_table(**result_dict):
    """Print a summary table of VQE energies for every optimizer.

    Rows are sorted by percent error (best optimizer first) and rendered
    with ``tabulate``.

    Args:
        result_dict (dict): Dictionary returned from run_VQE function

    Returns: None
    """
    algo_results = result_dict['algo_results']
    ref_val = result_dict['reference_val']
    # Strip the directory and the '.txt' suffix for display.
    fn = result_dict['file_name'].split('/')[-1].replace(".txt", "")
    print(f'\nHamiltonian: {fn}')
    print(f'Reference Value: {ref_val}')
    # One row per optimizer: name, energy, and percent error vs. reference.
    rows = [
        [opt,
         res['energy'],
         abs((res['energy'] - ref_val) / ref_val) * 100]
        for opt, res in algo_results.items()
    ]
    rows.sort(key=lambda row: row[2])
    print(tabulate(rows,
                   tablefmt="fancy_grid",
                   headers=['Optimizer', 'VQE Energy', '% Error'],
                   numalign="right"))
def visualize_results(**result_dict):
    """Graphs optimizer energy convergence and convergence of most effective
    optimizer.

    Args:
        result_dict (dict): Dictionary returned from run_VQE function

    Returns: None
    """
    result_df = result_dict['result_df']
    algo_results = result_dict['algo_results']
    ref_val = result_dict['reference_val']
    # Optimizer Convergence: one log-scale curve per optimizer showing the
    # absolute energy difference from the exact reference value.
    plt.figure(figsize=(10, 5))
    for opt in result_df.index.unique(0):
        result_df['energy_diff'][opt].plot(logy=True, label=opt)
    plt.title('Optimizer Energy Convergence')
    plt.xlabel('Eval count')
    plt.ylabel('Energy difference from solution reference value')
    plt.legend()
    sns.despine()
    plt.show();
    # Most effective optimizer = smallest final percent error.
    most_eff = sorted([(opt,
                        result_df['percent_error'][opt].iloc[-1]) for opt in result_df.index.unique(0)],
                      key=lambda x: x[1])
    fig, ax = plt.subplots(figsize=(10, 5))
    result_df['convergence_vals'][most_eff[0][0]].plot(label=f"{most_eff[0][0]} = {algo_results[most_eff[0][0]]['energy']:.6f}")
    # Horizontal red line marking the exact (reference) energy.
    ax.hlines(y=ref_val,
              xmin=0,
              xmax=len(result_df['convergence_vals'][most_eff[0][0]]),
              colors='r',
              label=f'Exact = {ref_val:.6f}')
    plt.title('Convergence', size=24)
    plt.xlabel('Optimization Steps', size=18)
    plt.ylabel('Energy', size=18)
    plt.legend()
    sns.despine()
    plt.show();
def plt_state_vector(**result_dict):
    """Plots state vector for each optimizer

    Args:
        result_dict (dict): Dictionary returned from run_VQE function

    Returns: None
    """
    algos = result_dict['algos']
    algo_results = result_dict['algo_results']
    optimizers = result_dict['algos'].keys()
    for optimizer in optimizers:
        # Rebuild the ansatz circuit at the optimal parameters and obtain
        # its exact statevector from the noiseless simulator.
        circ = algos[optimizer].construct_circuit(algo_results[optimizer]['opt_params'])
        e0wf = execute(circ[0],
                       Aer.get_backend('statevector_simulator'),
                       shots=1).result().get_statevector(circ[0])
        plt.figure(figsize=(10,5))
        # Only the real part of the amplitudes is plotted; np.flip reverses
        # the amplitude ordering for display.
        plt.plot(np.flip(e0wf.real))
        plt.title(f'({optimizer}) State Vector', size=24)
        sns.despine()
        plt.show();
# Harmonic-oscillator run: 4-qubit Hamiltonian, RY ansatz of depth 3 on
# the noiseless statevector simulator.
h_osc_params = {
    'file_path': r'JA_harmonic_oscillator_q=4.txt',
    'init_state_name': Zero,
    'vf_name': RY,
    'depth': 3,
    'shots': 1000,
    'simulator': 'statevector_simulator'
}
harmonic_osc = run_VQE(**h_osc_params)
print_table(**harmonic_osc)
for key in harmonic_osc.keys():
    print(f'{key}: {harmonic_osc[key]}')
    print('\n')
visualize_results(**harmonic_osc)
plt_state_vector(**harmonic_osc)
# Analytic variational upper bound for the anharmonic-oscillator ground
# state, compared against a reference value of 1.06 eV.
anh_gs = (3/8) * (6 / (1/2)**2) ** (1/3)
print(f'Upper bound for the ground state energy of the anharmonic oscillator: {anh_gs:.2f} eV')
print(f'Percent error: {abs((anh_gs-1.06)/1.06)*100:.2f}%')
# Anharmonic-oscillator run: deeper ansatz (depth 5) and more shots.
anh_osc_params = {
    'file_path': r'JA_adapted_anharmonic_oscillator_q=4.txt',
    'init_state_name': Zero,
    'vf_name': RY,
    'depth': 5,
    'shots': 5000,
    'simulator': 'statevector_simulator'
}
anharmonic_osc = run_VQE(**anh_osc_params)
print_table(**anharmonic_osc)
visualize_results(**anharmonic_osc)
plt_state_vector(**anharmonic_osc)
%qiskit_copyright
|
https://github.com/JayRGopal/Quantum-Error-Correction
|
JayRGopal
|
!pip install qiskit -q
from qiskit import *
%matplotlib inline
from qiskit.tools.visualization import plot_histogram
# Bernstein-Vazirani: recover a secret bit string with a single oracle query.
secretnumber = '0110110110101011110100101010101001'
# One query qubit per secret bit, plus an ancilla (the last qubit).
circuit = QuantumCircuit(len(secretnumber)+1, len(secretnumber))
circuit.h(range(len(secretnumber)))
# Prepare the ancilla in |-> for phase kickback.
circuit.x(len(secretnumber))
circuit.h(len(secretnumber))
circuit.barrier()
# Oracle: CNOT onto the ancilla from every qubit whose secret bit is 1.
# The string is reversed so that bit i of the secret maps to qubit i.
for ii, yesno in enumerate(reversed(secretnumber)):
    if yesno == '1':
        circuit.cx(ii, len(secretnumber))
circuit.barrier()
# Undo the Hadamards; the query register now holds the secret exactly.
circuit.h(range(len(secretnumber)))
circuit.barrier()
circuit.measure(range(len(secretnumber)), range(len(secretnumber)))
circuit.draw(output='text')
# A single shot suffices: the algorithm is deterministic.
simulator = Aer.get_backend('qasm_simulator')
result = execute(circuit, backend = simulator, shots = 1).result()
counts = result.get_counts()
print(counts)
|
https://github.com/ElePT/qiskit-algorithms-test
|
ElePT
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test TrotterQRTE."""
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
from ddt import ddt, data, unpack
import numpy as np
from scipy.linalg import expm
from numpy.testing import assert_raises
from qiskit_algorithms.time_evolvers import TimeEvolutionProblem, TrotterQRTE
from qiskit.primitives import Estimator
from qiskit import QuantumCircuit
from qiskit.circuit.library import ZGate
from qiskit.quantum_info import Statevector, Pauli, SparsePauliOp
from qiskit.utils import algorithm_globals
from qiskit.circuit import Parameter
from qiskit.opflow import PauliSumOp, X, MatrixOp
from qiskit.synthesis import SuzukiTrotter, QDrift
@ddt
class TestTrotterQRTE(QiskitAlgorithmsTestCase):
    """TrotterQRTE tests."""

    def setUp(self):
        super().setUp()
        self.seed = 50
        algorithm_globals.random_seed = self.seed

    @data(
        (
            None,
            Statevector([0.29192658 - 0.45464871j, 0.70807342 - 0.45464871j]),
        ),
        (
            SuzukiTrotter(),
            Statevector([0.29192658 - 0.84147098j, 0.0 - 0.45464871j]),
        ),
    )
    @unpack
    def test_trotter_qrte_trotter_single_qubit(self, product_formula, expected_state):
        """Test for default TrotterQRTE on a single qubit."""
        with self.assertWarns(DeprecationWarning):
            operator = PauliSumOp(SparsePauliOp([Pauli("X"), Pauli("Z")]))
        initial_state = QuantumCircuit(1)
        time = 1
        evolution_problem = TimeEvolutionProblem(operator, time, initial_state)
        trotter_qrte = TrotterQRTE(product_formula=product_formula)
        evolution_result_state_circuit = trotter_qrte.evolve(evolution_problem).evolved_state
        np.testing.assert_array_almost_equal(
            Statevector.from_instruction(evolution_result_state_circuit).data, expected_state.data
        )

    @data((SparsePauliOp(["X", "Z"]), None), (SparsePauliOp(["X", "Z"]), Parameter("t")))
    @unpack
    def test_trotter_qrte_trotter(self, operator, t_param):
        """Test for default TrotterQRTE on a single qubit with auxiliary operators."""
        # Idiomatic identity check (was `if not t_param is None:`).
        if t_param is not None:
            operator = SparsePauliOp(operator.paulis, np.array([t_param, 1]))
        # LieTrotter with 1 rep
        aux_ops = [Pauli("X"), Pauli("Y")]
        initial_state = QuantumCircuit(1)
        time = 3
        num_timesteps = 2
        evolution_problem = TimeEvolutionProblem(
            operator, time, initial_state, aux_ops, t_param=t_param
        )
        estimator = Estimator()
        # Reference evolution via exact matrix exponentiation.
        expected_psi, expected_observables_result = self._get_expected_trotter_qrte(
            operator,
            time,
            num_timesteps,
            initial_state,
            aux_ops,
            t_param,
        )
        expected_evolved_state = Statevector(expected_psi)
        algorithm_globals.random_seed = 0
        trotter_qrte = TrotterQRTE(estimator=estimator, num_timesteps=num_timesteps)
        evolution_result = trotter_qrte.evolve(evolution_problem)
        np.testing.assert_array_almost_equal(
            Statevector.from_instruction(evolution_result.evolved_state).data,
            expected_evolved_state.data,
        )
        aux_ops_result = evolution_result.aux_ops_evaluated
        expected_aux_ops_result = [
            (expected_observables_result[-1][0], {"variance": 0, "shots": 0}),
            (expected_observables_result[-1][1], {"variance": 0, "shots": 0}),
        ]
        means = [element[0] for element in aux_ops_result]
        expected_means = [element[0] for element in expected_aux_ops_result]
        np.testing.assert_array_almost_equal(means, expected_means)
        vars_and_shots = [element[1] for element in aux_ops_result]
        expected_vars_and_shots = [element[1] for element in expected_aux_ops_result]
        observables_result = evolution_result.observables
        expected_observables_result = [
            [(o, {"variance": 0, "shots": 0}) for o in eor] for eor in expected_observables_result
        ]
        # Flatten the per-timestep observable results before comparing.
        means = [sub_element[0] for element in observables_result for sub_element in element]
        expected_means = [
            sub_element[0] for element in expected_observables_result for sub_element in element
        ]
        np.testing.assert_array_almost_equal(means, expected_means)
        for computed, expected in zip(vars_and_shots, expected_vars_and_shots):
            self.assertAlmostEqual(computed.pop("variance", 0), expected["variance"], 2)
            self.assertEqual(computed.pop("shots", 0), expected["shots"])

    @data(
        (
            PauliSumOp(SparsePauliOp([Pauli("XY"), Pauli("YX")])),
            Statevector([-0.41614684 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.90929743 + 0.0j]),
        ),
        (
            PauliSumOp(SparsePauliOp([Pauli("ZZ"), Pauli("ZI"), Pauli("IZ")])),
            Statevector([-0.9899925 - 0.14112001j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j]),
        ),
        (
            Pauli("YY"),
            Statevector([0.54030231 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.84147098j]),
        ),
    )
    @unpack
    def test_trotter_qrte_trotter_two_qubits(self, operator, expected_state):
        """Test for TrotterQRTE on two qubits with various types of a Hamiltonian."""
        # LieTrotter with 1 rep
        initial_state = QuantumCircuit(2)
        evolution_problem = TimeEvolutionProblem(operator, 1, initial_state)
        trotter_qrte = TrotterQRTE()
        evolution_result = trotter_qrte.evolve(evolution_problem)
        np.testing.assert_array_almost_equal(
            Statevector.from_instruction(evolution_result.evolved_state).data, expected_state.data
        )

    @data(
        (QuantumCircuit(1), Statevector([0.23071786 - 0.69436148j, 0.4646314 - 0.49874749j])),
        (
            QuantumCircuit(1).compose(ZGate(), [0]),
            Statevector([0.23071786 - 0.69436148j, 0.4646314 - 0.49874749j]),
        ),
    )
    @unpack
    def test_trotter_qrte_qdrift(self, initial_state, expected_state):
        """Test for TrotterQRTE with QDrift."""
        with self.assertWarns(DeprecationWarning):
            operator = PauliSumOp(SparsePauliOp([Pauli("X"), Pauli("Z")]))
        time = 1
        evolution_problem = TimeEvolutionProblem(operator, time, initial_state)
        algorithm_globals.random_seed = 0
        trotter_qrte = TrotterQRTE(product_formula=QDrift())
        evolution_result = trotter_qrte.evolve(evolution_problem)
        np.testing.assert_array_almost_equal(
            Statevector.from_instruction(evolution_result.evolved_state).data,
            expected_state.data,
        )

    @data((Parameter("t"), {}), (None, {Parameter("x"): 2}), (None, None))
    @unpack
    def test_trotter_qrte_trotter_param_errors(self, t_param, param_value_dict):
        """Test TrotterQRTE with raising errors for parameters."""
        with self.assertWarns(DeprecationWarning):
            operator = Parameter("t") * PauliSumOp(SparsePauliOp([Pauli("X")])) + PauliSumOp(
                SparsePauliOp([Pauli("Z")])
            )
        initial_state = QuantumCircuit(1)
        self._run_error_test(initial_state, operator, None, None, t_param, param_value_dict)

    @data(([Pauli("X"), Pauli("Y")], None))
    @unpack
    def test_trotter_qrte_trotter_aux_ops_errors(self, aux_ops, estimator):
        """Test TrotterQRTE with raising errors."""
        with self.assertWarns(DeprecationWarning):
            operator = PauliSumOp(SparsePauliOp([Pauli("X")])) + PauliSumOp(
                SparsePauliOp([Pauli("Z")])
            )
        initial_state = QuantumCircuit(1)
        self._run_error_test(initial_state, operator, aux_ops, estimator, None, None)

    @data(
        (X, QuantumCircuit(1)),
        (MatrixOp([[1, 1], [0, 1]]), QuantumCircuit(1)),
        (PauliSumOp(SparsePauliOp([Pauli("X")])) + PauliSumOp(SparsePauliOp([Pauli("Z")])), None),
        (
            SparsePauliOp([Pauli("X"), Pauli("Z")], np.array([Parameter("a"), Parameter("b")])),
            QuantumCircuit(1),
        ),
    )
    @unpack
    def test_trotter_qrte_trotter_hamiltonian_errors(self, operator, initial_state):
        """Test TrotterQRTE with raising errors for evolution problem content."""
        self._run_error_test(initial_state, operator, None, None, None, None)

    @staticmethod
    def _run_error_test(initial_state, operator, aux_ops, estimator, t_param, param_value_dict):
        """Build the evolution problem and assert that it raises ValueError."""
        time = 1
        algorithm_globals.random_seed = 0
        trotter_qrte = TrotterQRTE(estimator=estimator)
        with assert_raises(ValueError):
            evolution_problem = TimeEvolutionProblem(
                operator,
                time,
                initial_state,
                aux_ops,
                t_param=t_param,
                param_value_map=param_value_dict,
            )
            _ = trotter_qrte.evolve(evolution_problem)

    @staticmethod
    def _get_expected_trotter_qrte(operator, time, num_timesteps, init_state, observables, t_param):
        """Compute reference values for Trotter evolution via exact matrix exponentiation."""
        dt = time / num_timesteps
        observables = [obs.to_matrix() for obs in observables]
        psi = Statevector(init_state).data
        if t_param is None:
            ops = [Pauli(op).to_matrix() * np.real(coeff) for op, coeff in operator.to_list()]
        observable_results = []
        observable_results.append([np.real(np.conj(psi).dot(obs).dot(psi)) for obs in observables])
        for n in range(num_timesteps):
            if t_param is not None:
                # Re-bind the time-dependent coefficient at each step.
                time_value = (n + 1) * dt
                bound = operator.assign_parameters([time_value])
                ops = [Pauli(op).to_matrix() * np.real(coeff) for op, coeff in bound.to_list()]
            for op in ops:
                psi = expm(-1j * op * dt).dot(psi)
            observable_results.append(
                [np.real(np.conj(psi).dot(obs).dot(psi)) for obs in observables]
            )
        return psi, observable_results
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
|
https://github.com/shesha-raghunathan/DATE2019-qiskit-tutorial
|
shesha-raghunathan
|
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code for QISKit exercises."></form>''')
from qiskit import *
# Quantum teleportation demo.
# NOTE(review): QuantumProgram is a legacy (pre-Terra 0.5) Qiskit API;
# this cell only runs against that old release.
# Quantum program setup
Q_program = QuantumProgram()
# Creating registers: one classical register per measured qubit so Bob's
# corrections can be conditioned on individual outcomes.
q = Q_program.create_quantum_register('q', 3)
c0 = Q_program.create_classical_register('c0', 1)
c1 = Q_program.create_classical_register('c1', 1)
c2 = Q_program.create_classical_register('c2', 1)
# Creates the quantum circuit
teleport = Q_program.create_circuit('teleport', [q], [c0,c1,c2])
# Make the shared entangled state
teleport.h(q[1])
teleport.cx(q[1], q[2])
# Prepare Alice's qubit
teleport.h(q[0])
# Alice applies teleportation gates (or projects to Bell basis)
teleport.cx(q[0], q[1])
teleport.h(q[0])
# Alice measures her qubits
teleport.measure(q[0], c0[0])
teleport.measure(q[1], c1[0])
# Bob applies certain gates based on the outcome of Alice's measurements
teleport.z(q[2]).c_if(c0, 1)
teleport.x(q[2]).c_if(c1, 1)
# Bob checks the state of the teleported qubit
teleport.measure(q[2], c2[0])
# Shows gates of the circuit
circuits = ['teleport']
print(Q_program.get_qasms(circuits)[0])
# Parameters for execution on simulator
backend = 'local_qasm_simulator'
shots = 1024 # the number of shots in the experiment
# Run the algorithm
result = Q_program.execute(circuits, backend=backend, shots=shots)
#Shows the results obtained from the quantum algorithm
counts = result.get_counts('teleport')
print('\nThe measured outcomes of the circuits are:', counts)
# credits to: https://github.com/QISKit/qiskit-tutorial
# Superdense coding: Alice encodes two classical bits ('01' here) into one
# qubit of a shared Bell pair; Bob decodes with a Bell-basis measurement.
# NOTE(review): `initialize` is a local helper module, not a library import.
from initialize import *
from qiskit import *
#initialize quantum program
my_alg = initialize(circuit_name = 'superdense', qubit_number=2, bit_number=2, backend = 'local_qasm_simulator', shots = 1024)
#add gates to the circuit
#creates a bell pair
my_alg.q_circuit.h(my_alg.q_reg[0]) # applies H gate to first qubit
my_alg.q_circuit.cx(my_alg.q_reg[0],my_alg.q_reg[1]) ## applies CX gate
#Alice encodes 01
my_alg.q_circuit.x(my_alg.q_reg[0])
#to measure in the Bell basis, Bob does the following operations before measuring in the standard basis
my_alg.q_circuit.cx(my_alg.q_reg[0],my_alg.q_reg[1]) ## applies CX gate
my_alg.q_circuit.h(my_alg.q_reg[0]) # applies H gate to first qubit
my_alg.q_circuit.measure(my_alg.q_reg[0], my_alg.c_reg[0]) # measures the first qubit
my_alg.q_circuit.measure(my_alg.q_reg[1], my_alg.c_reg[1]) # measures the second qubit
print('List of gates:')
for circuit in my_alg.q_circuit:
    print(circuit.name)
#Execute the quantum algorithm
result = my_alg.Q_program.execute(my_alg.circ_name, backend=my_alg.backend, shots= my_alg.shots)
#Show the results obtained from the quantum algorithm
counts = result.get_counts(my_alg.circ_name)
print('\nThe measured outcomes of the circuits are:',counts)
# credits to: https://github.com/QISKit/qiskit-tutorial
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import sys
!git checkout 44bfc66a8aef1baf6794ab43ea9c73e5be34ce8e && {sys.executable} -m pip install -e . > /dev/null
import numpy as np
from qiskit import Aer
from qiskit.utils import QuantumInstance
from qiskit_machine_learning.algorithms.classifiers import VQC
# 20 random 2-feature samples; the two halves repeat the same label
# pattern (five 0s followed by five 1s).
features = np.random.rand(20, 2)
target = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
target = np.tile(target, 2)
print(f"Features:\n{features.T}")
print(f"Target:", target)
# One-hot encode the labels for the cross-entropy loss.
onehot_target = np.zeros((target.size, int(target.max() + 1)))
onehot_target[np.arange(target.size), target.astype(int)] = 1
print(onehot_target.T)
backend = Aer.get_backend("aer_simulator_statevector")
quantum_instance = QuantumInstance(backend)
# warm_start=True makes the second fit continue from the first fit's
# optimized parameters instead of restarting from scratch.
vqc = VQC(
    num_qubits=2,
    loss="cross_entropy",
    warm_start=True,
    quantum_instance=quantum_instance,
)
vqc.fit(features[:10, :], onehot_target[:10])
print("First fit complete.")
vqc.fit(features[10:, :], onehot_target[10:])
print("Second fit complete.")
import sys
!git checkout 5a02c7639db6a86beb9a944b14721935a48932e6 && {sys.executable} -m pip install -e . > /dev/null
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# Uhrig dynamical decoupling (UDD) pulse-schedule demo.
import numpy as np
from qiskit import pulse
d0 = pulse.DriveChannel(0)
# pi/2 and pi pulses (Gaussian envelope: duration, amplitude, sigma).
x90 = pulse.Gaussian(10, 0.1, 3)
x180 = pulse.Gaussian(10, 0.2, 3)
def udd10_pos(j):
    """Fractional position of the j-th pi pulse in a 10-pulse UDD sequence.

    Uses the Uhrig spacing sin^2(pi * j / (2N + 2)) with N = 10 pulses,
    so j runs from 1 to 10 over the interior of the (0, 1) interval.
    """
    return np.sin(j * np.pi / 22) ** 2
# Sandwich ten pi pulses between two pi/2 pulses, placing each pi pulse
# at its UDD fraction of the 300-dt window via align_func.
with pulse.build() as udd_sched:
    pulse.play(x90, d0)
    with pulse.align_func(duration=300, func=udd10_pos):
        for _ in range(10):
            pulse.play(x180, d0)
    pulse.play(x90, d0)
udd_sched.draw()
|
https://github.com/shesha-raghunathan/DATE2019-qiskit-tutorial
|
shesha-raghunathan
|
import numpy as np
# Bra-ket basics with plain numpy: kets are column vectors, bras are their
# conjugate transposes.
zero_ket = np.array([[1], [0]])
print("|0> ket:\n", zero_ket)
print("<0| bra:\n", zero_ket.T.conj())
# <0|0> = 1: the state is normalized.
zero_ket.T.conj().dot(zero_ket)
one_ket = np.array([[0], [1]])
# <0|1> = 0: the basis states are orthogonal.
zero_ket.T.conj().dot(one_ket)
# |0><0|: the projector onto the |0> subspace.
zero_ket.dot(zero_ket.T.conj())
# Probability of measuring 0 in the equal superposition: <psi|P0|psi>.
ψ = np.array([[1], [1]])/np.sqrt(2)
Π_0 = zero_ket.dot(zero_ket.T.conj())
ψ.T.conj().dot(Π_0.dot(ψ))
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute
from qiskit import BasicAer
from qiskit.tools.visualization import plot_histogram
# The same experiment on a simulator: H then measure gives ~50/50 counts.
backend = BasicAer.get_backend('qasm_simulator')
q = QuantumRegister(1)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
# Post-measurement state: project with P0 and renormalize.
ψ = np.array([[np.sqrt(2)/2], [np.sqrt(2)/2]])
Π_0 = zero_ket.dot(zero_ket.T.conj())
probability_0 = ψ.T.conj().dot(Π_0.dot(ψ))
Π_0.dot(ψ)/np.sqrt(probability_0)
# Measuring the same qubit twice yields identical bits: the first
# measurement collapses the state.
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q[0], c[0])
circuit.measure(q[0], c[1])
job = execute(circuit, backend, shots=100)
job.result().get_counts(circuit)
# Two independent qubits: only the first is put in superposition.
q = QuantumRegister(2)
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
# Entangled pair: H plus CNOT leaves only the 00 and 11 outcomes.
q = QuantumRegister(2)
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.cx(q[0], q[1])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
# Density-matrix picture: rho = |psi><psi|, P(0) = Tr(P0 rho).
ψ = np.array([[1], [1]])/np.sqrt(2)
ρ = ψ.dot(ψ.T.conj())
Π_0 = zero_ket.dot(zero_ket.T.conj())
np.trace(Π_0.dot(ρ))
# Post-measurement density matrix: P0 rho P0 / P(0).
probability_0 = np.trace(Π_0.dot(ρ))
Π_0.dot(ρ).dot(Π_0)/probability_0
# A pure superposition and a classical 50/50 mixture share the same
# diagonal but differ in the off-diagonal coherences.
zero_ket = np.array([[1], [0]])
one_ket = np.array([[0], [1]])
ψ = (zero_ket + one_ket)/np.sqrt(2)
print("Density matrix of the equal superposition")
print(ψ.dot(ψ.T.conj()))
print("Density matrix of the equally mixed state of |0><0| and |1><1|")
print((zero_ket.dot(zero_ket.T.conj())+one_ket.dot(one_ket.T.conj()))/2)
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# Transpile a small two-qubit circuit to the native gate set and coupling
# map of a fake 5-qubit Vigo backend, then draw the result.
from qiskit import transpile
from qiskit import QuantumCircuit
from qiskit.providers.fake_provider import FakeVigoV2
backend = FakeVigoV2()
qc = QuantumCircuit(2, 1)
qc.h(0)
qc.x(1)
# NOTE(review): relies on `np` (numpy) being imported earlier in the file.
qc.cp(np.pi/4, 0, 1)
qc.h(0)
qc.measure([0], [0])
qc_basis = transpile(qc, backend)
qc_basis.draw(output='mpl')
|
https://github.com/rubenandrebarreiro/summer-school-on-quantum-computing-software-for-near-term-quantum-devices-2020
|
rubenandrebarreiro
|
# Portfolio optimization with VQE/QAOA (legacy Qiskit Aqua/Finance APIs).
from qiskit import Aer
from qiskit.circuit.library import TwoLocal
from qiskit.aqua import QuantumInstance
from qiskit.finance.applications.ising import portfolio
from qiskit.optimization.applications.ising.common import sample_most_likely
from qiskit.finance.data_providers import RandomDataProvider
from qiskit.aqua.algorithms import VQE, QAOA, NumPyMinimumEigensolver
from qiskit.aqua.components.optimizers import COBYLA
import numpy as np
import warnings
import matplotlib.pyplot as plt
import datetime
warnings.filterwarnings('ignore')
# set number of assets (= number of qubits)
num_assets = 5
# Generate expected return and covariance matrix from (random) time-series
stocks = [("TICKER%s" % i) for i in range(num_assets)]
data = RandomDataProvider(tickers=stocks,
                          start=datetime.datetime(2016,1,1),
                          end=datetime.datetime(2016,1,30))
data.run()
mu = data.get_period_return_mean_vector()
sigma = data.get_period_return_covariance_matrix()
# plot sigma
plt.imshow(sigma, interpolation='nearest')
plt.show()
q = 0.5 # set risk factor
budget = num_assets // 2 # set budget
penalty = num_assets # set parameter to scale the budget penalty term
# Encode mean-variance portfolio selection as an Ising Hamiltonian.
qubitOp, offset = portfolio.get_operator(mu, sigma, q, budget, penalty)
def index_to_selection(i, num_assets):
    """Convert a basis-state index into an asset-selection vector.

    Args:
        i (int): Index of the computational basis state (0 <= i < 2**num_assets).
        num_assets (int): Number of assets (qubits).

    Returns:
        numpy.ndarray: 0/1 vector of length ``num_assets``; entry k is 1
        when asset k is selected, i.e. bit k of ``i`` is set.
    """
    # zfill pads with '0' (the original used rjust, which pads with spaces
    # and only worked because ' ' != '1'); the loop variable is renamed so
    # it no longer shadows the parameter `i`.
    bits = "{0:b}".format(i).zfill(num_assets)
    return np.array([1 if bits[k] == '1' else 0 for k in reversed(range(num_assets))])
def print_result(result):
    """Print the optimal selection plus a probability-ranked result table.

    Uses the module-level problem data (mu, sigma, q, budget, penalty,
    num_assets) to evaluate each candidate selection.

    Args:
        result: Eigensolver result with an ``eigenstate`` attribute
            (either a numpy array or an object exposing ``to_matrix``).
    """
    selection = sample_most_likely(result.eigenstate)
    value = portfolio.portfolio_value(selection, mu, sigma, q, budget, penalty)
    print('Optimal: selection {}, value {:.4f}'.format(selection, value))
    eigenvector = result.eigenstate if isinstance(result.eigenstate, np.ndarray) else result.eigenstate.to_matrix()
    probabilities = np.abs(eigenvector)**2
    # Rank all basis states from most to least probable.
    i_sorted = reversed(np.argsort(probabilities))
    print('\n----------------- Full result ---------------------')
    print('selection\tvalue\t\tprobability')
    print('---------------------------------------------------')
    for i in i_sorted:
        x = index_to_selection(i, num_assets)
        value = portfolio.portfolio_value(x, mu, sigma, q, budget, penalty)
        probability = probabilities[i]
        print('%10s\t%.4f\t\t%.4f' %(x, value, probability))
# Classical baseline: exact minimum eigensolver.
exact_eigensolver = NumPyMinimumEigensolver(qubitOp)
result = exact_eigensolver.run()
print_result(result)
# VQE with a linear-entanglement RY/CZ ansatz and COBYLA, seeded for
# reproducibility.
backend = Aer.get_backend('statevector_simulator')
seed = 50
cobyla = COBYLA()
cobyla.set_options(maxiter=500)
ry = TwoLocal(qubitOp.num_qubits, 'ry', 'cz', reps=5, entanglement='linear')
vqe = VQE(qubitOp, ry, cobyla)
vqe.random_seed = seed
quantum_instance = QuantumInstance(backend=backend, seed_simulator=seed, seed_transpiler=seed)
result = vqe.run(quantum_instance)
print_result(result)
# QAOA with p = 3 layers and COBYLA on the same problem.
backend = Aer.get_backend('statevector_simulator')
seed = 50
cobyla = COBYLA()
cobyla.set_options(maxiter=250)
qaoa = QAOA(qubitOp, cobyla, 3)
qaoa.random_seed = seed
quantum_instance = QuantumInstance(backend=backend, seed_simulator=seed, seed_transpiler=seed)
result = qaoa.run(quantum_instance)
print_result(result)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/steffencruz/FockWits
|
steffencruz
|
import sys
# Make a local qiskit-terra checkout importable (developer machine path).
sys.path.append("/home/artix41/Toronto/qiskit-terra/")
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import Aer, execute
import numpy as np
import matplotlib.pyplot as plt
from CVCircuit import CVCircuit
def bose_hubard(n_layers=2, J=1, U=0.1, t=0):
    """Simulate a two-mode Bose-Hubbard model on a CV (qumode) circuit.

    Args:
        n_layers (int): number of Trotter layers approximating the evolution.
        J (float): hopping strength between the two modes.
        U (float): on-site interaction strength.
        t (float): total evolution time.
    """
    # ===== Constants =====
    n_qubits_per_mode = 3
    n_qumodes = 2
    # ==== Initialize circuit =====
    qr = QuantumRegister(n_qubits_per_mode * n_qumodes)
    cr = ClassicalRegister(n_qubits_per_mode * n_qumodes)
    circuit = QuantumCircuit(qr, cr)
    cv_circuit = CVCircuit(circuit, qr, n_qubits_per_mode)
    # ==== Build circuit ====
    cv_circuit.initialize([0, 0])
    for _ in range(n_layers):
        # Per-layer Trotter step: beamsplitter (hopping) followed by
        # Kerr and rotation gates on both modes (interaction).
        phi = -1j * J * t / n_layers
        r = -U * t / (2 * n_layers)
        cv_circuit.BSGate(phi, (0, 1))
        cv_circuit.KGate(r, 0)
        cv_circuit.KGate(-r, 1)
        cv_circuit.RGate(r, 0)
        cv_circuit.RGate(-r, 1)
    print(circuit)
    # ==== Compilation =====
    backend = Aer.get_backend('statevector_simulator')
    job = execute(circuit, backend)
    state = job.result().get_statevector(circuit)
    # ==== Tests ====
    print(state)
    prob = np.abs(state) ** 2
    plt.plot(prob, '-o')
    plt.show()
if __name__ == '__main__':
    bose_hubard(t=1)
# def BHM(self,tmax,k,J=1,U=0.1):
# #Implement simple two-mode Bose-Hubbard simulation
# j = np.complex(0,1)
# BKR = {}
# for t in np.linspace(0,tmax,10):
# for layer in range(k):
# phi = -j*J*t/k
# r = -U*t/(2*k)
# BS = self.BS(phi)
# K = np.kron(self.mode.K(r),self.mode.K(r))
# R = np.kron(self.mode.R(-r),self.mode.R(-r))
# BK = np.matmul(BS,K)
# BKR[t] = np.matmul(BK,R)
# return BKR
|
https://github.com/lynnlangit/learning-quantum
|
lynnlangit
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import cirq, installing it on the fly when the notebook environment lacks it.
# (The "!pip" line is an IPython shell escape and only works in notebooks.)
try:
    import cirq
except ImportError:
    print("installing cirq...")
    !pip install --quiet cirq
    print("installed cirq.")
    import cirq
qubit = cirq.NamedQubit("myqubit")
# creates an equal superposition of |0> and |1> when simulated
circuit = cirq.Circuit(cirq.H(qubit))
# see the "myqubit" identifier at the left of the circuit
print(circuit)
# run simulation
result = cirq.Simulator().simulate(circuit)
print("result:")
print(result)
## Programming Quantum Computers
## by Eric Johnston, Nic Harrigan and Mercedes Gimeno-Segovia
## O'Reilly Media
##
## More samples like this can be found at http://oreilly-qc.github.io
import cirq
## This sample generates a single random bit.
## Example 2-1: Random bit
# Set up the program
def main():
    """Generate one random bit on the book-style QPU wrapper."""
    qc = QPU()
    qc.reset(1)
    qc.had()   # superpose the single qubit between 0 and 1
    qc.read()  # measure it back to a classical bit
    qc.draw()  # show the assembled circuit
    print(qc.run())
######################################################################
## The below class is a light interface, to convert the
## book's syntax into the syntax used by Cirq.
class QPU:
    """Thin adapter translating the book's QPU syntax into Cirq calls."""

    def __init__(self):
        self.circuit = cirq.Circuit()
        self.simulator = cirq.Simulator()
        self.qubits = None

    def reset(self, num_qubits):
        # Allocate a fresh column of grid qubits.
        self.qubits = [cirq.GridQubit(row, 0) for row in range(num_qubits)]

    def mask_to_list(self, mask):
        # Select the qubits whose bit position is set in the mask.
        return [qubit for bit, qubit in enumerate(self.qubits) if (1 << bit) & mask]

    def had(self, target_mask=~0):
        # Hadamard on every qubit selected by the mask (default: all).
        targets = self.mask_to_list(target_mask)
        self.circuit.append(cirq.H.on_each(*targets))

    def read(self, target_mask=~0, key=None):
        if key is None:
            key = 'result'
        targets = self.mask_to_list(target_mask)
        self.circuit.append(cirq.measure(*targets, key=key))

    def draw(self):
        print('Circuit:\n{}'.format(self.circuit))

    def run(self):
        return self.simulator.simulate(self.circuit)
# Script entry point.
if __name__ == '__main__':
    main()
|
https://github.com/zapata-engineering/orquestra-qiskit
|
zapata-engineering
|
################################################################################
# © Copyright 2021-2022 Zapata Computing Inc.
################################################################################
"""Translations between Qiskit parameter expressions and intermediate expression trees.
"""
import operator
from functools import lru_cache, reduce, singledispatch
from numbers import Number
import qiskit
from orquestra.quantum.circuits.symbolic.expressions import ExpressionDialect, reduction
from orquestra.quantum.circuits.symbolic.sympy_expressions import expression_from_sympy
from ._symengine_expressions import expression_from_symengine
@singledispatch
def expression_from_qiskit(expression):
    """Parse Qiskit expression into intermediate expression tree.

    Generic fallback for the ``singledispatch``; concrete handlers are
    registered below. Unregistered types raise ``NotImplementedError``.
    """
    raise NotImplementedError(
        f"Expression {expression} of type {type(expression)} is currently not supported"
    )
@expression_from_qiskit.register
def _number_identity(number: Number):
    # Plain numbers are already valid leaves of the expression tree.
    return number
@expression_from_qiskit.register
def _expr_from_qiskit_param_expr(
    qiskit_expr: qiskit.circuit.parameterexpression.ParameterExpression,
):
    # At the moment of writing this the qiskit version that we use (0.23.2) as well
    # as the newest version 0.23.5) does not provide a better way to access symbolic
    # expression wrapped by ParameterExpression.
    inner_expr = qiskit_expr._symbol_expr
    # The wrapped expression may come from either symengine or sympy; try the
    # symengine parser first and fall back to sympy.
    try:
        return expression_from_symengine(inner_expr)
    except NotImplementedError:
        return expression_from_sympy(inner_expr)
def integer_pow(base, exponent: int):
    """Raise ``base`` to the integral power ``exponent`` via repeated multiplication.

    Negative exponents are rewritten as positive powers of ``1 / base``;
    non-integral exponents and ``0 ** negative`` are rejected.
    """
    if not isinstance(exponent, int):
        raise ValueError(
            f"Cannot convert expression {base} ** {exponent} to Qiskit. "
            "Only powers with integral exponent are convertible."
        )
    if exponent < 0:
        if base == 0:
            raise ValueError(
                f"Invalid power: cannot raise 0 to exponent {exponent} < 0."
            )
        base, exponent = 1 / base, -exponent
    result = 1
    for _ in range(exponent):
        result = result * base
    return result
@lru_cache(maxsize=None)
def _qiskit_param_from_name(name):
    # Cached so repeated occurrences of the same symbol name resolve to the
    # identical Parameter object — presumably required because distinct
    # Parameter instances are not interchangeable; TODO confirm against
    # qiskit.circuit.Parameter equality semantics.
    return qiskit.circuit.Parameter(name)
QISKIT_DIALECT = ExpressionDialect(
    # Symbols become (cached) qiskit circuit Parameters.
    symbol_factory=lambda symbol: _qiskit_param_from_name(symbol.name),
    # Numbers pass through unchanged.
    number_factory=lambda number: number,
    known_functions={
        "add": reduction(operator.add),
        "mul": reduction(operator.mul),
        "div": operator.truediv,
        "sub": operator.sub,
        # Only integral exponents are supported in the Qiskit dialect.
        "pow": integer_pow,
    },
)
"""Mapping from the intermediate expression tree into Qiskit symbolic expression atoms.
Allows translating an expression into the Qiskit dialect. Can be used with
`orquestra.quantum.circuits.symbolic.translations.translate_expression`.
"""
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# Connect to the IBM Quantum account and fetch the hosted QAOA runtime program.
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub="ibm-q", group="open", project="main")
program_id = "qaoa"
qaoa_program = provider.runtime.program(program_id)
print(f"Program name: {qaoa_program.name}, Program id: {qaoa_program.program_id}")
print(qaoa_program.parameters())
import numpy as np
from qiskit.tools import job_monitor
from qiskit.opflow import PauliSumOp, Z, I
from qiskit.algorithms.optimizers import SPSA
# Define the cost operator to run.
op = (
    (Z ^ Z ^ I ^ I ^ I)
    - (I ^ I ^ Z ^ Z ^ I)
    + (I ^ I ^ Z ^ I ^ Z)
    - (Z ^ I ^ Z ^ I ^ I)
    - (I ^ Z ^ Z ^ I ^ I)
    + (I ^ Z ^ I ^ Z ^ I)
    + (I ^ I ^ I ^ Z ^ Z)
)
# SPSA helps deal with noisy environments.
optimizer = SPSA(maxiter=100)
# We will run a depth two QAOA.
reps = 2
# The initial point for the optimization, chosen at random.
initial_point = np.random.random(2 * reps)
# The backend that will run the program.
options = {"backend_name": "ibmq_qasm_simulator"}
# The inputs of the program as described above.
runtime_inputs = {
    "operator": op,
    "reps": reps,
    "optimizer": optimizer,
    "initial_point": initial_point,
    "shots": 2**13,
    # Set to True when running on real backends to reduce circuit
    # depth by leveraging swap strategies. If False the
    # given optimization_level (default is 1) will be used.
    "use_swap_strategies": False,
    # Set to True when optimizing sparse problems.
    "use_initial_mapping": False,
    # Set to true when using echoed-cross-resonance hardware.
    "use_pulse_efficient": False,
}
# Submit the runtime job and block until it completes.
job = provider.runtime.run(
    program_id=program_id,
    options=options,
    inputs=runtime_inputs,
)
job_monitor(job)
print(f"Job id: {job.job_id()}")
print(f"Job status: {job.status()}")
result = job.result()
from collections import defaultdict
def op_adj_mat(op: PauliSumOp) -> np.array:
    """Extract the adjacency matrix from the op.

    Each term's real coefficient becomes a symmetric edge weight between the
    two qubits on which the term has a ``Z``.
    """
    n = op.num_qubits
    adj_mat = np.zeros((n, n))
    for pauli, coeff in op.primitive.to_list():
        # Reverse the Pauli string so position i corresponds to qubit i.
        z_idx = [i for i, c in enumerate(reversed(pauli)) if c == "Z"]
        weight = np.real(coeff)
        adj_mat[z_idx[0], z_idx[1]] = weight
        adj_mat[z_idx[1], z_idx[0]] = weight
    return adj_mat
def get_cost(bit_str: str, adj_mat: np.array) -> float:
    """Return the cut value of the bit string.

    The bit string is read little-endian (last character is qubit 0) and the
    cut value is ``sum_ij A[i, j] * x_i * (1 - x_j)``.
    """
    bits = [int(bit) for bit in reversed(bit_str)]
    total = 0
    for i, x_i in enumerate(bits):
        for j, x_j in enumerate(bits):
            total += adj_mat[i, j] * x_i * (1 - x_j)
    return total
def get_cut_distribution(result) -> dict:
    """Extract the cut distribution from the result.
    Returns:
        A dict of cut value: probability.
    """
    adj_mat = op_adj_mat(PauliSumOp.from_list(result["inputs"]["operator"]))
    # Accumulate the probability mass (amplitude squared, in percent) of
    # every sampled bit string onto its cut value.
    distribution = defaultdict(int)
    for bit_str, amp in result["eigenstate"].items():
        distribution[get_cost(bit_str, adj_mat)] += amp**2 * 100
    return dict(distribution)
import matplotlib.pyplot as plt
# Plot the optimizer's energy trajectory and the resulting cut distribution.
cut_vals = get_cut_distribution(result)
fig, axs = plt.subplots(1, 2, figsize=(14, 5))
axs[0].plot(result["optimizer_history"]["energy"])
axs[1].bar(list(cut_vals.keys()), list(cut_vals.values()))
axs[0].set_xlabel("Energy evaluation number")
axs[0].set_ylabel("Energy")
axs[1].set_xlabel("Cut value")
axs[1].set_ylabel("Probability")
# Solve a small QUBO through the hosted QAOA runtime via the client wrapper.
from qiskit_optimization.runtime import QAOAClient
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit_optimization import QuadraticProgram
qubo = QuadraticProgram()
qubo.binary_var("x")
qubo.binary_var("y")
qubo.binary_var("z")
qubo.minimize(linear=[1, -2, 3], quadratic={("x", "y"): 1, ("x", "z"): -1, ("y", "z"): 2})
print(qubo.prettyprint())
qaoa_mes = QAOAClient(
    provider=provider, backend=provider.get_backend("ibmq_qasm_simulator"), reps=2, alpha=0.75
)
qaoa = MinimumEigenOptimizer(qaoa_mes)
result = qaoa.solve(qubo)
print(result.prettyprint())
from qiskit.transpiler import PassManager
from qiskit.circuit.library.standard_gates.equivalence_library import (
    StandardEquivalenceLibrary as std_eqlib,
)
from qiskit.transpiler.passes import (
    Collect2qBlocks,
    ConsolidateBlocks,
    UnrollCustomDefinitions,
    BasisTranslator,
    Optimize1qGatesDecomposition,
)
from qiskit.transpiler.passes.calibration.builders import RZXCalibrationBuilderNoEcho
from qiskit.transpiler.passes.optimization.echo_rzx_weyl_decomposition import (
    EchoRZXWeylDecomposition,
)
from qiskit.test.mock import FakeBelem
backend = FakeBelem()
# Pulse-level details of the mocked backend, needed by the calibration pass.
inst_map = backend.defaults().instruction_schedule_map
channel_map = backend.configuration().qubit_channel_mapping
rzx_basis = ["rzx", "rz", "x", "sx"]
# Custom pass pipeline producing pulse-efficient RZX-based circuits.
pulse_efficient = PassManager(
    [
        # Consolidate consecutive two-qubit operations.
        Collect2qBlocks(),
        ConsolidateBlocks(basis_gates=["rz", "sx", "x", "rxx"]),
        # Rewrite circuit in terms of Weyl-decomposed echoed RZX gates.
        EchoRZXWeylDecomposition(backend.defaults().instruction_schedule_map),
        # Attach scaled CR pulse schedules to the RZX gates.
        RZXCalibrationBuilderNoEcho(
            instruction_schedule_map=inst_map, qubit_channel_mapping=channel_map
        ),
        # Simplify single-qubit gates.
        UnrollCustomDefinitions(std_eqlib, rzx_basis),
        BasisTranslator(std_eqlib, rzx_basis),
        Optimize1qGatesDecomposition(rzx_basis),
    ]
)
# Build a small demo circuit and run it through the pipeline.
from qiskit import QuantumCircuit
circ = QuantumCircuit(3)
circ.h([0, 1, 2])
circ.rzx(0.5, 0, 1)
circ.swap(0, 1)
circ.cx(2, 1)
circ.rz(0.4, 1)
circ.cx(2, 1)
circ.rx(1.23, 2)
circ.cx(2, 1)
circ.draw("mpl")
pulse_efficient.run(circ).draw("mpl", fold=False)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/BOBO1997/osp_solutions
|
BOBO1997
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import numpy as np
from qiskit import compiler, BasicAer, QuantumRegister
from qiskit.converters import circuit_to_dag
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
def convert_to_basis_gates(circuit):
    """Unroll ``circuit`` into the u1, u2, u3, cx and id basis gates."""
    unroll_pass = Unroller(basis=['u1', 'u2', 'u3', 'cx', 'id'])
    return compiler.transpile(
        circuit,
        BasicAer.get_backend('qasm_simulator'),
        pass_manager=PassManager(passes=[unroll_pass]),
    )
def is_qubit(qb):
    # A qubit is encoded as a (QuantumRegister, int) pair.
    if not isinstance(qb, tuple):
        return False
    return isinstance(qb[0], QuantumRegister) and isinstance(qb[1], int)
def is_qubit_list(qbs):
    """Return True if every element of ``qbs`` is a qubit tuple.

    An empty iterable is vacuously considered a valid qubit list, matching
    the previous loop-based behavior.
    """
    return all(is_qubit(qb) for qb in qbs)
def summarize_circuits(circuits):
    """Build a human-readable summary of one or more circuits.

    For every circuit the qubit count, classical-bit count, operation count
    and depth are reported; averages are appended when more than one circuit
    is given.

    Args:
        circuits (QuantumCircuit or [QuantumCircuit]): the to-be-summarized circuits

    Returns:
        str: the formatted summary.
    """
    if not isinstance(circuits, list):
        circuits = [circuits]
    separator = "============================================================================\n"
    parts = ["Submitting {} circuits.\n".format(len(circuits)), separator]
    totals = np.zeros(4)
    for i, circuit in enumerate(circuits):
        dag = circuit_to_dag(circuit)
        width = dag.width()
        classical_bits = dag.num_cbits()
        size = dag.size()
        depth = dag.depth()
        totals += np.array([width, classical_bits, size, depth])
        parts.append(
            "{}-th circuit: {} qubits, {} classical bits and {} operations with depth {}\n op_counts: {}\n".format(
                i, width, classical_bits, size, depth, dag.count_ops()
            )
        )
    if len(circuits) > 1:
        averages = totals / len(circuits)
        parts.append(
            "Average: {:.2f} qubits, {:.2f} classical bits and {:.2f} operations with depth {:.2f}\n".format(
                averages[0], averages[1], averages[2], averages[3]
            )
        )
    parts.append(separator)
    return ''.join(parts)
|
https://github.com/swe-bench/Qiskit__qiskit
|
swe-bench
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Durations of instructions, one of transpiler configurations."""
from __future__ import annotations
from typing import Optional, List, Tuple, Union, Iterable
import qiskit.circuit
from qiskit.circuit import Barrier, Delay
from qiskit.circuit import Instruction, Qubit, ParameterExpression
from qiskit.circuit.duration import duration_in_dt
from qiskit.providers import Backend
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.utils.deprecation import deprecate_arg
from qiskit.utils.units import apply_prefix
def _is_deprecated_qubits_argument(qubits: Union[int, list[int], Qubit, list[Qubit]]) -> bool:
    # Deprecation predicate: True when the caller passed Qubit object(s)
    # rather than integer indices.
    probe = qubits if isinstance(qubits, (int, Qubit)) else qubits[0]
    return isinstance(probe, Qubit)
class InstructionDurations:
    """Helper class to provide durations of instructions for scheduling.
    It stores durations (gate lengths) and dt to be used at the scheduling stage of transpiling.
    It can be constructed from ``backend`` or ``instruction_durations``,
    which is an argument of :func:`transpile`. The duration of an instruction depends on the
    instruction (given by name), the qubits, and optionally the parameters of the instruction.
    Note that these fields are used as keys in dictionaries that are used to retrieve the
    instruction durations. Therefore, users must use the exact same parameter value to retrieve
    an instruction duration as the value with which it was added.
    """
    def __init__(
        self, instruction_durations: "InstructionDurationsType" | None = None, dt: float = None
    ):
        # Durations are stored at three levels of specificity and looked up
        # most-specific first: (name, qubits, params) -> (name, qubits) -> name.
        # Each value is a (duration, unit) pair.
        self.duration_by_name: dict[str, tuple[float, str]] = {}
        self.duration_by_name_qubits: dict[tuple[str, tuple[int, ...]], tuple[float, str]] = {}
        self.duration_by_name_qubits_params: dict[
            tuple[str, tuple[int, ...], tuple[float, ...]], tuple[float, str]
        ] = {}
        self.dt = dt
        if instruction_durations:
            self.update(instruction_durations)
    def __str__(self):
        """Return a string representation of all stored durations."""
        # NOTE(review): entries keyed by (name, qubits, params) are not
        # included in this listing — confirm whether that is intentional.
        string = ""
        for k, v in self.duration_by_name.items():
            string += k
            string += ": "
            string += str(v[0]) + " " + v[1]
            string += "\n"
        for k, v in self.duration_by_name_qubits.items():
            string += k[0] + str(k[1])
            string += ": "
            string += str(v[0]) + " " + v[1]
            string += "\n"
        return string
    @classmethod
    def from_backend(cls, backend: Backend):
        """Construct an :class:`InstructionDurations` object from the backend.
        Args:
            backend: backend from which durations (gate lengths) and dt are extracted.
        Returns:
            InstructionDurations: The InstructionDurations constructed from backend.
        Raises:
            TranspilerError: If dt and dtm is different in the backend.
        """
        # All durations in seconds in gate_length
        instruction_durations = []
        backend_properties = backend.properties()
        if hasattr(backend_properties, "_gates"):
            for gate, insts in backend_properties._gates.items():
                for qubits, props in insts.items():
                    if "gate_length" in props:
                        gate_length = props["gate_length"][0]  # Throw away datetime at index 1
                        instruction_durations.append((gate, qubits, gate_length, "s"))
            # Measurement durations come from the per-qubit readout lengths.
            for q, props in backend.properties()._qubits.items():
                if "readout_length" in props:
                    readout_length = props["readout_length"][0]  # Throw away datetime at index 1
                    instruction_durations.append(("measure", [q], readout_length, "s"))
        # Backends without a pulse-level configuration have no dt.
        try:
            dt = backend.configuration().dt
        except AttributeError:
            dt = None
        return InstructionDurations(instruction_durations, dt=dt)
    def update(self, inst_durations: "InstructionDurationsType" | None, dt: float = None):
        """Update self with inst_durations (inst_durations overwrite self).
        Args:
            inst_durations: Instruction durations to be merged into self (overwriting self).
            dt: Sampling duration in seconds of the target backend.
        Returns:
            InstructionDurations: The updated InstructionDurations.
        Raises:
            TranspilerError: If the format of instruction_durations is invalid.
        """
        if dt:
            self.dt = dt
        if inst_durations is None:
            return self
        if isinstance(inst_durations, InstructionDurations):
            self.duration_by_name.update(inst_durations.duration_by_name)
            self.duration_by_name_qubits.update(inst_durations.duration_by_name_qubits)
            self.duration_by_name_qubits_params.update(
                inst_durations.duration_by_name_qubits_params
            )
        else:
            # Normalize each entry to the 5-tuple
            # (inst_name, qubits, duration, parameters, unit).
            # NOTE(review): this normalization rewrites the caller's list in
            # place — confirm callers don't rely on their original entries.
            for i, items in enumerate(inst_durations):
                if not isinstance(items[-1], str):
                    items = (*items, "dt")  # set default unit
                if len(items) == 4:  # (inst_name, qubits, duration, unit)
                    inst_durations[i] = (*items[:3], None, items[3])
                else:
                    inst_durations[i] = items
                # assert (inst_name, qubits, duration, parameters, unit)
                if len(inst_durations[i]) != 5:
                    raise TranspilerError(
                        "Each entry of inst_durations dictionary must be "
                        "(inst_name, qubits, duration) or "
                        "(inst_name, qubits, duration, unit) or"
                        "(inst_name, qubits, duration, parameters) or"
                        "(inst_name, qubits, duration, parameters, unit) "
                        f"received {inst_durations[i]}."
                    )
                if inst_durations[i][2] is None:
                    raise TranspilerError(f"None duration for {inst_durations[i]}.")
            # File each normalized entry under the most specific key available.
            for name, qubits, duration, parameters, unit in inst_durations:
                if isinstance(qubits, int):
                    qubits = [qubits]
                if isinstance(parameters, (int, float)):
                    parameters = [parameters]
                if qubits is None:
                    self.duration_by_name[name] = duration, unit
                elif parameters is None:
                    self.duration_by_name_qubits[(name, tuple(qubits))] = duration, unit
                else:
                    key = (name, tuple(qubits), tuple(parameters))
                    self.duration_by_name_qubits_params[key] = duration, unit
        return self
    @deprecate_arg(
        "qubits",
        deprecation_description=(
            "Using a Qubit or List[Qubit] for the ``qubits`` argument to InstructionDurations.get()"
        ),
        additional_msg="Instead, use an integer for the qubit index.",
        since="0.19.0",
        predicate=_is_deprecated_qubits_argument,
    )
    def get(
        self,
        inst: str | qiskit.circuit.Instruction,
        qubits: int | list[int] | Qubit | list[Qubit] | list[int | Qubit],
        unit: str = "dt",
        parameters: list[float] | None = None,
    ) -> float:
        """Get the duration of the instruction with the name, qubits, and parameters.
        Some instructions may have a parameter dependent duration.
        Args:
            inst: An instruction or its name to be queried.
            qubits: Qubits or its indices that the instruction acts on.
            unit: The unit of duration to be returned. It must be 's' or 'dt'.
            parameters: The value of the parameters of the desired instruction.
        Returns:
            float|int: The duration of the instruction on the qubits.
        Raises:
            TranspilerError: No duration is defined for the instruction.
        """
        # Barriers take no time; delays carry their own duration and unit.
        if isinstance(inst, Barrier):
            return 0
        elif isinstance(inst, Delay):
            return self._convert_unit(inst.duration, inst.unit, unit)
        if isinstance(inst, Instruction):
            inst_name = inst.name
        else:
            inst_name = inst
        # Accept a single qubit/index or Qubit objects (deprecated) and
        # normalize to a list of integer indices.
        if isinstance(qubits, (int, Qubit)):
            qubits = [qubits]
        if isinstance(qubits[0], Qubit):
            qubits = [q.index for q in qubits]
        try:
            return self._get(inst_name, qubits, unit, parameters)
        except TranspilerError as ex:
            raise TranspilerError(
                f"Duration of {inst_name} on qubits {qubits} is not found."
            ) from ex
    def _get(
        self,
        name: str,
        qubits: list[int],
        to_unit: str,
        parameters: Iterable[float] | None = None,
    ) -> float:
        """Get the duration of the instruction with the name, qubits, and parameters."""
        if name == "barrier":
            return 0
        if parameters is not None:
            key = (name, tuple(qubits), tuple(parameters))
        else:
            key = (name, tuple(qubits))
        # Fall back from the most specific key to the name-only entry.
        if key in self.duration_by_name_qubits_params:
            duration, unit = self.duration_by_name_qubits_params[key]
        elif key in self.duration_by_name_qubits:
            duration, unit = self.duration_by_name_qubits[key]
        elif name in self.duration_by_name:
            duration, unit = self.duration_by_name[name]
        else:
            raise TranspilerError(f"No value is found for key={key}")
        return self._convert_unit(duration, unit, to_unit)
    def _convert_unit(self, duration: float, from_unit: str, to_unit: str) -> float:
        # Prefixed second units (e.g. 'ns', 'us') are first scaled into seconds.
        if from_unit.endswith("s") and from_unit != "s":
            duration = apply_prefix(duration, from_unit)
            from_unit = "s"
        # assert both from_unit and to_unit in {'s', 'dt'}
        if from_unit == to_unit:
            return duration
        if self.dt is None:
            raise TranspilerError(
                f"dt is necessary to convert durations from '{from_unit}' to '{to_unit}'"
            )
        if from_unit == "s" and to_unit == "dt":
            # Parameter expressions cannot be rounded, so divide symbolically.
            if isinstance(duration, ParameterExpression):
                return duration / self.dt
            return duration_in_dt(duration, self.dt)
        elif from_unit == "dt" and to_unit == "s":
            return duration * self.dt
        else:
            raise TranspilerError(f"Conversion from '{from_unit}' to '{to_unit}' is not supported")
    def units_used(self) -> set[str]:
        """Get the set of all units used in this instruction durations.
        Returns:
            Set of units used in this instruction durations.
        """
        units_used = set()
        for _, unit in self.duration_by_name_qubits.values():
            units_used.add(unit)
        for _, unit in self.duration_by_name.values():
            units_used.add(unit)
        return units_used
InstructionDurationsType = Union[
List[Tuple[str, Optional[Iterable[int]], float, Optional[Iterable[float]], str]],
List[Tuple[str, Optional[Iterable[int]], float, Optional[Iterable[float]]]],
List[Tuple[str, Optional[Iterable[int]], float, str]],
List[Tuple[str, Optional[Iterable[int]], float]],
InstructionDurations,
]
"""List of tuples representing (instruction name, qubits indices, parameters, duration)."""
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit
# https://quantum-circuit.com/app_details/about/LFQv9PKwerh3EzrLw
# searched oracle element is '0010'
# NOTE(review): the angle 0.785398163397 below is pi/4; the crz/cx cascades
# appear to implement a multi-controlled phase — confirm against the linked
# circuit description.
qc = QuantumCircuit()
q = QuantumRegister(4, 'q')
ro = ClassicalRegister(4, 'ro')
qc.add_register(q)
qc.add_register(ro)
# Uniform superposition over all four qubits.
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[3])
# Oracle section: X gates select the marked pattern before/after the
# controlled-phase cascade.
qc.x(q[0])
qc.x(q[2])
qc.x(q[3])
qc.crz(0.785398163397, q[0], q[3])
qc.cx(q[0], q[1])
qc.crz(-0.785398163397, q[1], q[3])
qc.cx(q[0], q[1])
qc.crz(0.785398163397, q[1], q[3])
qc.cx(q[1], q[2])
qc.crz(-0.785398163397, q[2], q[3])
qc.cx(q[0], q[2])
qc.crz(0.785398163397, q[2], q[3])
qc.cx(q[1], q[2])
qc.crz(-0.785398163397, q[2], q[3])
qc.cx(q[0], q[2])
qc.crz(0.785398163397, q[2], q[3])
qc.x(q[0])
qc.x(q[2])
qc.x(q[3])
# Diffusion section: H/X sandwich around the same controlled-phase cascade.
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[3])
qc.x(q[0])
qc.x(q[1])
qc.x(q[2])
qc.x(q[3])
qc.crz(0.785398163397, q[0], q[3])
qc.cx(q[0], q[1])
qc.crz(-0.785398163397, q[1], q[3])
qc.cx(q[0], q[1])
qc.crz(0.785398163397, q[1], q[3])
qc.cx(q[1], q[2])
qc.crz(-0.785398163397, q[2], q[3])
qc.cx(q[0], q[2])
qc.crz(0.785398163397, q[2], q[3])
qc.cx(q[1], q[2])
qc.crz(-0.785398163397, q[2], q[3])
qc.cx(q[0], q[2])
qc.crz(0.785398163397, q[2], q[3])
qc.x(q[0])
qc.x(q[1])
qc.x(q[2])
qc.x(q[3])
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[3])
# Measure every qubit into the classical readout register.
qc.measure(q[0], ro[0])
qc.measure(q[1], ro[1])
qc.measure(q[2], ro[2])
qc.measure(q[3], ro[3])
def get_circuit(**kwargs):
    """Return the pre-built module-level circuit (kwargs are ignored)."""
    return qc
|
https://github.com/swe-bench/Qiskit__qiskit
|
swe-bench
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Visualization function for a pass manager. Passes are grouped based on their
flow controller, and coloured based on the type of pass.
"""
import os
import inspect
import tempfile
from qiskit.utils import optionals as _optionals
from qiskit.transpiler.basepasses import AnalysisPass, TransformationPass
from .exceptions import VisualizationError
# Default node colors: analysis passes red, transformation passes blue.
DEFAULT_STYLE = {AnalysisPass: "red", TransformationPass: "blue"}
@_optionals.HAS_GRAPHVIZ.require_in_call
@_optionals.HAS_PYDOT.require_in_call
def pass_manager_drawer(pass_manager, filename=None, style=None, raw=False):
    """
    Draws the pass manager.
    This function needs `pydot <https://github.com/erocarrera/pydot>`__, which in turn needs
    `Graphviz <https://www.graphviz.org/>`__ to be installed.
    Args:
        pass_manager (PassManager): the pass manager to be drawn
        filename (str): file path to save image to
        style (dict or OrderedDict): keys are the pass classes and the values are
            the colors to make them. An example can be seen in the DEFAULT_STYLE. An ordered
            dict can be used to ensure a priority coloring when pass falls into multiple
            categories. Any values not included in the provided dict will be filled in from
            the default dict
        raw (Bool) : True if you want to save the raw Dot output not an image. The
            default is False.
    Returns:
        PIL.Image or None: an in-memory representation of the pass manager. Or None if
        no image was generated or PIL is not installed.
    Raises:
        MissingOptionalLibraryError: when nxpd or pydot not installed.
        VisualizationError: If raw=True and filename=None.
    Example:
        .. code-block::
            %matplotlib inline
            from qiskit import QuantumCircuit
            from qiskit.compiler import transpile
            from qiskit.transpiler import PassManager
            from qiskit.visualization import pass_manager_drawer
            from qiskit.transpiler.passes import Unroller
            circ = QuantumCircuit(3)
            circ.ccx(0, 1, 2)
            circ.draw()
            pass_ = Unroller(['u1', 'u2', 'u3', 'cx'])
            pm = PassManager(pass_)
            new_circ = pm.run(circ)
            new_circ.draw(output='mpl')
            pass_manager_drawer(pm, "passmanager.jpg")
    """
    # Deferred import: availability is guaranteed by the decorators above.
    import pydot
    passes = pass_manager.passes()
    if not style:
        style = DEFAULT_STYLE
    # create the overall graph
    graph = pydot.Dot()
    # identifiers for nodes need to be unique, so assign an id
    # can't just use python's id in case the exact same pass was
    # appended more than once
    component_id = 0
    prev_node = None
    # Each flow-controller group becomes one cluster, chained to the previous.
    for index, controller_group in enumerate(passes):
        subgraph, component_id, prev_node = draw_subgraph(
            controller_group, component_id, style, prev_node, index
        )
        graph.add_subgraph(subgraph)
    output = make_output(graph, raw, filename)
    return output
def _get_node_color(pss, style):
# look in the user provided dict first
for typ, color in style.items():
if isinstance(pss, typ):
return color
# failing that, look in the default
for typ, color in DEFAULT_STYLE.items():
if isinstance(pss, typ):
return color
return "black"
@_optionals.HAS_GRAPHVIZ.require_in_call
@_optionals.HAS_PYDOT.require_in_call
def staged_pass_manager_drawer(pass_manager, filename=None, style=None, raw=False):
    """
    Draws the staged pass manager.
    This function needs `pydot <https://github.com/erocarrera/pydot>`__, which in turn needs
    `Graphviz <https://www.graphviz.org/>`__ to be installed.
    Args:
        pass_manager (StagedPassManager): the staged pass manager to be drawn
        filename (str): file path to save image to
        style (dict or OrderedDict): keys are the pass classes and the values are
            the colors to make them. An example can be seen in the DEFAULT_STYLE. An ordered
            dict can be used to ensure a priority coloring when pass falls into multiple
            categories. Any values not included in the provided dict will be filled in from
            the default dict
        raw (Bool) : True if you want to save the raw Dot output not an image. The
            default is False.
    Returns:
        PIL.Image or None: an in-memory representation of the pass manager. Or None if
        no image was generated or PIL is not installed.
    Raises:
        MissingOptionalLibraryError: when nxpd or pydot not installed.
        VisualizationError: If raw=True and filename=None.
    Example:
        .. code-block::
            %matplotlib inline
            from qiskit.providers.fake_provider import FakeLagosV2
            from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager
            pass_manager = generate_preset_pass_manager(3, FakeLagosV2())
            pass_manager.draw()
    """
    # Deferred import: availability is guaranteed by the decorators above.
    import pydot
    # only include stages that have passes
    stages = list(filter(lambda s: s is not None, pass_manager.expanded_stages))
    if not style:
        style = DEFAULT_STYLE
    # create the overall graph
    graph = pydot.Dot()
    # identifiers for nodes need to be unique, so assign an id
    # can't just use python's id in case the exact same pass was
    # appended more than once
    component_id = 0
    # keep a running count of indexes across stages
    idx = 0
    prev_node = None
    # One cluster per stage; inside, each flow-controller group is a subgraph.
    for st in stages:
        stage = getattr(pass_manager, st)
        if stage is not None:
            passes = stage.passes()
            stagegraph = pydot.Cluster(str(st), label=str(st), fontname="helvetica", labeljust="l")
            for controller_group in passes:
                subgraph, component_id, prev_node = draw_subgraph(
                    controller_group, component_id, style, prev_node, idx
                )
                stagegraph.add_subgraph(subgraph)
                idx += 1
            graph.add_subgraph(stagegraph)
    output = make_output(graph, raw, filename)
    return output
def draw_subgraph(controller_group, component_id, style, prev_node, idx):
    """Draw one flow-controller group as a pydot cluster.

    Returns the subgraph together with the updated running ``component_id``
    and the last pass node, which the caller uses to chain an edge from the
    previous group.
    """
    import pydot
    # label is the name of the flow controller parameter
    label = "[{}] {}".format(idx, ", ".join(controller_group["flow_controllers"]))
    # create the subgraph for this controller
    subgraph = pydot.Cluster(str(component_id), label=label, fontname="helvetica", labeljust="l")
    component_id += 1
    for pass_ in controller_group["passes"]:
        # label is the name of the pass
        node = pydot.Node(
            str(component_id),
            label=str(type(pass_).__name__),
            color=_get_node_color(pass_, style),
            shape="rectangle",
            fontname="helvetica",
        )
        subgraph.add_node(node)
        component_id += 1
        # the arguments that were provided to the pass when it was created
        arg_spec = inspect.getfullargspec(pass_.__init__)
        # 0 is the args, 1: to remove the self arg
        args = arg_spec[0][1:]
        num_optional = len(arg_spec[3]) if arg_spec[3] else 0
        # add in the inputs to the pass
        for arg_index, arg in enumerate(args):
            nd_style = "solid"
            # any optional args are dashed
            # the num of optional counts from the end towards the start of the list
            if arg_index >= (len(args) - num_optional):
                nd_style = "dashed"
            input_node = pydot.Node(
                component_id,
                label=arg,
                color="black",
                shape="ellipse",
                fontsize=10,
                style=nd_style,
                fontname="helvetica",
            )
            subgraph.add_node(input_node)
            component_id += 1
            subgraph.add_edge(pydot.Edge(input_node, node))
        # if there is a previous node, add an edge between them
        if prev_node:
            subgraph.add_edge(pydot.Edge(prev_node, node))
        prev_node = node
    return subgraph, component_id, prev_node
def make_output(graph, raw, filename):
    """Produce output for pass_manager.

    Depending on the flags this writes raw Dot text, writes a PNG to
    ``filename``, or returns an in-memory PIL image (saving a copy to
    ``filename`` when one is given).
    """
    if raw:
        # Raw Dot output can only go to a file.
        if not filename:
            raise VisualizationError("if format=raw, then a filename is required.")
        graph.write(filename, format="raw")
        return None

    if not _optionals.HAS_PIL and filename:
        # Without PIL we can still render the PNG straight to disk.
        # pylint says write_png isn't a method - it is
        graph.write_png(filename)
        return None

    _optionals.HAS_PIL.require_now("pass manager drawer")
    with tempfile.TemporaryDirectory() as tmp_dir:
        from PIL import Image

        # Render to a temporary PNG, load it into memory, then discard the file.
        png_path = os.path.join(tmp_dir, "pass_manager.png")
        # pylint says write_png isn't a method - it is
        graph.write_png(png_path)
        image = Image.open(png_path)
        os.remove(png_path)
        if filename:
            image.save(filename, "PNG")
        return image
|
https://github.com/PabloMartinezAngerosa/QAOA-uniform-convergence
|
PabloMartinezAngerosa
|
import json
import csv
import numpy as np
import networkx as nx
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute, Aer, assemble
from qiskit.quantum_info import Statevector
from qiskit.aqua.algorithms import NumPyEigensolver
from qiskit.quantum_info import Pauli
from qiskit.aqua.operators import op_converter
from qiskit.aqua.operators import WeightedPauliOperator
from qiskit.visualization import plot_histogram
from qiskit.providers.aer.extensions.snapshot_statevector import *
from thirdParty.classical import rand_graph, classical, bitstring_to_path, calc_cost
from utils import mapeo_grafo
from collections import defaultdict
from operator import itemgetter
from scipy.optimize import minimize
import matplotlib.pyplot as plt
# Penalty strength applied to the TSP constraint terms (see tsp_obj_2).
LAMBDA = 10
# Number of measurement shots per circuit execution.
SHOTS = 10000
# Telemetry appended by get_black_box_objective_2 on every optimizer iteration.
UNIFORM_CONVERGENCE_SAMPLE = []
# returns the bit index for an alpha and j
def bit(i_city, l_time, num_cities):
    """Flat (row-major) qubit index for city *i_city* at tour position *l_time*."""
    row_offset = i_city * num_cities
    return row_offset + l_time
# e^(cZZ)
def append_zz_term(qc, q_i, q_j, gamma, constant_term):
    """Append exp(-i * gamma * constant_term * Z_i Z_j) to *qc*.

    Standard CX . RZ(2*gamma*c) . CX decomposition: rz(theta) = exp(-i*theta*Z/2),
    so the RZ carries angle 2*gamma*constant_term.
    """
    qc.cx(q_i, q_j)
    qc.rz(2*gamma*constant_term, q_j)
    qc.cx(q_i, q_j)
# e^(cZ)
def append_z_term(qc, q_i, gamma, constant_term):
    """Append exp(-i * gamma * constant_term * Z_i) to *qc* as a single RZ."""
    qc.rz(2*gamma*constant_term, q_i)
# e^(cX)
def append_x_term(qc, qi, beta):
    """Append the mixer rotation RX(-2*beta) (= exp(i*beta*X)) on qubit *qi*."""
    qc.rx(-2*beta, qi)
def get_not_edge_in(G):
    """Return every ordered pair (i, j), i != j, that is NOT an edge of G.

    Both orientations of each non-edge are included (e.g. (0, 1) and (1, 0)),
    which is what the Hamiltonian construction expects.

    Args:
        G: networkx-style graph whose nodes are 0..N-1.

    Returns:
        list[tuple[int, int]]: ordered non-adjacent node pairs, in i-major order.
    """
    N = G.number_of_nodes()
    # Build a set with both orientations once, so each candidate pair is an
    # O(1) membership test instead of a rescan of G.edges() (the original
    # inner scan made this O(N^2 * E)).
    edge_set = set()
    for edge_i, edge_j in G.edges():
        edge_set.add((edge_i, edge_j))
        edge_set.add((edge_j, edge_i))
    return [
        (i, j)
        for i in range(N)
        for j in range(N)
        if i != j and (i, j) not in edge_set
    ]
def get_classical_simplified_z_term(G, _lambda):
    """Accumulate the coefficient of every single-Z term of the TSP Hamiltonian.

    Result is indexed by the flat qubit index ``bit(i, l, N)`` (city i at tour
    position l).  Five accumulation passes add the lambda-weighted constraint
    penalties and the distance contribution; ``tsp_obj_2`` is the classical
    counterpart of this expansion.

    Args:
        G: networkx graph with ``weight`` edge attributes.
        _lambda: penalty strength for the TSP constraints.

    Returns:
        list: length N**2 vector of Z coefficients.
    """
    N = G.number_of_nodes()
    E = G.edges()  # NOTE(review): unused, kept as-is
    # z term #
    z_classic_term = [0] * N**2
    # first term: constant -lambda contribution on every qubit
    for l in range(N):
        for i in range(N):
            z_il_index = bit(i, l, N)
            z_classic_term[z_il_index] += -1 * _lambda
    # second term: pairs of distinct cities (i < j) sharing time step l
    for l in range(N):
        for j in range(N):
            for i in range(N):
                if i < j:
                    # z_il
                    z_il_index = bit(i, l, N)
                    z_classic_term[z_il_index] += _lambda / 2
                    # z_jl
                    z_jl_index = bit(j, l, N)
                    z_classic_term[z_jl_index] += _lambda / 2
    # third term: pairs of distinct time steps (l < j) for the same city i
    for i in range(N):
        for l in range(N):
            for j in range(N):
                if l < j:
                    # z_il
                    z_il_index = bit(i, l, N)
                    z_classic_term[z_il_index] += _lambda / 2
                    # z_ij
                    z_ij_index = bit(i, j, N)
                    z_classic_term[z_ij_index] += _lambda / 2
    # fourth term: non-adjacent city pairs at consecutive tour positions
    not_edge = get_not_edge_in(G)  # includes ordered tuples, e.g. (1,0) and (0,1)
    for edge in not_edge:
        for l in range(N):
            i = edge[0]
            j = edge[1]
            # z_il
            z_il_index = bit(i, l, N)
            z_classic_term[z_il_index] += _lambda / 4
            # z_j(l+1) -- tour wraps around, hence the modulo
            l_plus = (l+1) % N
            z_jlplus_index = bit(j, l_plus, N)
            z_classic_term[z_jlplus_index] += _lambda / 4
    # fifth term: distance cost on adjacent cities at consecutive positions
    weights = nx.get_edge_attributes(G, 'weight')
    for edge_i, edge_j in G.edges():
        weight_ij = weights.get((edge_i, edge_j))
        weight_ji = weight_ij  # symmetric weights: w(i,j) == w(j,i)
        for l in range(N):
            # z_il
            z_il_index = bit(edge_i, l, N)
            z_classic_term[z_il_index] += weight_ij / 4
            # z_jlplus
            l_plus = (l+1) % N
            z_jlplus_index = bit(edge_j, l_plus, N)
            z_classic_term[z_jlplus_index] += weight_ij / 4
            # add the reversed orientation explicitly because G.edges()
            # yields each undirected edge only once
            # z_i'l
            z_il_index = bit(edge_j, l, N)
            z_classic_term[z_il_index] += weight_ji / 4
            # z_j'lplus
            l_plus = (l+1) % N
            z_jlplus_index = bit(edge_i, l_plus, N)
            z_classic_term[z_jlplus_index] += weight_ji / 4
    return z_classic_term
def get_classical_simplified_zz_term(G, _lambda):
    """Accumulate the coefficient of every Z_a Z_b term of the TSP Hamiltonian.

    Result is an N**2 x N**2 table indexed by two flat qubit indices
    (``bit(i, l, N)``); the four passes mirror the quadratic pieces of the
    constraint penalties and the distance cost (cf. ``tsp_obj_2``).

    Args:
        G: networkx graph with ``weight`` edge attributes.
        _lambda: penalty strength for the TSP constraints.

    Returns:
        list[list]: N**2 x N**2 matrix of ZZ coefficients.
    """
    N = G.number_of_nodes()
    E = G.edges()  # NOTE(review): unused, kept as-is
    # zz term #
    zz_classic_term = [[0] * N**2 for i in range(N**2)]
    # first term: pairs of distinct cities (i < j) sharing time step l
    for l in range(N):
        for j in range(N):
            for i in range(N):
                if i < j:
                    # z_il
                    z_il_index = bit(i, l, N)
                    # z_jl
                    z_jl_index = bit(j, l, N)
                    zz_classic_term[z_il_index][z_jl_index] += _lambda / 2
    # second term: pairs of distinct time steps (l < j) for the same city i
    for i in range(N):
        for l in range(N):
            for j in range(N):
                if l < j:
                    # z_il
                    z_il_index = bit(i, l, N)
                    # z_ij
                    z_ij_index = bit(i, j, N)
                    zz_classic_term[z_il_index][z_ij_index] += _lambda / 2
    # third term: non-adjacent city pairs at consecutive tour positions
    not_edge = get_not_edge_in(G)
    for edge in not_edge:
        for l in range(N):
            i = edge[0]
            j = edge[1]
            # z_il
            z_il_index = bit(i, l, N)
            # z_j(l+1) -- tour wraps around, hence the modulo
            l_plus = (l+1) % N
            z_jlplus_index = bit(j, l_plus, N)
            zz_classic_term[z_il_index][z_jlplus_index] += _lambda / 4
    # fourth term: distance cost on adjacent cities at consecutive positions
    weights = nx.get_edge_attributes(G, 'weight')
    for edge_i, edge_j in G.edges():
        weight_ij = weights.get((edge_i, edge_j))
        weight_ji = weight_ij  # symmetric weights: w(i,j) == w(j,i)
        for l in range(N):
            # z_il
            z_il_index = bit(edge_i, l, N)
            # z_jlplus
            l_plus = (l+1) % N
            z_jlplus_index = bit(edge_j, l_plus, N)
            zz_classic_term[z_il_index][z_jlplus_index] += weight_ij / 4
            # add the reversed orientation explicitly because G.edges()
            # yields each undirected edge only once
            # z_i'l
            z_il_index = bit(edge_j, l, N)
            # z_j'lplus
            l_plus = (l+1) % N
            z_jlplus_index = bit(edge_i, l_plus, N)
            zz_classic_term[z_il_index][z_jlplus_index] += weight_ji / 4
    return zz_classic_term
def get_classical_simplified_hamiltonian(G, _lambda):
    """Return the (Z, ZZ) coefficient tables of the simplified TSP Hamiltonian."""
    return (
        get_classical_simplified_z_term(G, _lambda),
        get_classical_simplified_zz_term(G, _lambda),
    )
def get_cost_circuit(G, gamma, _lambda):
    """Build the cost unitary for one QAOA layer from the Hamiltonian tables.

    Appends an RZ for every non-zero single-Z coefficient and a CX-RZ-CX
    block for every non-zero ZZ coefficient.
    """
    num_qubits = G.number_of_nodes() ** 2
    qc = QuantumCircuit(num_qubits, num_qubits)
    z_terms, zz_terms = get_classical_simplified_hamiltonian(G, _lambda)
    # Single-Z rotations.
    for qubit, coeff in enumerate(z_terms):
        if coeff != 0:
            append_z_term(qc, qubit, gamma, coeff)
    # Two-qubit ZZ interactions.
    for i, row in enumerate(zz_terms):
        for j, coeff in enumerate(row):
            if coeff != 0:
                append_zz_term(qc, i, j, gamma, coeff)
    return qc
def get_mixer_operator(G, beta):
    """Build the QAOA mixer layer: an RX(-2*beta) rotation on every qubit."""
    num_qubits = G.number_of_nodes() ** 2
    qc = QuantumCircuit(num_qubits, num_qubits)
    for qubit in range(num_qubits):
        append_x_term(qc, qubit, beta)
    return qc
def invert_counts(counts):
    """Reverse every bitstring key (Qiskit little-endian -> logical order)."""
    inverted = {}
    for bitstring, hits in counts.items():
        inverted[bitstring[::-1]] = hits
    return inverted
def get_QAOA_circuit(G, beta, gamma, _lambda):
    """Assemble the depth-p QAOA circuit for the TSP instance *G*.

    Layers: uniform superposition, then p alternating cost/mixer blocks,
    a statevector snapshot ("final_state") and a full measurement.
    """
    assert len(beta) == len(gamma)
    num_qubits = G.number_of_nodes() ** 2
    qc = QuantumCircuit(num_qubits, num_qubits)
    # Uniform superposition over all basis states.
    qc.h(range(num_qubits))
    for layer in range(len(beta)):
        qc = qc.compose(get_cost_circuit(G, gamma[layer], _lambda))
        qc = qc.compose(get_mixer_operator(G, beta[layer]))
        qc.barrier(range(num_qubits))
    # Snapshot lets the objective read exact amplitudes alongside the counts.
    qc.snapshot_statevector("final_state")
    qc.measure(range(num_qubits), range(num_qubits))
    return qc
def tsp_obj(x, G):
    """Evaluate the Ising-form cost of bitstring *x* on the Hamiltonian tables.

    Maps each bit 0/1 to a spin -1/+1 and sums the linear (Z) and quadratic
    (ZZ) contributions with the coefficients from
    ``get_classical_simplified_hamiltonian``.

    Args:
        x: bitstring of length N**2 (characters '0'/'1').
        G: networkx graph of the TSP instance.

    Returns:
        float: the Hamiltonian expectation for the basis state *x*.
    """
    z_classic_term, zz_classic_term = get_classical_simplified_hamiltonian(G, LAMBDA)
    cost = 0
    # Linear Z contribution.
    for index in range(len(x)):
        z = (int(x[index]) * 2) - 1
        cost += z_classic_term[index] * z
    # Quadratic ZZ contribution.
    for i in range(len(x)):
        z_1 = (int(x[i]) * 2) - 1
        for j in range(len(x)):
            z_2 = (int(x[j]) * 2) - 1
            # BUG FIX: the original multiplied z_1 * z_1 (always +1), so the
            # j-th spin never entered the ZZ term; it must be z_1 * z_2.
            cost += zz_classic_term[i][j] * z_1 * z_2
    return cost
# Sample expectation value
def compute_tsp_energy(counts, G):
    """Shot-weighted sample mean of ``tsp_obj`` over the measured bitstrings."""
    weighted_sum = 0
    shots_total = 0
    for bitstring, hits in counts.items():
        weighted_sum += tsp_obj(bitstring, G) * hits
        shots_total += hits
    return weighted_sum / shots_total
# Sample expectation value
def compute_tsp_energy_2(counts, G):
    """Shot-weighted sample mean of ``tsp_obj_2`` over the measured bitstrings."""
    weighted_sum = 0
    shots_total = 0
    for bitstring, hits in counts.items():
        weighted_sum += tsp_obj_2(bitstring, G, LAMBDA) * hits
        shots_total += hits
    return weighted_sum / shots_total
def tsp_obj_2(x, G, _lambda):
    """Classical TSP cost of bitstring *x*: tour distance plus penalties.

    Bit ``bit(i, l, N)`` of *x* means "city i is visited at tour position l".

    Args:
        x: bitstring of length N**2 (characters '0'/'1').
        G: networkx graph with ``weight`` edge attributes.
        _lambda: penalty strength for constraint violations.

    Returns:
        The penalized tour cost (distance + _lambda-weighted violations).
    """
    not_edge = get_not_edge_in(G)
    N = G.number_of_nodes()
    tsp_cost = 0
    # Distance term: w(i,j) for every adjacent pair at consecutive positions.
    weights = nx.get_edge_attributes(G, 'weight')
    for edge_i, edge_j in G.edges():
        weight_ij = weights.get((edge_i, edge_j))
        weight_ji = weight_ij  # symmetric weights: w(i,j) == w(j,i)
        for l in range(N):
            # x_il
            x_il_index = bit(edge_i, l, N)
            # x_jlplus -- position after l, wrapping around the tour
            l_plus = (l+1) % N
            x_jlplus_index = bit(edge_j, l_plus, N)
            tsp_cost += int(x[x_il_index]) * int(x[x_jlplus_index]) * weight_ij
            # reversed orientation (G.edges() yields each undirected edge once)
            # x_i'l
            x_il_index = bit(edge_j, l, N)
            # x_j'lplus
            x_jlplus_index = bit(edge_i, l_plus, N)
            tsp_cost += int(x[x_il_index]) * int(x[x_jlplus_index]) * weight_ji
    # Constraint 1: exactly one city occupies each time step.
    for l in range(N):
        penal1 = 1
        for i in range(N):
            x_il_index = bit(i, l, N)
            penal1 -= int(x[x_il_index])
        tsp_cost += _lambda * penal1**2
    # Constraint 2: each city is visited exactly once.
    for i in range(N):
        penal2 = 1
        for l in range(N):
            x_il_index = bit(i, l, N)
            penal2 -= int(x[x_il_index])
        tsp_cost += _lambda * penal2**2
    # Constraint 3: consecutive tour positions must be adjacent cities.
    for edge in not_edge:
        for l in range(N):
            i = edge[0]
            j = edge[1]
            # x_il
            x_il_index = bit(i, l, N)
            # x_j(l+1)
            l_plus = (l+1) % N
            x_jlplus_index = bit(j, l_plus, N)
            tsp_cost += int(x[x_il_index]) * int(x[x_jlplus_index]) * _lambda
    return tsp_cost
def get_black_box_objective(G, p):
    """Return a classical objective theta -> <f> estimated from QASM sampling.

    *theta* concatenates p beta values followed by p gamma values.
    """
    backend = Aer.get_backend('qasm_simulator')

    def objective(theta):
        beta, gamma = theta[:p], theta[p:]
        circuit = get_QAOA_circuit(G, beta, gamma, LAMBDA)
        counts = execute(circuit, backend, seed_simulator=10, shots=SHOTS).result().get_counts()
        return compute_tsp_energy(invert_counts(counts), G)

    return objective
def get_black_box_objective_2(G, p):
    """Return an objective theta -> sample mean of tsp_obj_2, with telemetry.

    Side effect: every evaluation appends a record (parameters, counts,
    sample mean, exact snapshot probabilities and exact expectation) to the
    module-global UNIFORM_CONVERGENCE_SAMPLE list.
    """
    backend = Aer.get_backend('qasm_simulator')
    sim = Aer.get_backend('aer_simulator')  # NOTE(review): unused, kept as-is

    def f(theta):
        # theta packs p betas followed by p gammas.
        beta = theta[:p]
        gamma = theta[p:]
        qc = get_QAOA_circuit(G, beta, gamma, LAMBDA)
        result = execute(qc, backend, seed_simulator=10, shots=SHOTS).result()
        # Exact amplitudes recorded by the "final_state" snapshot in the circuit.
        final_state_vector = result.data()["snapshots"]["statevector"]["final_state"][0]
        state_vector = Statevector(final_state_vector)
        probabilities = state_vector.probabilities()
        # Reverse bit order of the basis-state labels to match tsp_obj_2's indexing.
        probabilities_states = invert_counts(state_vector.probabilities_dict())
        # Exact expectation of the penalized cost over the snapshot distribution.
        expected_value = 0
        for state, probability in probabilities_states.items():
            cost = tsp_obj_2(state, G, LAMBDA)
            expected_value += cost * probability
        # Shot-based estimate (this is what the optimizer minimizes).
        counts = result.get_counts()
        mean = compute_tsp_energy_2(invert_counts(counts), G)
        global UNIFORM_CONVERGENCE_SAMPLE
        UNIFORM_CONVERGENCE_SAMPLE.append({
            "beta": beta,
            "gamma": gamma,
            "counts": counts,
            "mean": mean,
            "probabilities": probabilities,
            "expected_value": expected_value
        })
        return mean
    return f
def compute_tsp_min_energy_2(counts, G):
    """Scan the measured bitstrings for the lowest ``tsp_obj_2`` cost.

    Args:
        counts: dict mapping bitstring -> shot count (shot counts are not
            used for the minimum, only the keys are scanned).
        G: networkx graph of the TSP instance.

    Returns:
        tuple: (number of distinct bitstrings, best cost, best bitstring).
        For empty *counts* the cost is float("inf") and the bitstring "".
    """
    best_cost = float("inf")  # replaces the arbitrary 1e21 sentinel
    best_meas = ""
    num_seen = 0
    # Iterate keys only: the original unpacked .items() and ignored the count.
    for meas in counts:
        num_seen += 1
        cost = tsp_obj_2(meas, G, LAMBDA)
        if cost < best_cost:
            best_cost = cost
            best_meas = meas
    return num_seen, best_cost, best_meas
def compute_tsp_min_energy_1(counts, G):
    """Scan the measured bitstrings for the lowest ``tsp_obj`` cost.

    Args:
        counts: dict mapping bitstring -> shot count (shot counts are not
            used for the minimum, only the keys are scanned).
        G: networkx graph of the TSP instance.

    Returns:
        tuple: (number of distinct bitstrings, best cost, best bitstring).
        For empty *counts* the cost is float("inf") and the bitstring "".
    """
    best_cost = float("inf")  # replaces the arbitrary 1e21 sentinel
    best_meas = ""
    num_seen = 0
    # Iterate keys only: the original unpacked .items() and ignored the count.
    for meas in counts:
        num_seen += 1
        cost = tsp_obj(meas, G)
        if cost < best_cost:
            best_cost = cost
            best_meas = meas
    return num_seen, best_cost, best_meas
def test_counts_2(counts, G):
    """Print the sample mean and the best measured bitstring (algorithm 2)."""
    inverted = invert_counts(counts)
    mean_energy2 = compute_tsp_energy_2(inverted, G)
    cantidad, min_val, min_meas = compute_tsp_min_energy_2(inverted, G)
    print("*************************")
    print("En el algoritmo 2 (Marina) el valor esperado como resultado es " + str(mean_energy2))
    print("El valor minimo de todos los evaluados es " + str(min_val) + " se evaluaron un total de " + str(cantidad))
    print("El vector minimo es " + min_meas)
def test_counts_1(counts, G):
    """Print the sample mean and the best measured bitstring (algorithm 1)."""
    inverted = invert_counts(counts)
    mean_energy1 = compute_tsp_energy(inverted, G)
    cantidad, min_val, min_meas = compute_tsp_min_energy_1(inverted, G)
    print("*************************")
    print("En el algoritmo 1 (Pablo) el valor esperado como resultado es " + str(mean_energy1))
    print("El valor minimo de todos los evaluados es " + str(min_val) + " se evaluaron un total de " + str(cantidad))
    print("El vector minimo es " + min_meas)
def test_solution(grafo=None, p=7):
    """Optimize a depth-p QAOA on a TSP instance and report the best samples.

    Args:
        grafo: networkx graph to reuse; when None a random 2-city instance is
            generated (resampling until the classical solver finds a tour).
        p: QAOA depth (number of beta/gamma pairs).

    Returns:
        tuple: (final sampling job, the graph used, UNIFORM_CONVERGENCE_SAMPLE
        telemetry collected during the optimization).
    """
    global UNIFORM_CONVERGENCE_SAMPLE
    UNIFORM_CONVERGENCE_SAMPLE = []  # reset telemetry for this run
    if grafo == None:
        cantidad_ciudades = 2
        pesos, conexiones = None, None
        mejor_camino = None
        # Resample random instances until the classical baseline finds a tour.
        while not mejor_camino:
            pesos, conexiones = rand_graph(cantidad_ciudades)
            mejor_costo, mejor_camino = classical(pesos, conexiones, loop=False)
        G = mapeo_grafo(conexiones, pesos)
        print(mejor_costo)
        print(mejor_camino)
        print(G.edges())
        print(G.nodes())
    else:
        G = grafo
    # Random starting point: p betas in [0, pi], then p gammas in [0, 2*pi].
    bounds = []
    intial_random = []
    for i in range(p):
        bounds.append((0, np.pi))
        intial_random.append(np.random.uniform(0, np.pi))
    for i in range(p):
        bounds.append((0, np.pi * 2))
        intial_random.append(np.random.uniform(0, 2*np.pi))
    init_point = np.array(intial_random)
    # Marina Solutions
    obj = get_black_box_objective_2(G, p)
    res_sample_2 = minimize(obj, init_point, method="COBYLA", options={"maxiter": 2500, "disp": True})
    # Re-run the circuit at the optimized parameters and report the samples.
    theta_2 = res_sample_2.x
    beta = theta_2[:p]
    gamma = theta_2[p:]
    _lambda = LAMBDA
    qc = get_QAOA_circuit(G, beta, gamma, LAMBDA)
    backend = Aer.get_backend('qasm_simulator')
    job_2 = execute(qc, backend, shots=SHOTS)
    resutls_2 = job_2.result().get_counts()
    test_counts_2(resutls_2, G)
    return job_2, G, UNIFORM_CONVERGENCE_SAMPLE
def create_multiple_p_mismo_grafo():
    """Sweep QAOA depth p = 1..10 on one graph; write results to CSV.

    The first iteration generates a random graph; later iterations reuse it.
    For each p the telemetry entry with the lowest sample mean is written to
    ``qaoa_multiple_p.csv`` as one row per basis state probability.
    """
    header = ['p', 'state', 'probability', 'mean']
    length_p = 10
    with open('qaoa_multiple_p.csv', 'w', encoding='UTF8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        first_p = False
        uniform_convergence_sample = []
        for p in range(length_p):
            p = p+1  # depth is 1-based
            if first_p == False:
                job_2, G, uniform_convergence_sample = test_solution(p=p)
                first_p = True
            else:
                # Reuse the graph from the first run so only p varies.
                job_2, G, uniform_convergence_sample = test_solution(grafo=G, p=p)
            # Pick the telemetry entry with the lowest sample mean.
            uniform_convergence_sample.sort(key=lambda x: x["mean"])
            mean = uniform_convergence_sample[0]["mean"]
            print(mean)
            state = 0
            for probability in uniform_convergence_sample[0]["probabilities"]:
                state += 1
                writer.writerow([p, state, probability, mean])
def create_multiple_p_mismo_grafo_multiples_instanncias():
    """Convergence study: sweep p = 1..4 on each of 10 random instances.

    For each instance, the best telemetry entry (lowest exact expectation)
    per depth is kept; the sup-norm distance between each depth's probability
    vector and the deepest one is written to ``qaoa_multiple_p_distance.csv``.
    """
    header = ['instance', 'p', 'distance', 'mean']
    length_p = 4
    length_instances = 10
    with open('qaoa_multiple_p_distance.csv', 'w', encoding='UTF8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        instance_index = 0
        for instance in range(length_instances):
            instance_index += 1
            first_p = False
            UNIFORM_CONVERGENCE_P = []
            uniform_convergence_sample = []
            for p in range(length_p):
                p = p+1  # depth is 1-based
                print("p es igual " + str(p))
                if first_p == False:
                    print("Vuelve a llamar a test_solution")
                    job_2, G, uniform_convergence_sample = test_solution(p=p)
                    first_p = True
                else:
                    # Reuse this instance's graph so only p varies.
                    job_2, G, uniform_convergence_sample = test_solution(grafo=G, p=p)
                # Keep the entry with the lowest exact expectation for this p.
                uniform_convergence_sample.sort(key=lambda x: x["expected_value"])
                convergence_min = uniform_convergence_sample[0]
                UNIFORM_CONVERGENCE_P.append({
                    "mean": convergence_min["expected_value"],
                    "probabilities": convergence_min["probabilities"]
                })
            # Reference distribution: the deepest p acts as the "limit" point.
            cauchy_function_nk = UNIFORM_CONVERGENCE_P[len(UNIFORM_CONVERGENCE_P) - 1]
            p_index = 0
            for p_state in UNIFORM_CONVERGENCE_P:
                p_index += 1
                print(p_index)
                mean = p_state["mean"]
                # Sup-norm distance between this depth and the reference.
                distance_p_cauchy_function_nk = np.max(np.abs(cauchy_function_nk["probabilities"] - p_state["probabilities"]))
                writer.writerow([instance_index, p_index, distance_p_cauchy_function_nk, mean])
# Script entry point: run the multi-instance, multi-depth convergence
# experiment (writes qaoa_multiple_p_distance.csv).
if __name__ == '__main__':
    create_multiple_p_mismo_grafo_multiples_instanncias()
def defult_init():
    """Ad-hoc experiment driver (name typo preserved for any callers).

    Generates a random 2-city instance, runs a COBYLA optimization of the
    depth-5 QAOA objective, then executes the circuit at two hard-coded
    parameter vectors (one per objective variant).  Results are only
    printed/executed; nothing is returned.
    """
    cantidad_ciudades = 2
    pesos, conexiones = None, None
    mejor_camino = None
    # Resample random instances until the classical baseline finds a tour.
    while not mejor_camino:
        pesos, conexiones = rand_graph(cantidad_ciudades)
        mejor_costo, mejor_camino = classical(pesos, conexiones, loop=False)
    G = mapeo_grafo(conexiones, pesos)
    print(mejor_costo)
    print(mejor_camino)
    print(G.edges())
    print(G.nodes())
    print("labels")
    labels = nx.get_edge_attributes(G, 'weight')
    p = 5  # QAOA depth
    obj = get_black_box_objective(G, p)
    # Hand-picked 2p-dimensional starting point (p betas then p gammas).
    init_point = np.array([0.8, 2.2, 0.83, 2.15, 0.37, 2.4, 6.1, 2.2, 3.8, 6.1])
    # Marina Solutions
    obj = get_black_box_objective_2(G, p)  # NOTE(review): overwrites obj above
    res_sample = minimize(obj, init_point, method="COBYLA", options={"maxiter": 2500, "disp": True})
    print(res_sample)
    # Previously-optimized parameter vectors for both objective variants.
    theta_2 = [0.72685401, 2.15678239, 0.86389827, 2.19403121, 0.26916675, 2.19832144, 7.06651453, 3.20333137, 3.81301611, 6.08893568]
    theta_1 = [0.90644898, 2.15994212, 1.8609325 , 2.14042604, 1.49126214, 2.4127999, 6.10529434, 2.18238732, 3.84056674, 6.07097744]
    beta = theta_1[:p]
    gamma = theta_1[p:]
    qc = get_QAOA_circuit(G, beta, gamma, LAMBDA)
    backend = Aer.get_backend('qasm_simulator')
    job = execute(qc, backend)
    beta = theta_2[:p]
    gamma = theta_2[p:]
    qc = get_QAOA_circuit(G, beta, gamma, LAMBDA)
    backend = Aer.get_backend('qasm_simulator')
    job = execute(qc, backend)
|
https://github.com/AbeerVaishnav13/Quantum-Programs
|
AbeerVaishnav13
|
from qiskit.quantum_info import Operator
from qiskit import QuantumCircuit, Aer, execute
import numpy as np
from qiskit.visualization import plot_state_qsphere
def phase_oracle(n, indices_to_mark, name='Oracle'):
    """n-qubit phase oracle: flips the sign of the marked basis states.

    Implemented as a diagonal unitary with -1 at each marked index and +1
    elsewhere, attached to the circuit via ``unitary``.
    """
    qc = QuantumCircuit(n, name=name)
    oracle_matrix = np.identity(2**n)
    # Set the marked diagonal entries to -1 in one fancy-index assignment.
    marked = list(indices_to_mark)
    oracle_matrix[marked, marked] = -1
    qc.unitary(Operator(oracle_matrix), range(n))
    return qc
def diffuser(n):
    """Grover diffuser on n qubits: reflection about the uniform state |s>.

    Built as H^(x)n . (reflection about |0...0>) . H^(x)n, where the middle
    reflection is a phase oracle marking index 0.
    """
    qc = QuantumCircuit(n, name='Diffuser')
    for qubit in range(n):
        qc.h(qubit)
    qc.append(phase_oracle(n, [0]), range(n))
    for qubit in range(n):
        qc.h(qubit)
    return qc
def simulate_and_display(qc, step, it):
    """Simulate *qc* on the statevector backend and show its qsphere plot.

    Args:
        qc: the circuit to simulate.
        step: label for the algorithm step (only used in the commented-out
            screenshot filename).
        it: iteration label (same use as *step*).

    Note: relies on IPython's ``display`` — notebook context assumed.
    """
    sim = Aer.get_backend('statevector_simulator')
    res = execute(qc, backend=sim, shots=1).result()
    display(plot_state_qsphere(res.get_statevector(qc)))#.savefig(('./pics/out_'+step+it+'.png'))
def Grover(n, indices_of_marked_elements):
    """Build the full Grover-search circuit on n qubits.

    Applies Hadamards, then r = floor(pi/4 * sqrt(2^n / M)) oracle+diffuser
    rounds (M = number of marked states), displaying the state after each
    step, and finishes with a full measurement.
    """
    qc = QuantumCircuit(n, n)
    # Optimal number of amplification rounds.
    num_marked = len(indices_of_marked_elements)
    r = int(np.floor(np.pi/4*np.sqrt(2**n/num_marked)))
    print(f'{n} qubits, basis states {indices_of_marked_elements} marked, {r} rounds')
    # Show the all-zeros initial state.
    simulate_and_display(qc, 'init', '0')
    # Uniform superposition.
    qc.h(range(n))
    simulate_and_display(qc, 'hadamard', '0')
    # Amplitude-amplification rounds.
    for round_idx in range(r):
        qc.append(phase_oracle(n, indices_of_marked_elements), range(n))
        simulate_and_display(qc, 'oracle', str(round_idx))
        qc.append(diffuser(n), range(n))
        simulate_and_display(qc, 'diff', str(round_idx))
    qc.measure(range(n), range(n))
    return qc
# Driver: 5-qubit Grover search marking basis states 2 and 29, then sample
# the final circuit 1000 times on the QASM simulator and plot the histogram.
mycircuit = Grover(5, [2, 29])
mycircuit.draw(output='text')
from qiskit import Aer, execute
simulator = Aer.get_backend('qasm_simulator')
counts = execute(mycircuit, backend=simulator, shots=1000).result().get_counts(mycircuit)
from qiskit.visualization import plot_histogram
plot_histogram(counts)
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit.utils import algorithm_globals
from qiskit.algorithms.minimum_eigensolvers import QAOA, NumPyMinimumEigensolver
from qiskit.algorithms.optimizers import COBYLA
from qiskit.primitives import Sampler
from qiskit_optimization.applications.vertex_cover import VertexCover
import networkx as nx
# Fix the RNG seed so the notebook is reproducible.
seed = 123
algorithm_globals.random_seed = seed

# --- Vertex cover on a random 3-regular graph with 6 nodes ---
graph = nx.random_regular_graph(d=3, n=6, seed=seed)
pos = nx.spring_layout(graph, seed=seed)
prob = VertexCover(graph)
prob.draw(pos=pos)
# Translate the graph problem into a quadratic program.
qp = prob.to_quadratic_program()
print(qp.prettyprint())
# Numpy Eigensolver: classical exact baseline.
meo = MinimumEigenOptimizer(min_eigen_solver=NumPyMinimumEigensolver())
result = meo.solve(qp)
print(result.prettyprint())
print("\nsolution:", prob.interpret(result))
prob.draw(result, pos=pos)
# QAOA: variational solution with a depth-1 ansatz.
meo = MinimumEigenOptimizer(min_eigen_solver=QAOA(reps=1, sampler=Sampler(), optimizer=COBYLA()))
result = meo.solve(qp)
print(result.prettyprint())
print("\nsolution:", prob.interpret(result))
print("\ntime:", result.min_eigen_solver_result.optimizer_time)
prob.draw(result, pos=pos)

# --- Knapsack instance: 5 items, capacity 10 ---
from qiskit_optimization.applications import Knapsack
prob = Knapsack(values=[3, 4, 5, 6, 7], weights=[2, 3, 4, 5, 6], max_weight=10)
qp = prob.to_quadratic_program()
print(qp.prettyprint())
# Numpy Eigensolver baseline.
meo = MinimumEigenOptimizer(min_eigen_solver=NumPyMinimumEigensolver())
result = meo.solve(qp)
print(result.prettyprint())
print("\nsolution:", prob.interpret(result))
# QAOA solution.
meo = MinimumEigenOptimizer(min_eigen_solver=QAOA(reps=1, sampler=Sampler(), optimizer=COBYLA()))
result = meo.solve(qp)
print(result.prettyprint())
print("\nsolution:", prob.interpret(result))
print("\ntime:", result.min_eigen_solver_result.optimizer_time)

# --- Inspect the conversion pipeline: QP -> QUBO -> Ising Hamiltonian ---
from qiskit_optimization.converters import QuadraticProgramToQubo
# the same knapsack problem instance as in the previous section
prob = Knapsack(values=[3, 4, 5, 6, 7], weights=[2, 3, 4, 5, 6], max_weight=10)
qp = prob.to_quadratic_program()
print(qp.prettyprint())
# intermediate QUBO form of the optimization problem
conv = QuadraticProgramToQubo()
qubo = conv.convert(qp)
print(qubo.prettyprint())
# qubit Hamiltonian and offset
op, offset = qubo.to_ising()
print(f"num qubits: {op.num_qubits}, offset: {offset}\n")
print(op)
# Notebook magics: report the installed Qiskit versions and copyright.
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver
from qiskit.primitives import Sampler
from qiskit_optimization.algorithms import GroverOptimizer, MinimumEigenOptimizer
from qiskit_optimization.translators import from_docplex_mp
from docplex.mp.model import Model
# Three-binary-variable minimization built with DOcplex, solved with Grover
# adaptive search and checked against an exact classical eigensolver.
model = Model()
x0 = model.binary_var(name="x0")
x1 = model.binary_var(name="x1")
x2 = model.binary_var(name="x2")
model.minimize(-x0 + 2 * x1 - 3 * x2 - 2 * x0 * x2 - 1 * x1 * x2)
qp = from_docplex_mp(model)
print(qp.prettyprint())
# GroverOptimizer: 6 value qubits, 10 adaptive-search iterations.
grover_optimizer = GroverOptimizer(6, num_iterations=10, sampler=Sampler())
results = grover_optimizer.solve(qp)
print(results.prettyprint())
# Exact classical reference solution for comparison.
exact_solver = MinimumEigenOptimizer(NumPyMinimumEigensolver())
exact_result = exact_solver.solve(qp)
print(exact_result.prettyprint())
# Notebook magics: report the installed Qiskit versions and copyright.
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.tools.visualization import circuit_drawer
# Minimal demo: single-qubit Hadamard + measurement, drawn with matplotlib
# using a custom background color in the style dict.
q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
qc.h(q)
qc.measure(q, c)
circuit_drawer(qc, output='mpl', style={'backgroundcolor': '#EEEEEE'})
|
https://github.com/BOBO1997/osp_solutions
|
BOBO1997
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import numpy as np
from qiskit import compiler, BasicAer, QuantumRegister
from qiskit.converters import circuit_to_dag
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
def convert_to_basis_gates(circuit):
    """Unroll *circuit* into the {u1, u2, u3, cx, id} basis-gate set.

    Uses a single-pass PassManager with the Unroller pass and transpiles
    against the BasicAer QASM simulator.
    """
    unroll_pm = PassManager(passes=[Unroller(basis=['u1', 'u2', 'u3', 'cx', 'id'])])
    return compiler.transpile(circuit, BasicAer.get_backend('qasm_simulator'), pass_manager=unroll_pm)
def is_qubit(qb):
    """True if *qb* has the qubit shape: a (QuantumRegister, int) tuple."""
    if not isinstance(qb, tuple):
        return False
    return isinstance(qb[0], QuantumRegister) and isinstance(qb[1], int)
def is_qubit_list(qbs):
    """True if every element of *qbs* is a qubit (see ``is_qubit``).

    Vacuously True for an empty sequence, matching the original loop.
    """
    # Idiomatic replacement for the manual early-return loop.
    return all(is_qubit(qb) for qb in qbs)
def summarize_circuits(circuits):
    """Summarize circuits based on QuantumCircuit, and four metrics are summarized.

    Number of qubits and classical bits, and number of operations and depth
    of circuits.  The average statistic is provided if multiple circuits are
    inputed.

    Args:
        circuits (QuantumCircuit or [QuantumCircuit]): the to-be-summarized circuits

    Returns:
        str: a multi-line, human-readable summary report.
    """
    # Accept a single circuit or a list uniformly.
    if not isinstance(circuits, list):
        circuits = [circuits]
    ret = ""
    ret += "Submitting {} circuits.\n".format(len(circuits))
    ret += "============================================================================\n"
    # Running totals: [width, classical bits, size, depth].
    stats = np.zeros(4)
    for i, circuit in enumerate(circuits):
        # Convert to a DAG to query the structural metrics.
        dag = circuit_to_dag(circuit)
        depth = dag.depth()
        width = dag.width()
        size = dag.size()
        classical_bits = dag.num_cbits()
        op_counts = dag.count_ops()
        stats[0] += width
        stats[1] += classical_bits
        stats[2] += size
        stats[3] += depth
        ret = ''.join([ret, "{}-th circuit: {} qubits, {} classical bits and {} operations with depth {}\n op_counts: {}\n".format(
            i, width, classical_bits, size, depth, op_counts)])
    # Per-circuit averages are only meaningful with more than one circuit.
    if len(circuits) > 1:
        stats /= len(circuits)
        ret = ''.join([ret, "Average: {:.2f} qubits, {:.2f} classical bits and {:.2f} operations with depth {:.2f}\n".format(
            stats[0], stats[1], stats[2], stats[3])])
    ret += "============================================================================\n"
    return ret
|
https://github.com/AmanSinghBhogal/NasaNEO_Classification
|
AmanSinghBhogal
|
# pip install qiskit
# pip install qiskit.Aer
import pandas as pd
from sklearn.model_selection import train_test_split
from qiskit import QuantumCircuit, execute, Aer
from qiskit import ClassicalRegister, QuantumRegister
from qiskit.visualization import plot_histogram
import matplotlib.pyplot as plt
from math import asin, sqrt, pi, log
from statistics import mean
import pandas as pd
from sklearn.metrics import confusion_matrix, precision_score, recall_score
# Loaded the Dataset
data = pd.read_csv('neo_v2.csv')

# Pre-processing: discretize diameter and velocity (split at the dataset
# mean) and miss distance (three fixed-width buckets) into categories.
cat_max_dia = []
cat_RV = []
cat_miss = []
max_dia_mean = mean(data['est_diameter_max'])
Rv_mean = mean(data['relative_velocity'])
print("\n\nThe Mean Max Diameter is: ", max_dia_mean)
print("The Mean Relative Velocity is: ", Rv_mean)
for i, j, z in zip(data['est_diameter_max'], data['relative_velocity'], data['miss_distance']):
    # Diameter: Large/Small around the mean.
    if i >= max_dia_mean:
        cat_max_dia.append("Large")
    else:
        cat_max_dia.append("Small")
    # Relative velocity: Fast/Slow around the mean.
    if j >= Rv_mean:
        cat_RV.append("Fast")
    else:
        cat_RV.append("Slow")
    # Miss distance buckets: [0, 25M), [25M, 50M), >= 50M.
    if z >= 0 and z < 25000000:
        cat_miss.append("Less")
    elif z >= 25000000 and z < 50000000:
        cat_miss.append("Medium")
    else:
        cat_miss.append("More")
processed_data = pd.DataFrame(list(zip(data['est_diameter_max'], data['relative_velocity'], data['miss_distance'], cat_max_dia, cat_RV, cat_miss, data['hazardous'])), columns=['Max_Diameter', 'Relative_Velocity', 'Miss_Distance', 'Categorized_Diameter', 'Categorized_Relative_Vel', 'Categorised_Miss_Distance', 'Hazardous'])
# Saving the Processed Data to get a better view
processed_data.to_csv('processedData.csv')
# Splitted the Dataset into Training and Testing (70/30, fixed seed)
train_input, test_input = train_test_split(processed_data, test_size=0.3, random_state=50)
# Printing Number of records in training and testing
print("There are {} Training Records and {} Testing Records".format(train_input.shape[0], test_input.shape[0]))
# Printing the Number of True and False Records in Train and Test Dataset
print("Train Dataset: No of True: {}, No. False: {}".format(len(train_input[train_input['Hazardous'] == True]), len(train_input[train_input['Hazardous'] == False])))
print("Test Dataset: No of True: {}, No. False: {}".format(len(test_input[test_input['Hazardous'] == True]), len(test_input[test_input['Hazardous'] == False])))
# Probability of Max Diameter:
def prob_hazard_calc(df, category_name, category_val):
    """Estimate P(Hazardous | category == value) from *df*.

    Args:
        df: DataFrame with a boolean ``Hazardous`` column.
        category_name: name of the categorical column to condition on.
        category_val: category value selecting the subgroup.

    Returns:
        tuple: (hazard probability within the subgroup, subgroup size).
    """
    subgroup = df[df[category_name] == category_val]
    hazardous_subgroup = subgroup[subgroup['Hazardous'] == True]
    return len(hazardous_subgroup) / len(subgroup), len(subgroup)
# Marginal hazard probabilities per category, estimated on the training split.
# For Max Diameter:
# for small:
p_small, pop_small = prob_hazard_calc(train_input, "Categorized_Diameter", "Small")
print(p_small, pop_small)
# for Large:
p_large, pop_large = prob_hazard_calc(train_input, "Categorized_Diameter", "Large")
print(p_large, pop_large)
print("\n\nPrinting Chances of Max Diameter Objects given they are hazardous:\n")
print("{} Small Diameter objects had {} chances of being hazardous".format(pop_small, p_small))
print("{} Large Diameter objects had {} chances of being hazardous".format(pop_large, p_large))
# For Relative Velocity:
# for Slow:
p_slow, pop_slow = prob_hazard_calc(train_input, "Categorized_Relative_Vel", "Slow")
# print(p_slow)
# for Fast:
p_fast, pop_fast = prob_hazard_calc(train_input, "Categorized_Relative_Vel", "Fast")
# print(p_fast)
print("\n\nPrinting Chances of Relative Velocity Objects given they are hazardous:\n")
print("{} Slow Relative Velocity objects had {} chances of being hazardous".format(pop_slow, p_slow))
print("{} Fast Relative Velocity objects had {} chances of being hazardous".format(pop_fast, p_fast))
# For Miss Distance:
# For Less
p_less, pop_less = prob_hazard_calc(train_input, "Categorised_Miss_Distance", "Less")
# print(p_less, pop_less)
# For Medium
p_med, pop_med = prob_hazard_calc(train_input, "Categorised_Miss_Distance", "Medium")
# print(p_med, pop_med)
# For More:
p_more, pop_more = prob_hazard_calc(train_input, "Categorised_Miss_Distance", "More")
# print(p_more, pop_more)
print("\n\nPrinting Chances of Miss Distance Objects given they are hazardous:\n")
print("{} Less Miss Distance objects had {} chances of being hazardous".format(pop_less, p_less))
print("{} Medium Miss Distance objects had {} chances of being hazardous".format(pop_med, p_med))
print("{} More Miss Distance objects had {} chances of being hazardous".format(pop_more, p_more))
# Specifying the marginal probability
def prob_to_angle(prob):
    """
    Converts a given P(psi) value into an equivalent theta value.
    """
    # RY(theta) puts amplitude sin(theta/2) on |1>, so theta = 2*asin(sqrt(p)).
    half_angle = asin(sqrt(prob))
    return 2 * half_angle
# Initialize the quantum circuit
# Layout (per the ccry calls further down): qubit 0 = diameter parent,
# qubit 1 = relative-velocity parent, qubit 2 = hazardous child node.
qc = QuantumCircuit(3)
# Set qubit0 to p_large i.e for Max Diameter
qc.ry(prob_to_angle(p_large), 0)
# Set qubit1 to p_fast i.e for Relative Velocity
qc.ry(prob_to_angle(p_fast), 1)
# Defining the CCRY‐gate:
def ccry(qc, theta, control1, control2, controlled):
    """Doubly-controlled RY rotation built from CRY and CNOT gates.

    Standard decomposition: the +/- theta/2 rotations cancel unless both
    control qubits are |1>, leaving a net RY(theta) on `controlled`.
    """
    qc.cry(theta/2, control2, controlled)
    qc.cx(control1, control2)
    qc.cry(-theta/2, control2, controlled)
    qc.cx(control1, control2)
    qc.cry(theta/2, control1, controlled)
# Script section: estimate P(hazardous | relative-velocity, diameter) for all
# four parent combinations, then encode them on qubit 2 via X-sandwiched CCRY
# gates (one per parent basis state).
# Calculating the conditional probabilities
# fast Relative Velocity and large Diameter
population_fast=train_input[train_input.Categorized_Relative_Vel.eq("Fast")]
population_fast_large= population_fast[population_fast.Categorized_Diameter.eq("Large")]
hazardous_fast_large = population_fast_large[population_fast_large.Hazardous.eq(1)]
p_hazardous_fast_large=len(hazardous_fast_large)/len(population_fast_large)
# print(p_hazardous_fast_large)
# fast Relative Velocity and small Diameter
population_fast_small = population_fast[population_fast.Categorized_Diameter.eq("Small")]
hazardous_fast_small = population_fast_small[population_fast_small.Hazardous.eq(1)]
p_hazardous_fast_small=len(hazardous_fast_small)/len(population_fast_small)
# Slow Relative Velocity and Large Diameter
population_slow = train_input[train_input.Categorized_Relative_Vel.eq("Slow")]
population_slow_large = population_slow[population_slow.Categorized_Diameter.eq("Large")]
hazardous_slow_large=population_slow_large[population_slow_large.Hazardous.eq(1)]
p_hazardous_slow_large=len(hazardous_slow_large)/len(population_slow_large)
# Slow Relative Velocity and Small Diameter
population_slow_small = population_slow[population_slow.Categorized_Diameter.eq("Small")]
hazardous_slow_small = population_slow_small[population_slow_small.Hazardous.eq(1)]
p_hazardous_slow_small=len(hazardous_slow_small)/len(population_slow_small)
# Initializing the child node:
# set state |00> to conditional probability of slow RV and small Diameter
qc.x(0)
qc.x(1)
ccry(qc,prob_to_angle(p_hazardous_slow_small),0,1,2)
qc.x(0)
qc.x(1)
# set state |01> to conditional probability of slow RV and large Diameter
qc.x(0)
ccry(qc,prob_to_angle(p_hazardous_slow_large),0,1,2)
qc.x(0)
# set state |10> to conditional probability of fast RV and small Diameter
qc.x(1)
ccry(qc,prob_to_angle(p_hazardous_fast_small),0,1,2)
qc.x(1)
# set state |11> to conditional probability of fast RV and large Diameter
ccry(qc,prob_to_angle(p_hazardous_fast_large),0,1,2)
# Circuit execution
# execute the qc on the statevector simulator (exact amplitudes, no shots)
results = execute(qc,Aer.get_backend('statevector_simulator')).result().get_counts()
plot_histogram(results)
# Same network rebuilt with a classical register so qubit 2 (hazardous)
# can be measured and sampled on the qasm simulator.
qr = QuantumRegister(3)
cr = ClassicalRegister(1)
qc = QuantumCircuit(qr, cr)
# Listing Run the circuit including a measurement
# Set qubit0 to p_large i.e for Max Diameter
qc.ry(prob_to_angle(p_large), 0)
# Set qubit1 to p_fast i.e for Relative Velocity
qc.ry(prob_to_angle(p_fast), 1)
# set state |00> to conditional probability of slow RV and small Diameter
qc.x(0)
qc.x(1)
ccry(qc,prob_to_angle(p_hazardous_slow_small),0,1,2)
qc.x(0)
qc.x(1)
# set state |01> to conditional probability of slow RV and large Diameter
qc.x(0)
ccry(qc,prob_to_angle(p_hazardous_slow_large),0,1,2)
qc.x(0)
# set state |10> to conditional probability of fast RV and small Diameter
qc.x(1)
ccry(qc,prob_to_angle(p_hazardous_fast_small),0,1,2)
qc.x(1)
# set state |11> to conditional probability of fast RV and large Diameter
ccry(qc,prob_to_angle(p_hazardous_fast_large),0,1,2)
qc.measure(qr[2], cr[0])
results = execute(qc,Aer.get_backend('qasm_simulator'), shots=1000).result().get_counts()
plot_histogram(results)
# Toy dataset of (A, B) observations with one missing B value (None),
# used below to demonstrate handling of missing data.
data = [
    (1, 1), (1, 1), (0, 0), (0, 0), (0, 0), (0, None), (0, 1), (1, 0)
]
# The log‐likelihood function adapted for our data
def log_likelihood(data, prob_a_b, prob_a_nb, prob_na_b, prob_na_nb):
    """
    Sum the log-probabilities of every (a, b) point in *data*.

    Points whose second entry is neither 0 nor 1 (e.g. missing/None)
    contribute the marginal log(P(not-A, B) + P(not-A, not-B)) instead.
    """
    table = {
        (1, 1): prob_a_b,
        (1, 0): prob_a_nb,
        (0, 1): prob_na_b,
        (0, 0): prob_na_nb,
    }
    fallback = prob_na_b + prob_na_nb
    total = 0
    for point in data:
        total += log(table.get((point[0], point[1]), fallback))
    return total
# The as‐pqc function
def as_pqc(cnt_quantum, with_qc, cnt_classical=1, shots=1, hist=False, measure=False):
    """Build and run a parameterized quantum circuit.

    cnt_quantum   -- number of qubits
    with_qc       -- callback(qc, qr=..., cr=...) that appends the gates
    cnt_classical -- number of classical bits (only used when measure=True)
    shots         -- shot count for the qasm simulator
    hist          -- return a histogram plot instead of the raw counts
    measure       -- use the qasm simulator (sampling); otherwise the exact
                     statevector simulator is used and `shots` is irrelevant
    """
    # Prepare the circuit with qubits and a classical bit to hold the measurement
    qr = QuantumRegister(cnt_quantum)
    cr = ClassicalRegister(cnt_classical)
    qc = QuantumCircuit(qr, cr) if measure else QuantumCircuit(qr)
    with_qc(qc, qr=qr, cr=cr)
    results = execute(
        qc,
        Aer.get_backend('statevector_simulator') if measure is False else Aer.get_backend('qasm_simulator'),
        shots=shots
    ).result().get_counts()
    return plot_histogram(results, figsize=(12,4)) if hist else results
# The quantum Bayesian network
def qbn(data, hist=True):
    """Two-qubit quantum Bayesian network estimated from (A, B) pairs.

    Qubit 0 encodes the marginal P(A); qubit 1 encodes P(B | A) and
    P(B | not A) via controlled rotations. Missing B values must be
    filled in (or filtered out) before calling — the sums below treat
    item[1] as numeric.
    """
    def circuit(qc, qr=None, cr=None):
        list_a = list(filter(lambda item: item[0] == 1, data))
        list_na = list(filter(lambda item: item[0] == 0, data))
        # set the marginal probability of A
        qc.ry(prob_to_angle(
            len(list_a) / len(data)
        ), 0)
        # set the conditional probability of NOT A and (B / not B)
        qc.x(0)
        qc.cry(prob_to_angle(
            sum(list(map(lambda item: item[1], list_na))) / len(list_na)
        ),0,1)
        qc.x(0)
        # set the conditional probability of A and (B / not B)
        qc.cry(prob_to_angle(
            sum(list(map(lambda item: item[1], list_a))) / len(list_a)
        ),0,1)
    return as_pqc(2, circuit, hist=hist)
# Baseline: ignore (drop) the observations whose B value is missing.
qbn(list(filter(lambda item: item[1] is not None ,data)))
# Calculate the log‐likelihood when ignoring the missing data
def eval_qbn(model, prepare_data, data):
    """Run *model* on prepared data and score it against the full dataset.

    Returns (rounded log-likelihood, P(B | not A)) computed from the
    model's state probabilities. NOTE(review): indexing results['11'] etc.
    raises KeyError if a state has zero probability — confirm the data
    always populates all four states.
    """
    results = model(prepare_data(data), hist=False)
    return (
        round(log_likelihood(data,
            results['11'], # prob_a_b
            results['01'], # prob_a_nb
            results['10'], # prob_na_b
            results['00'] # prob_na_nb
        ), 3),
        results['10'] / (results['10'] + results['00'])
    )
# Manual expectation-maximization: evaluate the model under different
# fill-in strategies for the missing B value, converging on ~0.252.
print(eval_qbn(qbn, lambda dataset: list(filter(lambda item: item[1] is not None ,dataset)), data))
# Calculate the log‐likelihood when filling in 0
print(eval_qbn(qbn, lambda dataset: list(map(lambda item: item if item[1] is not None else (item[0], 0) ,dataset)), data))
# Evaluating the guess
print(eval_qbn(qbn, lambda dataset: list(map(lambda item: item if item[1] is not None else (item[0], 0.5) ,dataset)), data))
# Refining the model
print(eval_qbn(qbn, lambda dataset: list(map(lambda item: item if item[1] is not None else (item[0], 0.3) ,dataset)), data))
# Further refining the model
print(eval_qbn(qbn, lambda dataset: list(map(lambda item: item if item[1] is not None else (item[0], 0.252) ,dataset)), data))
# Another iteration (same fill-in value: the estimate has converged)
print(eval_qbn(qbn, lambda dataset: list(map(lambda item: item if item[1] is not None else (item[0], 0.252) ,dataset)), data))
# positions of the qubits
QPOS_dia = 0
QPOS_RV = 1
def apply_islarge_fast(qc):
# set the marginal probability of large Diameter
qc.ry(prob_to_angle(p_large), QPOS_dia)
# set the marginal probability of Fast Relative Velocity
qc.ry(prob_to_angle(p_fast), QPOS_RV)
# Defining the CCRY‐gate:
def ccry(qc, theta, control1, control2, controlled):
qc.cry(theta/2, control2, controlled)
qc.cx(control1, control2)
qc.cry(-theta/2, control2, controlled)
qc.cx(control1, control2)
qc.cry(theta/2, control1, controlled)
# Listing Represent the norm
# position of the qubit representing the latent "norm" variable
QPOS_NORM = 2
def apply_norm(qc, norm_params):
    """Encode P(norm | diameter, velocity) on QPOS_NORM.

    One X-sandwiched CCRY per parent basis state selects which
    conditional probability is applied.

    norm_params = {
        'p_norm_small_slow': 0.25,
        'p_norm_small_fast': 0.35,
        'p_norm_large_slow': 0.45,
        'p_norm_large_fast': 0.55
    }
    """
    # set the conditional probability of Norm given small/slow
    qc.x(QPOS_dia)
    qc.x(QPOS_RV)
    ccry(qc, prob_to_angle(
        norm_params['p_norm_small_slow']
    ),QPOS_dia, QPOS_RV, QPOS_NORM)
    qc.x(QPOS_dia)
    qc.x(QPOS_RV)
    # set the conditional probability of Norm given small/fast
    qc.x(QPOS_dia)
    ccry(qc, prob_to_angle(
        norm_params['p_norm_small_fast']
    ),QPOS_dia, QPOS_RV, QPOS_NORM)
    qc.x(QPOS_dia)
    # set the conditional probability of Norm given large/slow
    qc.x(QPOS_RV)
    ccry(qc, prob_to_angle(
        norm_params['p_norm_large_slow']
    ),QPOS_dia, QPOS_RV, QPOS_NORM)
    qc.x(QPOS_RV)
    # set the conditional probability of Norm given large/fast
    ccry(qc, prob_to_angle(
        norm_params['p_norm_large_fast']
    ),QPOS_dia, QPOS_RV, QPOS_NORM)
# Listing Calculate the probabilities related to the miss distance
pop_more = train_input[train_input.Categorised_Miss_Distance.eq("More")]
hazardous_more = round(len(pop_more[pop_more.Hazardous.eq(1)])/len(pop_more), 2)
p_more = round(len(pop_more)/len(train_input), 2)
pop_med = train_input[train_input.Categorised_Miss_Distance.eq("Medium")]
hazardous_med = round(len(pop_med[pop_med.Hazardous.eq(1)])/len(pop_med), 2)
p_med = round(len(pop_med)/len(train_input), 2)
pop_less = train_input[train_input.Categorised_Miss_Distance.eq("Less")]
hazardous_less = round(len(pop_less[pop_less.Hazardous.eq(1)])/len(pop_less), 2)
p_less = round(len(pop_less)/len(train_input), 2)
print("More Miss Distance: {} of the Objects, hazardous: {}".format(p_more , hazardous_more))
print("Medium Miss Distance: {} of the Objects, hazardous: {}".format(p_med,hazardous_med))
print("Less Miss Distance: {} of the Objects, hazardous: {}".format(p_less,hazardous_less))
# Listing Represent the miss-distance
# positions of the one-hot miss-distance qubits
QPOS_more = 3
QPOS_med = 4
QPOS_less = 5
def apply_class(qc):
    """One-hot encode the three miss-distance categories on qubits 3-5."""
    # set the marginal probability of miss-distance=More
    qc.ry(prob_to_angle(p_more), QPOS_more)
    qc.x(QPOS_more)
    # set the probability of miss-distance=Medium, conditioned on not-More
    qc.cry(prob_to_angle(p_med/(1-p_more)), QPOS_more, QPOS_med)
    # set the probability of miss-distance=Less, conditioned on neither More nor Medium
    # NOTE(review): p_less/(1-p_more-p_med) relies on the three rounded
    # shares summing to <= 1 — verify, a prob_to_angle argument > 1 raises.
    qc.x(QPOS_med)
    ccry(qc, prob_to_angle(p_less/(1-p_more-p_med)), QPOS_more, QPOS_med, QPOS_less)
    qc.x(QPOS_med)
    qc.x(QPOS_more)
# Listing Represent hazardous
# position of the qubit holding the prediction target
QPOS_hazardous = 6
def apply_hazardous(qc, hazardous_params):
    """Encode P(hazardous | norm, miss-distance) on QPOS_hazardous.

    Each CCRY is controlled by the norm qubit and one of the one-hot
    miss-distance qubits; the X-sandwich switches between the
    unfavoured (norm=0) and favoured (norm=1) branches.

    hazardous_params = {
        'p_hazardous_favoured_more': 0.3,
        'p_hazardous_favoured_med': 0.4,
        'p_hazardous_favoured_less': 0.5,
        'p_hazardous_unfavoured_more': 0.6,
        'p_hazardous_unfavoured_med': 0.7,
        'p_hazardous_unfavoured_less': 0.8
    }
    """
    # set the conditional probability of hazardous given unfavored by norm
    qc.x(QPOS_NORM)
    ccry(qc, prob_to_angle(
        hazardous_params['p_hazardous_unfavoured_more']
    ),QPOS_NORM, QPOS_more, QPOS_hazardous)
    ccry(qc, prob_to_angle(
        hazardous_params['p_hazardous_unfavoured_med']
    ),QPOS_NORM, QPOS_med, QPOS_hazardous)
    ccry(qc, prob_to_angle(
        hazardous_params['p_hazardous_unfavoured_less']
    ),QPOS_NORM, QPOS_less, QPOS_hazardous)
    qc.x(QPOS_NORM)
    # set the conditional probability of hazardous given favored by norm
    ccry(qc, prob_to_angle(
        hazardous_params['p_hazardous_favoured_more']
    ),QPOS_NORM, QPOS_more, QPOS_hazardous)
    ccry(qc, prob_to_angle(
        hazardous_params['p_hazardous_favoured_med']
    ),QPOS_NORM, QPOS_med, QPOS_hazardous)
    ccry(qc, prob_to_angle(
        hazardous_params['p_hazardous_favoured_less']
    ),QPOS_NORM, QPOS_less, QPOS_hazardous)
# Listing The quantum bayesian network
# total number of qubits: 2 roots + norm + 3 miss-distance + hazardous
QUBITS = 7
def qbn_neo(norm_params, hazardous_params, hist=True, measure=False, shots=1):
    """Assemble the full NEO Bayesian network and run it via as_pqc."""
    def circuit(qc, qr=None, cr=None):
        apply_islarge_fast(qc)
        apply_norm(qc, norm_params)
        apply_class(qc)
        apply_hazardous(qc, hazardous_params)
    return as_pqc(QUBITS, circuit, hist=hist, measure=measure, shots=shots)
# Listing Try the QBN
norm_params = {
'p_norm_small_slow': 0.25,
'p_norm_small_fast': 0.35,
'p_norm_large_slow': 0.45,
'p_norm_large_fast': 0.55
}
hazardous_params = {
'p_hazardous_favoured_more': 0.3,
'p_hazardous_favoured_med': 0.4,
'p_hazardous_favoured_less': 0.5,
'p_hazardous_unfavoured_more': 0.6,
'p_hazardous_unfavoured_med': 0.7,
'p_hazardous_unfavoured_less': 0.8
}
qbn_neo(norm_params, hazardous_params, hist=True)
# Listing Calculate the parameters of the norm
def calculate_norm_params(objects):
    """
    Estimate P(favoured-by-norm | diameter, velocity) from the data.

    For each (diameter, velocity) combination the parameter is the mean
    of the soft `Norm` column over the rows matching both categories.
    """
    def norm_rate(diameter, velocity):
        # rows matching both categories; mean of their Norm values
        subset = objects[
            objects.Categorized_Diameter.eq(diameter)
            & objects.Categorized_Relative_Vel.eq(velocity)
        ]
        return subset.Norm.sum() / len(subset)
    return {
        'p_norm_small_slow': norm_rate("Small", "Slow"),
        'p_norm_small_fast': norm_rate("Small", "Fast"),
        'p_norm_large_slow': norm_rate("Large", "Slow"),
        'p_norm_large_fast': norm_rate("Large", "Fast"),
    }
# Listing Calculate the parameters of hazardous
def calculate_hazardous_params(objects):
    """Estimate P(hazardous | norm, miss-distance), weighting each row by
    its soft Norm probability (or 1-Norm for the unfavoured branch)."""
    # all hazardous
    hazardous = objects[objects.Hazardous.eq(1)]
    # weight the object: soft norm membership times an indicator for the
    # miss-distance category
    def weight_object(norm, missDistance):
        return lambda object: (object[0] if norm else 1-object[0]) * (1 if object[1] == missDistance else 0)
    # calculate the probability of being hazardous:
    # weighted hazardous mass / weighted total mass for the branch
    def calc_prob(norm, missDistance):
        return sum(list(map(
            weight_object(norm, missDistance),
            list(zip(hazardous['Norm'], hazardous['Categorised_Miss_Distance']))
        ))) / sum(list(map(
            weight_object(norm, missDistance),
            list(zip(objects['Norm'], objects['Categorised_Miss_Distance']))
        )))
    hazardous_params = {
        'p_hazardous_favoured_more': calc_prob(True, "More"),
        'p_hazardous_favoured_med': calc_prob(True, "Medium"),
        'p_hazardous_favoured_less': calc_prob(True, "Less"),
        'p_hazardous_unfavoured_more': calc_prob(False, "More"),
        'p_hazardous_unfavoured_med': calc_prob(False, "Medium"),
        'p_hazardous_unfavoured_less': calc_prob(False, "Less")
    }
    return hazardous_params
# Listing Prepare the data
def prepare_data(objects, params):
    """
    Attach the `IsLarge` and latent `Norm` columns to *objects*.

    The frame is mutated in place and also returned. `IsLarge` is 1 for
    every diameter category except "Small". `Norm` is looked up from
    *params* under the key p_norm_{small|large}_{slow|fast}_{(non)hazardous}.

    params = {
        'p_norm_large_slow_hazardous': 0.45,
        'p_norm_large_slow_nonhazardous': 0.46,
        'p_norm_large_fast_hazardous': 0.47,
        'p_norm_large_fast_nonhazardous': 0.48,
        'p_norm_small_slow_hazardous': 0.49,
        'p_norm_small_slow_nonhazardous': 0.51,
        'p_norm_small_fast_hazardous': 0.52,
        'p_norm_small_fast_nonhazardous': 0.53,
    }
    """
    # is the object large? (0 = Small, 1 = anything else)
    objects['IsLarge'] = objects['Categorized_Diameter'].map(lambda dia: 0 if dia == "Small" else 1)
    # the probability of favored by norm given diameter, relative velocity, and hazardous.
    # BUGFIX: item[0] is the 0/1 IsLarge value, so it must be compared
    # against 0 — the original compared it against the string "Small",
    # which never matched and silently labeled every row 'large'.
    objects['Norm'] = list(map(
        lambda item: params['p_norm_{}_{}_{}'.format(
            'small' if item[0] == 0 else 'large',
            'slow' if item[1] == "Slow" else 'fast',
            'nonhazardous' if item[2] == 0 else 'hazardous'
        )],
        list(zip(objects['IsLarge'], objects['Categorized_Relative_Vel'], objects['Hazardous']))
    ))
    return objects
# Listing Initialize the parameters
# Step 0: Initialize the parameter values (arbitrary starting guesses near 0.5)
params = {
    'p_norm_large_slow_hazardous': 0.45,
    'p_norm_large_slow_nonhazardous': 0.46,
    'p_norm_large_fast_hazardous': 0.47,
    'p_norm_large_fast_nonhazardous': 0.48,
    'p_norm_small_slow_hazardous': 0.49,
    'p_norm_small_slow_nonhazardous': 0.51,
    'p_norm_small_fast_hazardous': 0.52,
    'p_norm_small_fast_nonhazardous': 0.53,
}
# Listing Run the qbn once with the initial parameters
objects = prepare_data(test_input, params)
results = qbn_neo(calculate_norm_params(objects), calculate_hazardous_params(objects), hist=False)
print(objects)
objects.to_csv('objects.csv')
# Listing Get a list of relevant states
def filter_states(states, position, value):
    """Keep the (bitstring, probability) items whose qubit at *position* equals *value*.

    Qiskit bitstrings are ordered with qubit 0 rightmost, so qubit
    `position` lives at string index QUBITS-1-position.
    """
    idx = QUBITS - 1 - position
    wanted = str(value)
    return [item for item in states if item[0][idx] == wanted]
# Listing The states with hazardous objects (hazardous qubit measured as 1)
filter_states(results.items(), QPOS_hazardous, '1')
# Listing Calculate the marginal probability to be hazardous
def sum_states(states):
    """Total probability mass of the given (bitstring, probability) items."""
    return sum(item[1] for item in states)
# Marginal P(hazardous): sum the probabilities of all states with the hazardous qubit set.
sum_states(filter_states(results.items(), QPOS_hazardous, '1'))
# Listing The log‐likelihood function adapted for our data
def log_likelihood_neo(data, results):
    """Log-likelihood of *data* under the state distribution in *results*.

    Marginalizes over norm/diameter/velocity/hazardous by summing the
    probabilities of all matching basis states, then sums, per row, the
    log of the favoured + unfavoured branch probabilities.
    """
    states = results.items()
    # joint probability of one (norm, islarge, velocity, hazardous) assignment
    def calc_prob(norm_val, islarge_val, rv_val, hazardous_val):
        return sum_states(
            filter_states(
                filter_states(
                    filter_states(
                        filter_states(states, QPOS_RV, rv_val),
                        QPOS_dia, islarge_val
                    ), QPOS_hazardous, hazardous_val
                ), QPOS_NORM, norm_val))
    probs = {
        'p_favoured_large_slow_hazardous': calc_prob('1', '1', '0', '1'),
        'p_favoured_large_slow_nonhazardous': calc_prob('1', '1', '0', '0'),
        'p_favoured_large_fast_hazardous': calc_prob('1', '1', '1', '1'),
        'p_favoured_large_fast_nonhazardous': calc_prob('1', '1', '1', '0'),
        'p_favoured_small_slow_hazardous': calc_prob('1', '0', '0', '1'),
        'p_favoured_small_slow_nonhazardous': calc_prob('1', '0', '0', '0'),
        'p_favoured_small_fast_hazardous': calc_prob('1', '0', '1', '1'),
        'p_favoured_small_fast_nonhazardous': calc_prob('1', '0', '1', '0'),
        'p_unfavoured_large_slow_hazardous': calc_prob('0', '1', '0', '1'),
        'p_unfavoured_large_slow_nonhazardous': calc_prob('0', '1', '0', '0'),
        'p_unfavoured_large_fast_hazardous': calc_prob('0', '1', '1', '1'),
        'p_unfavoured_large_fast_nonhazardous': calc_prob('0', '1', '1', '0'),
        'p_unfavoured_small_slow_hazardous': calc_prob('0', '0', '0', '1'),
        'p_unfavoured_small_slow_nonhazardous': calc_prob('0', '0', '0', '0'),
        'p_unfavoured_small_fast_hazardous': calc_prob('0', '0', '1', '1'),
        'p_unfavoured_small_fast_nonhazardous': calc_prob('0', '0', '1', '0'),
    }
    # per row: log of the probability summed over the hidden norm variable
    return round(sum(map(
        lambda item: log(probs['p_{}_{}_{}_{}'.format(
            'unfavoured',
            'small' if item[1] == 0 else 'large',
            'slow' if item[2] == "Slow" else 'fast',
            'nonhazardous' if item[3] == 0 else 'hazardous'
        )] + probs['p_{}_{}_{}_{}'.format(
            'favoured',
            'small' if item[1] == 0 else 'large',
            'slow' if item[2] == "Slow" else 'fast',
            'nonhazardous' if item[3] == 0 else 'hazardous'
        )]
        ),
        list(zip(data['Norm'], data['IsLarge'], data['Categorized_Relative_Vel'], data['Hazardous']))
    )), 3)
# Listing Calculate the log‐likelihood for the test set under the current model
log_likelihood_neo(test_input, results)
# Listing Obtain new object values from the results
def to_params(results):
    """Read refined norm parameters back out of the state distribution.

    For each (diameter, velocity, hazardous) combination, returns
    P(norm=1 | combination) = mass with norm qubit set / total mass.
    """
    states = results.items()
    def calc_norm(islarge_val, rv_val, hazardous_val):
        pop = filter_states(filter_states(filter_states(states, QPOS_RV, rv_val), QPOS_dia, islarge_val), QPOS_hazardous, hazardous_val)
        p_norm = sum(map(lambda item: item[1], filter_states(pop, QPOS_NORM, '1')))
        p_total = sum(map(lambda item: item[1], pop))
        return p_norm / p_total
    return {
        'p_norm_large_slow_hazardous': calc_norm('1', '0', '1'),
        'p_norm_large_slow_nonhazardous': calc_norm('1', '0', '0'),
        'p_norm_large_fast_hazardous': calc_norm('1', '1', '1'),
        'p_norm_large_fast_nonhazardous': calc_norm('1', '1', '0'),
        'p_norm_small_slow_hazardous': calc_norm('0', '0', '1'),
        'p_norm_small_slow_nonhazardous': calc_norm('0', '0', '0'),
        'p_norm_small_fast_hazardous': calc_norm('0', '1', '1'),
        'p_norm_small_fast_nonhazardous': calc_norm('0', '1', '0'),
    }
# Listing Calculate new parameters from the first run's results
to_params(results)
# Listing The recursive training automatism
def train_qbn_neo(objects, params, iterations):
    """
    Iteratively re-estimate the QBN parameters (EM-style loop).

    Each pass prepares the data with the current parameters, runs the
    network, reports the log-likelihood, and reads refined parameters
    back from the resulting state distribution. With iterations == 0
    the initial *params* are returned untouched.
    """
    current = params
    for step in range(1, iterations + 1):
        objects = prepare_data(objects, current)
        results = qbn_neo(calculate_norm_params(objects), calculate_hazardous_params(objects), hist=False)
        print ('The log-likelihood after {} iteration(s) is {}'.format(step, log_likelihood_neo(objects, results)))
        current = to_params(results)
    return current
# Listing Train the QBN for 25 EM iterations from the initial guesses.
test_params = train_qbn_neo(test_input, {
    'p_norm_large_slow_hazardous': 0.45,
    'p_norm_large_slow_nonhazardous': 0.46,
    'p_norm_large_fast_hazardous': 0.47,
    'p_norm_large_fast_nonhazardous': 0.48,
    'p_norm_small_slow_hazardous': 0.49,
    'p_norm_small_slow_nonhazardous': 0.51,
    'p_norm_small_fast_hazardous': 0.52,
    'p_norm_small_fast_nonhazardous': 0.53,
}, 25)
# Listing The parameters after training (notebook cell output)
test_params
# Listing Pre‐processing
def pre_process(object):
    """Extract the (is_large, is_fast, miss_distance) features from one record."""
    is_large = object['IsLarge'] == 1
    is_fast = object['Categorized_Relative_Vel'] == 'Fast'
    return (is_large, is_fast, object['Categorised_Miss_Distance'])
# Listing Apply the known data on the quantum circuit
def apply_known(qc, is_large, is_fast, missDistance):
    """Clamp the evidence qubits to the observed feature values."""
    if is_large:
        qc.x(QPOS_dia)
    if is_fast:
        qc.x(QPOS_RV)
    # exactly one of the one-hot miss-distance qubits is set
    qc.x(QPOS_more if missDistance == "More" else (QPOS_med if missDistance == "Medium" else QPOS_less))
# Listing Get the trained QBN
def get_trained_qbn(objects, params):
    """Build a classifier closure from trained parameters.

    The network parameters are computed once from the prepared data;
    the returned function measures the hazardous qubit 100 times for a
    single pre-processed object and returns the counts.
    """
    prepared_objects = prepare_data(objects, params)
    norm_params = calculate_norm_params(prepared_objects)
    hazardous_params = calculate_hazardous_params(prepared_objects)
    def trained_qbn_neo(object):
        (is_large, is_fast, missDistance) = object
        def circuit(qc, qr, cr):
            # evidence is clamped, so apply_class (marginal priors) is skipped
            apply_known(qc, is_large, is_fast, missDistance)
            apply_norm(qc, norm_params)
            apply_hazardous(qc, hazardous_params)
            qc.measure(qr[QPOS_hazardous], cr[0])
        return as_pqc(QUBITS, circuit, hist=False, measure=True, shots=100)
    return trained_qbn_neo
# Listing Post‐processing
def post_process(counts):
    """
    counts -- the result of the quantum circuit execution
    returns the prediction: 1 (hazardous) iff '1' was measured strictly
    more often than '0'
    """
    hazardous_hits = counts.get('1', 0)
    safe_hits = counts.get('0', 0)
    return 1 if hazardous_hits > safe_hits else 0
# Preparing Report
def run(f_classify, x):
    """Apply the classifier to every element of *x* and collect the results."""
    return [f_classify(item) for item in x]
def specificity(matrix):
    """True-negative rate TN/(TN+FP) from a confusion matrix; 0 when the first row is empty."""
    actual_negatives = matrix[0][0] + matrix[0][1]
    if actual_negatives > 0:
        return matrix[0][0] / actual_negatives
    return 0
def npv(matrix):
    """Negative predictive value TN/(TN+FN); 0 when nothing was predicted negative."""
    predicted_negatives = matrix[0][0] + matrix[1][0]
    if predicted_negatives > 0:
        return matrix[0][0] / predicted_negatives
    return 0
def classifier_report(name, run, classify, input, labels):
    """Print precision, recall, specificity, NPV and their mean for a classifier.

    name     -- label used in the printed report
    run      -- runner(classify, input) -> list of predictions
    classify -- per-object classifier function
    input    -- iterable of objects to classify
    labels   -- ground-truth labels aligned with *input*
    Side effect: writes the (label, prediction) pairs to output.csv.
    """
    cr_predictions = run(classify, input)
    cr_cm = confusion_matrix(labels, cr_predictions)
    output = pd.DataFrame(list(zip(labels, cr_predictions)), columns=['Labels', 'Predictions'])
    output.to_csv('output.csv')
    cr_precision = precision_score(labels, cr_predictions)
    cr_recall = recall_score(labels, cr_predictions)
    cr_specificity = specificity(cr_cm)
    cr_npv = npv(cr_cm)
    # "information level": unweighted mean of the four scores
    cr_level = 0.25*(cr_precision + cr_recall + cr_specificity + cr_npv)
    print("Confusion Matrix is:")
    print(cr_cm)
    print('The precision score of the {} classifier is {}'
        .format(name, cr_precision))
    print('The recall score of the {} classifier is {}'
        .format(name, cr_recall))
    print('The specificity score of the {} classifier is {}'
        .format(name, cr_specificity))
    print('The npv score of the {} classifier is {}'
        .format(name, cr_npv))
    print('The information level is: {}'
        .format(cr_level))
# Listing Run the Quantum Naive Bayes Classifier
# redefine the run-function
def run(f_classify, data):
    """Re-definition of run() for pandas: classify each DataFrame row (via iloc)."""
    return [f_classify(data.iloc[i]) for i in range(0,len(data))]
# get the trained qbn classifier closure
trained_qbn = get_trained_qbn(test_input, test_params)
# evaluate the Quantum Bayesian Network end-to-end:
# pre_process -> quantum circuit -> post_process, scored by classifier_report
classifier_report("QBN",
    run,
    lambda object: post_process(trained_qbn(pre_process(object))),
    objects,
    test_input['Hazardous'])
|
https://github.com/qiskit-community/qiskit-jku-provider
|
qiskit-community
|
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Exception for errors raised by JKU simulator.
"""
from qiskit import QiskitError
class JKUSimulatorError(QiskitError):
    """Class for errors raised by the JKU simulator."""
    def __init__(self, *message):
        """Join the message parts into a single space-separated string."""
        super().__init__(*message)
        # keep the joined text for __str__; assumes all parts are strings
        self.message = ' '.join(message)
    def __str__(self):
        """Return the repr of the joined message."""
        return repr(self.message)
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import matplotlib.pyplot as plt
import numpy as np
from qiskit.algorithms.optimizers import COBYLA
from qiskit.circuit.library import RealAmplitudes
from qiskit.primitives import Sampler
from qiskit.utils import algorithm_globals
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from qiskit_machine_learning.algorithms.classifiers import VQC
from IPython.display import clear_output
# Reproducible toy dataset: 40 random 2-D points labeled by the sign of
# their coordinate sum, scaled to [0, 1] and one-hot encoded.
algorithm_globals.random_seed = 42
sampler1 = Sampler()
sampler2 = Sampler()
num_samples = 40
num_features = 2
features = 2 * algorithm_globals.random.random([num_samples, num_features]) - 1
labels = 1 * (np.sum(features, axis=1) >= 0)  # in { 0, 1}
features = MinMaxScaler().fit_transform(features)
features.shape
features[0:5, :]
# NOTE(review): `sparse=False` is deprecated in newer scikit-learn in
# favor of `sparse_output=False` — confirm the pinned sklearn version.
labels = OneHotEncoder(sparse=False).fit_transform(labels.reshape(-1, 1))
labels.shape
labels[0:5, :]
train_features, test_features, train_labels, test_labels = train_test_split(
    features, labels, train_size=30, random_state=algorithm_globals.random_seed
)
train_features.shape
def plot_dataset():
    """Scatter-plot train (filled) and test (hollow) points by label,
    plus the true decision boundary (the anti-diagonal).

    Reads the module-level train/test feature and label arrays.
    """
    plt.scatter(
        train_features[np.where(train_labels[:, 0] == 0), 0],
        train_features[np.where(train_labels[:, 0] == 0), 1],
        marker="o",
        color="b",
        label="Label 0 train",
    )
    plt.scatter(
        train_features[np.where(train_labels[:, 0] == 1), 0],
        train_features[np.where(train_labels[:, 0] == 1), 1],
        marker="o",
        color="g",
        label="Label 1 train",
    )
    plt.scatter(
        test_features[np.where(test_labels[:, 0] == 0), 0],
        test_features[np.where(test_labels[:, 0] == 0), 1],
        marker="o",
        facecolors="w",
        edgecolors="b",
        label="Label 0 test",
    )
    plt.scatter(
        test_features[np.where(test_labels[:, 0] == 1), 0],
        test_features[np.where(test_labels[:, 0] == 1), 1],
        marker="o",
        facecolors="w",
        edgecolors="g",
        label="Label 1 test",
    )
    plt.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0)
    # true decision boundary for labels = sign(x + y - 1) after scaling
    plt.plot([1, 0], [0, 1], "--", color="black")
plot_dataset()
plt.show()
# iterations for the first (cold) training stage; the warm-start stage follows
maxiter = 20
objective_values = []
# callback function that draws a live plot when the .fit() method is called
def callback_graph(_, objective_value):
    """VQC training callback: append the objective value and redraw the curve.

    The first `maxiter` points (cold start) are drawn in orange, any
    points beyond that (warm-start continuation) in purple. Mutates the
    module-level `objective_values` list.
    """
    clear_output(wait=True)
    objective_values.append(objective_value)
    plt.title("Objective function value against iteration")
    plt.xlabel("Iteration")
    plt.ylabel("Objective function value")
    # stage 1: iterations 1..maxiter
    stage1_len = np.min((len(objective_values), maxiter))
    stage1_x = np.linspace(1, stage1_len, stage1_len)
    stage1_y = objective_values[:stage1_len]
    # stage 2: everything past maxiter (warm-started run)
    stage2_len = np.max((0, len(objective_values) - maxiter))
    stage2_x = np.linspace(maxiter, maxiter + stage2_len - 1, stage2_len)
    stage2_y = objective_values[maxiter : maxiter + stage2_len]
    plt.plot(stage1_x, stage1_y, color="orange")
    plt.plot(stage2_x, stage2_y, color="purple")
    plt.show()
# Train a VQC for 20 COBYLA iterations, save it, reload it, and continue
# training warm-started for 80 more iterations with a second sampler.
plt.rcParams["figure.figsize"] = (12, 6)
original_optimizer = COBYLA(maxiter=maxiter)
ansatz = RealAmplitudes(num_features)
initial_point = np.asarray([0.5] * ansatz.num_parameters)
original_classifier = VQC(
    ansatz=ansatz, optimizer=original_optimizer, callback=callback_graph, sampler=sampler1
)
original_classifier.fit(train_features, train_labels)
print("Train score", original_classifier.score(train_features, train_labels))
print("Test score ", original_classifier.score(test_features, test_labels))
original_classifier.save("vqc_classifier.model")
# reload and continue training from the saved weights
loaded_classifier = VQC.load("vqc_classifier.model")
loaded_classifier.warm_start = True
loaded_classifier.neural_network.sampler = sampler2
loaded_classifier.optimizer = COBYLA(maxiter=80)
loaded_classifier.fit(train_features, train_labels)
print("Train score", loaded_classifier.score(train_features, train_labels))
print("Test score", loaded_classifier.score(test_features, test_labels))
train_predicts = loaded_classifier.predict(train_features)
test_predicts = loaded_classifier.predict(test_features)
# return plot to default figsize
plt.rcParams["figure.figsize"] = (6, 4)
plot_dataset()
# plot misclassified data points (red circles around wrong predictions)
plt.scatter(
    train_features[np.all(train_labels != train_predicts, axis=1), 0],
    train_features[np.all(train_labels != train_predicts, axis=1), 1],
    s=200,
    facecolors="none",
    edgecolors="r",
    linewidths=2,
)
plt.scatter(
    test_features[np.all(test_labels != test_predicts, axis=1), 0],
    test_features[np.all(test_labels != test_predicts, axis=1), 1],
    s=200,
    facecolors="none",
    edgecolors="r",
    linewidths=2,
)
# Jupyter notebook magics — not valid plain Python outside IPython.
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/hephaex/Quantum-Computing-Awesome-List
|
hephaex
|
import cirq
## Function for visualizing output
def bitstring(bits):
    """Render a sequence of measurement results as a '0'/'1' string."""
    chars = []
    for value in bits:
        chars.append('1' if value else '0')
    return ''.join(chars)
## Superdense coding demo: Alice encodes 2 classical bits into 1 qubit of
## a shared Bell pair; Bob disentangles and measures both bits.
qreg = [cirq.LineQubit(x) for x in range(2)]
circuit = cirq.Circuit()
# gate sequence Alice applies for each 2-bit message
message = {"00" : [],
"10" : [cirq.X(qreg[0])],
"01" : [cirq.Z(qreg[0])],
"11" : [cirq.X(qreg[0]), cirq.Z(qreg[0])]}
## Creating a bell pair
circuit.append(cirq.H(qreg[0]))
circuit.append(cirq.CNOT(qreg[0], qreg[1]))
m = "10"
print("Alice sent a message")
## Encoding the message in qubits
circuit.append(message[m])
## Bob decodes: reverse the entangling operations, then measure
circuit.append(cirq.CNOT(qreg[0], qreg[1]))
circuit.append(cirq.H(qreg[0]))
circuit.append([cirq.measure(qreg[0]), cirq.measure(qreg[1])])
print("\n Circuit is :")
print(circuit)
sim = cirq.Simulator()
res = sim.run(circuit, repetitions = 1)
# NOTE: "recieved" typo lives in the output string; left untouched here.
print("BOB recieved message: ",bitstring(res.measurements.values()))
|
https://github.com/Pitt-JonesLab/mirror-gates
|
Pitt-JonesLab
|
# Notebook scratchpad: CX/iSWAP circuit identities and Weyl-chamber checks.
from qiskit import QuantumCircuit
qc1 = QuantumCircuit(3)
qc1.cx(1, 0)
qc1.cx(0, 1)
qc1.cz(1, 2)
qc1.cx(0, 1)
qc1.cx(1, 0)
qc1.draw("mpl")
# iSWAP-based substitution for a CX pair
iswap_sub = QuantumCircuit(2)
iswap_sub.h(0)
iswap_sub.sdg(0)
iswap_sub.sdg(1)
iswap_sub.iswap(0, 1)
iswap_sub.h(1)
iswap_sub.draw("mpl")
qc1 = QuantumCircuit(3)
qc1.cx(1, 0)
qc1.cx(0, 1)
qc1.cz(1, 2)
qc1.h(0)
qc1.sdg(0)
qc1.sdg(1)
qc1.iswap(0, 1)
qc1.h(1)
qc1.draw("mpl")
reverse_cx = QuantumCircuit(2)
reverse_cx.h(1)
reverse_cx.h(1)
reverse_cx.iswap(0, 1)
reverse_cx.s(0)
reverse_cx.s(1)
reverse_cx.h(0)
reverse_cx.h(0)
reverse_cx.sdg(0)
reverse_cx.sdg(1)
reverse_cx.draw("mpl")
qc0 = QuantumCircuit(2)
qc0.cx(1, 0)
qc0.cx(0, 1)
qc0.draw("mpl")
qc2 = QuantumCircuit(3)
# qc2.cx(1,0)
qc2.cx(0, 1)
qc2.cz(1, 2)
qc2.cx(0, 1)
# qc2.cx(1,0)
qc2.draw("mpl")
# unitary equivalence checks
from qiskit.quantum_info import Operator
Operator(qc1).equiv(Operator(qc2))
qc3 = QuantumCircuit(2)
qc3.cx(0, 1)
qc3.cx(1, 0)
qc4 = QuantumCircuit(2)
qc4.cx(1, 0)
qc4.cx(0, 1)
Operator(qc3).equiv(Operator(qc4))
qc5 = QuantumCircuit(5)
qc5.iswap(0, 1)
print(Operator(qc3).equiv(Operator(qc5)))
print(Operator(qc4).equiv(Operator(qc5)))
# Weyl-chamber coordinates of the two-CX circuits
from weylchamber import c1c2c3
print(c1c2c3(Operator(qc3).data))
print(c1c2c3(Operator(qc4).data))
# use for making some figures of QFT circuit dag for the paper
from qiskit.circuit.library import QFT, TwoLocal
qc = QFT(4).decompose()
# qc = TwoLocal(4, ['ry'], 'cx', reps=1, entanglement='full', insert_barriers=False)
qc.decompose().draw(output="mpl") # , filename="twolocal_base.svg")
# Notebook scratchpad: transpile a small circuit onto a line topology and
# compare the SabreMS and SabreQiskit pass managers.
from qiskit import QuantumCircuit
qc = QuantumCircuit(3)
qc.cx(0, 1)
qc.cx(1, 2)
qc.barrier()
qc.cx(0, 1)
qc.cx(2, 1)
qc.swap(0, 1)
qc.draw("mpl")
# line coupling_map
from qiskit.transpiler import CouplingMap
line = CouplingMap.from_line(3)
grid = CouplingMap.from_grid(2, 2)
topo = line
from qiskit import transpile
transp1 = transpile(
    qc, coupling_map=topo, optimization_level=3 # , initial_layout=[0, 1, 2, 3]
)
transp1.draw(output="mpl") # , filename="twolocal_qiskit.svg")
from qiskit.converters import circuit_to_dag
dag = circuit_to_dag(transp1)
# dag only keep 2Q nodes
for node in dag.topological_op_nodes():
    if node.op.num_qubits < 2:
        dag.remove_op_node(node)
dag.draw()
from virtual_swap.pass_managers import SabreMS, SabreQiskit
from qiskit.circuit.library import iSwapGate
from virtual_swap.pass_managers import SabreMS, SabreQiskit
from transpile_benchy.metrics import DepthMetric
for _ in range(1):
    runner = SabreMS(topo) # , cx_basis=True)
    transp = runner.run(qc)
    # mid0 = runner.pm.property_set["mid0"]
    mid = runner.pm.property_set["circuit_progress"]
    print(runner.pm.property_set["monodromy_depth"])
    # if DepthMetric.calculate(transp) <= 22:
    # break
mid.draw(output="mpl", fold=-1) # , filename="twolocal_cns.svg")
transp.decompose().draw(output="mpl")
# set original qc to use
from qiskit import transpile
# qc2 = transpile(qc, initial_layout=runner.pm.property_set["layout"], coupling_map=coupling_map)
# qc2 = transpile(qc, coupling_map=coupling_map, optimization_level=3)
for _ in range(5):
    pm2 = SabreQiskit(topo) # , cx_basis=True)
    qc2 = pm2.run(qc)
    mid = pm2.pm.property_set["circuit_progress"]
    print(DepthMetric.calculate(qc2))
mid.draw(output="mpl", fold=-1)
from qiskit.converters import circuit_to_dag
# remove 1Q nodes
# NOTE(review): `mid_qc` is not defined anywhere in this chunk
# (notebook residue) — presumably meant to be `mid` or `qc2`.
dag = circuit_to_dag(mid_qc)
for node in dag.topological_op_nodes():
    if node.op.num_qubits < 2:
        dag.remove_op_node(node)
dag.draw()
# NOTE(review): `pm` is also undefined here — presumably `pm2`.
pm.pm.property_set["layout"]
|
https://github.com/mmetcalf14/Hamiltonian_Downfolding_IBM
|
mmetcalf14
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Boolean Logical AND and OR Gates.
"""
import logging
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.qasm import pi
from qiskit.aqua import AquaError
from .multi_control_toffoli_gate import mct
logger = logging.getLogger(__name__)
def _logical_and(circuit, variable_register, flags, target_qubit, ancillary_register, mct_mode):
    """Apply an in-place conjunction (AND) of the flagged variable qubits onto the target.

    Qubits flagged -1 are sandwiched between X gates (u3(pi, 0, pi)) so the
    conjunction is over their negation; qubits flagged 0 are omitted. A ``None``
    flags argument is a no-op.
    """
    if flags is None:
        return
    pairs = list(zip(variable_register, flags))
    controls = [qubit for qubit, flag in pairs if flag]
    negated = [qubit for qubit, flag in pairs if flag < 0]
    ancillas = None
    if ancillary_register:
        # mct needs (#active controls - 2) work qubits in 'basic' mode
        ancillas = [ancillary_register[i] for i in range(np.count_nonzero(flags) - 2)]
    for qubit in negated:
        circuit.u3(pi, 0, pi, qubit)
    circuit.mct(controls, target_qubit, ancillas, mode=mct_mode)
    for qubit in negated:
        circuit.u3(pi, 0, pi, qubit)
def _logical_or(circuit, qr_variables, flags, qb_target, qr_ancillae, mct_mode):
    """Apply an in-place disjunction (OR) of the flagged variable qubits onto the target.

    Uses De Morgan: OR = NOT(AND of negations). The target is always flipped
    first; qubits flagged +1 are X-conjugated around the mct, flags of 0 omit
    the qubit, and a ``None`` flags argument leaves only the target flip.
    """
    circuit.u3(pi, 0, pi, qb_target)
    if flags is None:
        return
    pairs = list(zip(qr_variables, flags))
    controls = [qubit for qubit, flag in pairs if flag]
    negated = [qubit for qubit, flag in pairs if flag > 0]
    ancillas = None
    if qr_ancillae:
        # mct needs (#active controls - 2) work qubits in 'basic' mode
        ancillas = [qr_ancillae[i] for i in range(np.count_nonzero(flags) - 2)]
    for qubit in negated:
        circuit.u3(pi, 0, pi, qubit)
    circuit.mct(controls, qb_target, ancillas, mode=mct_mode)
    for qubit in negated:
        circuit.u3(pi, 0, pi, qubit)
def _do_checks(flags, qr_variables, qb_target, qr_ancillae, circuit):
    """Validate and normalize the shared inputs of logical_and / logical_or.

    Returns the (possibly defaulted) flags list. Raises AquaError or
    ValueError for malformed arguments; the checks run in the same order
    as before so callers see the same exception first.
    """
    # default flags: include every variable qubit un-negated
    if flags is None:
        flags = [1] * len(qr_variables)
    elif len(flags) > len(qr_variables):
        raise AquaError('`flags` cannot be longer than `qr_variables`.')

    # variables: a QuantumRegister or a plain list of qubits
    # TODO: improve the check
    if isinstance(qr_variables, (QuantumRegister, list)):
        variable_qubits = [qubit for qubit, flag in zip(qr_variables, flags) if flag != 0]
    else:
        raise ValueError('A QuantumRegister or list of qubits is expected for variables.')

    # target: a single (register, index) tuple
    if not isinstance(qb_target, tuple):
        raise ValueError('A single qubit is expected for the target.')
    target_qubit = qb_target

    # ancillae: optional register or list
    if qr_ancillae is None:
        ancillary_qubits = []
    elif isinstance(qr_ancillae, QuantumRegister):
        ancillary_qubits = list(qr_ancillae)
    elif isinstance(qr_ancillae, list):
        ancillary_qubits = qr_ancillae
    else:
        raise ValueError('An optional list of qubits or a QuantumRegister is expected for ancillae.')

    # let the circuit verify ownership and uniqueness of all involved qubits
    all_qubits = variable_qubits + [target_qubit] + ancillary_qubits
    circuit._check_qargs(all_qubits)
    circuit._check_dups(all_qubits)
    return flags
def logical_and(self, qr_variables, qb_target, qr_ancillae, flags=None, mct_mode='basic'):
    """
    Build a collective conjunction (AND) circuit in place using mct.

    Args:
        self (QuantumCircuit): The QuantumCircuit object to build the conjunction on.
        qr_variables (QuantumRegister): The QuantumRegister holding the variable qubits.
        qb_target (tuple(QuantumRegister, int)): The target qubit to hold the conjunction result.
        qr_ancillae (QuantumRegister): The ancillary QuantumRegister for building the mct.
        flags (list): A list of +1/-1/0 to mark negations or omissions of qubits.
        mct_mode (str): The mct building mode.
    """
    flags = _do_checks(flags, qr_variables, qb_target, qr_ancillae, self)
    _logical_and(self, qr_variables, flags, qb_target, qr_ancillae, mct_mode)
def logical_or(self, qr_variables, qb_target, qr_ancillae, flags=None, mct_mode='basic'):
    """
    Build a collective disjunction (OR) circuit in place using mct.

    Args:
        self (QuantumCircuit): The QuantumCircuit object to build the disjunction on.
        qr_variables (QuantumRegister): The QuantumRegister holding the variable qubits.
        qb_target (tuple(QuantumRegister, int)): The target qubit to hold the disjunction result.
        qr_ancillae (QuantumRegister): The ancillary QuantumRegister for building the mct.
        flags (list): A list of +1/-1/0 to mark negations or omissions of qubits.
        mct_mode (str): The mct building mode.
    """
    flags = _do_checks(flags, qr_variables, qb_target, qr_ancillae, self)
    _logical_or(self, qr_variables, flags, qb_target, qr_ancillae, mct_mode)
# Monkey-patch the builders onto QuantumCircuit so callers can write qc.AND(...) / qc.OR(...)
QuantumCircuit.AND = logical_and
QuantumCircuit.OR = logical_or
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import random
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
from discopy import grammar
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from jax import numpy as jnp
import torch
import numpy as np
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader,SpiderAnsatz
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel, PytorchModel, PytorchTrainer
from lambeq import Rewriter
from pytket.extensions.qiskit import AerBackend
import seaborn as sns
import matplotlib.pyplot as plt
from pytket.circuit.display import render_circuit_jupyter
# pandas display settings for wide DataFrames in the notebook
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# training hyper-parameters
BATCH_SIZE = 20
EPOCHS = 100
SEED = 0
LEARNING_RATE = 3e-2
# cumulative fractions of the corpus used for the train / dev / test splits
TRAIN_INDEX_RATIO = 0.08
VAL_INDEX_RATIO = TRAIN_INDEX_RATIO + 0.01
TEST_INDEX_RATIO = VAL_INDEX_RATIO + 0.01
assert TEST_INDEX_RATIO <= 1
def load_pickled_dict_to_df(filename):
    """Load a pickled dict into a shuffled DataFrame with a binary Sentiment column.

    Args:
        filename: Path to a pickle file containing a dict with at least a
            'target' key whose values are "Positive"/"Negative" labels.

    Returns:
        pandas.DataFrame: shuffled deterministically with the module-level
        SEED, with an added integer "Sentiment" column (1 for "Positive",
        0 otherwise).
    """
    # `with` closes the file handle (the original leaked it)
    with open(filename, 'rb') as handle:
        saved_dict = pickle.load(handle)
    df = pd.DataFrame.from_dict(saved_dict)
    # deterministic shuffle so the train/dev/test splits are reproducible
    df = df.sample(frac=1, random_state=SEED).reset_index(drop=True)
    # "Positive" -> 1, anything else -> 0
    df["Sentiment"] = [1 if label == "Positive" else 0 for label in df['target']]
    return df
# candidate preprocessed corpora; only the stemmed+lemmatized one is used below
cleaned_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data.pkl')
cleaned_lemmatized_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_lematize.pkl')
cleaned_lemmatized_stemmed_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_stem_lematize.pkl')
#cleaned_qnlp = load_pickled_dict_to_df(cleaned_qnlp_filename)
#cleaned_lemmatized_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_qnlp_filename)
cleaned__lemmatized_stemmed_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_stemmed_qnlp_filename)
#cleaned_qnlp.head(10)
#cleaned_qnlp.info()
#sns.countplot(x = "target", data = cleaned_qnlp)
#cleaned_lemmatized_qnlp.head(10)
#cleaned_lemmatized_qnlp.info()
#sns.countplot(x='target', data = cleaned_lemmatized_qnlp)
cleaned__lemmatized_stemmed_qnlp.head(10)
cleaned__lemmatized_stemmed_qnlp.info()
# class-balance check of the sentiment labels
sns.countplot(x='target', data = cleaned__lemmatized_stemmed_qnlp)
# syntactic parser used to turn sentences into DisCoPy diagrams
parser = BobcatParser(verbose='text')
# parser = DepCCGParser(root_cats=['S[dcl]'])
# parser = spiders_reader
# total number of sentences in the corpus -- TODO confirm against the pickle
NUM_DATA = 2578
sig = torch.sigmoid
def accuracy(y_hat, y):
    """Fraction of correct binary predictions.

    Logits are squashed through a sigmoid and rounded to {0, 1}; the result is
    halved because each sample contributes two (complementary) output slots.
    """
    predictions = torch.round(torch.sigmoid(y_hat))
    return torch.sum(torch.eq(predictions, y)) / len(y) / 2  # half due to double-counting
rewriter = Rewriter(['prepositional_phrase', 'determiner', 'auxiliary', 'connector',
'coordination', 'object_rel_pronoun', 'subject_rel_pronoun',
'postadverb', 'preadverb'])
def rewrite(diagram):
    """Simplify a DisCoPy diagram before circuit conversion.

    The lambeq Rewriter pass is currently disabled; only cup removal is
    applied, which reduces the qubit count of the resulting ansatz circuits.
    """
    # diagram = rewriter(diagram)
    return remove_cups(diagram)
def create_diagrams_and_labels(total_df, NUM_DATA = 2578):
    """Split the corpus into train/dev/test diagrams with one-hot labels.

    Labels are [1, 0] for positive and [0, 1] for negative sentiment. Split
    boundaries come from the module-level *_INDEX_RATIO constants; sentences
    are parsed with the module-level `parser`.
    """
    texts = total_df['data'].tolist()
    labels = [[t, 1 - t] for t in total_df["Sentiment"].tolist()]

    train_end = round(NUM_DATA * TRAIN_INDEX_RATIO)
    dev_end = round(NUM_DATA * VAL_INDEX_RATIO)
    test_end = round(NUM_DATA * TEST_INDEX_RATIO)

    train_diagrams = parser.sentences2diagrams(texts[:train_end])
    dev_diagrams = parser.sentences2diagrams(texts[train_end:dev_end])
    test_diagrams = parser.sentences2diagrams(texts[dev_end:test_end])

    return (train_diagrams, labels[:train_end],
            dev_diagrams, labels[train_end:dev_end],
            test_diagrams, labels[dev_end:test_end])
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(cleaned__lemmatized_stemmed_qnlp)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
# ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
ansatz_1 = SpiderAnsatz({AtomicType.NOUN: 2, AtomicType.SENTENCE: 2, AtomicType.PREPOSITIONAL_PHRASE: 2, AtomicType.NOUN_PHRASE:2, AtomicType.CONJUNCTION:2})
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = PytorchModel.from_diagrams(all_circuits_1)
trainer_1 = PytorchTrainer(
model=model_1,
loss_function=torch.nn.BCEWithLogitsLoss(),
optimizer=torch.optim.AdamW, # type: ignore
learning_rate=LEARNING_RATE,
epochs=EPOCHS,
evaluate_functions={"acc": accuracy},
evaluate_on_train=True,
verbose='text',
seed=SEED)
)
train_dataset_1 = Dataset(
train_circuits_1,
train_labels_1,
batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=5)
# loss (top row) and accuracy (bottom row) curves for train and dev sets
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
# Fixed: `acc` was undefined (NameError) -- the metric function is `accuracy`,
# and it expects tensor labels, so the label list is wrapped in torch.tensor.
test_acc_1 = accuracy(model_1(test_circuits_1), torch.tensor(test_labels_1))
print('Test accuracy:', test_acc_1)
|
https://github.com/2lambda123/Qiskit-qiskit
|
2lambda123
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The EfficientSU2 2-local circuit."""
from __future__ import annotations
import typing
from collections.abc import Callable
from numpy import pi
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library.standard_gates import RYGate, RZGate, CXGate
from .two_local import TwoLocal
if typing.TYPE_CHECKING:
import qiskit # pylint: disable=cyclic-import
class EfficientSU2(TwoLocal):
    r"""The hardware efficient SU(2) 2-local circuit.

    The ``EfficientSU2`` circuit consists of layers of single qubit operations spanned by SU(2)
    and :math:`CX` entanglements. This is a heuristic pattern that can be used to prepare trial wave
    functions for variational quantum algorithms or classification circuit for machine learning.

    SU(2) stands for special unitary group of degree 2, its elements are :math:`2 \times 2`
    unitary matrices with determinant 1, such as the Pauli rotation gates.

    On 3 qubits and using the Pauli :math:`Y` and :math:`Z` su2_gates as single qubit gates, the
    hardware efficient SU(2) circuit is represented by:

    .. parsed-literal::

        ┌──────────┐┌──────────┐ ░            ░       ░ ┌───────────┐┌───────────┐
        ┤ RY(θ[0]) ├┤ RZ(θ[3]) ├─░────────■───░─ ... ─░─┤ RY(θ[12]) ├┤ RZ(θ[15]) ├
        ├──────────┤├──────────┤ ░      ┌─┴─┐ ░       ░ ├───────────┤├───────────┤
        ┤ RY(θ[1]) ├┤ RZ(θ[4]) ├─░───■──┤ X ├─░─ ... ─░─┤ RY(θ[13]) ├┤ RZ(θ[16]) ├
        ├──────────┤├──────────┤ ░ ┌─┴─┐└───┘ ░       ░ ├───────────┤├───────────┤
        ┤ RY(θ[2]) ├┤ RZ(θ[5]) ├─░─┤ X ├──────░─ ... ─░─┤ RY(θ[14]) ├┤ RZ(θ[17]) ├
        └──────────┘└──────────┘ ░ └───┘      ░       ░ └───────────┘└───────────┘

    See :class:`~qiskit.circuit.library.RealAmplitudes` for more detail on the possible arguments
    and options such as skipping unentanglement qubits, which apply here too.

    Examples:

        >>> circuit = EfficientSU2(3, reps=1)
        >>> print(circuit)
             ┌──────────┐┌──────────┐          ┌──────────┐┌──────────┐
        q_0: ┤ RY(θ[0]) ├┤ RZ(θ[3]) ├──■────■──┤ RY(θ[6]) ├┤ RZ(θ[9]) ├─────────────
             ├──────────┤├──────────┤┌─┴─┐  │  └──────────┘├──────────┤┌───────────┐
        q_1: ┤ RY(θ[1]) ├┤ RZ(θ[4]) ├┤ X ├──┼───────■──────┤ RY(θ[7]) ├┤ RZ(θ[10]) ├
             ├──────────┤├──────────┤└───┘┌─┴─┐   ┌─┴─┐    ├──────────┤├───────────┤
        q_2: ┤ RY(θ[2]) ├┤ RZ(θ[5]) ├─────┤ X ├───┤ X ├────┤ RY(θ[8]) ├┤ RZ(θ[11]) ├
             └──────────┘└──────────┘     └───┘   └───┘    └──────────┘└───────────┘

        >>> ansatz = EfficientSU2(4, su2_gates=['rx', 'y'], entanglement='circular', reps=1)
        >>> qc = QuantumCircuit(4)  # create a circuit and append the RY variational form
        >>> qc.compose(ansatz, inplace=True)
        >>> qc.draw()
             ┌──────────┐┌───┐┌───┐     ┌──────────┐   ┌───┐
        q_0: ┤ RX(θ[0]) ├┤ Y ├┤ X ├──■──┤ RX(θ[4]) ├───┤ Y ├─────────────────────
             ├──────────┤├───┤└─┬─┘┌─┴─┐└──────────┘┌──┴───┴───┐     ┌───┐
        q_1: ┤ RX(θ[1]) ├┤ Y ├──┼──┤ X ├─────■──────┤ RX(θ[5]) ├───┤ Y ├─────────
             ├──────────┤├───┤  │  └───┘   ┌─┴─┐    └──────────┘┌──┴───┴───┐┌───┐
        q_2: ┤ RX(θ[2]) ├┤ Y ├──┼──────────┤ X ├─────────■──────┤ RX(θ[6]) ├┤ Y ├
             ├──────────┤├───┤  │          └───┘       ┌─┴─┐    ├──────────┤├───┤
        q_3: ┤ RX(θ[3]) ├┤ Y ├──■──────────────────────┤ X ├────┤ RX(θ[7]) ├┤ Y ├
             └──────────┘└───┘                         └───┘    └──────────┘└───┘
    """

    def __init__(
        self,
        num_qubits: int | None = None,
        su2_gates: str
        | type
        | qiskit.circuit.Instruction
        | QuantumCircuit
        | list[str | type | qiskit.circuit.Instruction | QuantumCircuit]
        | None = None,
        entanglement: str | list[list[int]] | Callable[[int], list[int]] = "reverse_linear",
        reps: int = 3,
        skip_unentangled_qubits: bool = False,
        skip_final_rotation_layer: bool = False,
        parameter_prefix: str = "θ",
        insert_barriers: bool = False,
        initial_state: QuantumCircuit | None = None,
        name: str = "EfficientSU2",
        flatten: bool | None = None,
    ) -> None:
        """
        Args:
            num_qubits: The number of qubits of the EfficientSU2 circuit.
            su2_gates: The SU(2) single qubit gates to apply in single qubit gate layers.
                If only one gate is provided, the same gate is applied to each qubit.
                If a list of gates is provided, all gates are applied to each qubit in the provided
                order.
            entanglement: Specifies the entanglement structure. Can be a string ('full', 'linear'
                , 'reverse_linear', 'circular' or 'sca'), a list of integer-pairs specifying the
                indices of qubits entangled with one another, or a callable returning such a list
                provided with the index of the entanglement layer.
                Default to 'reverse_linear' entanglement.
                Note that 'reverse_linear' entanglement provides the same unitary as 'full'
                with fewer entangling gates.
                See the Examples section of :class:`~qiskit.circuit.library.TwoLocal` for more
                detail.
            reps: Specifies how often the structure of a rotation layer followed by an entanglement
                layer is repeated.
            skip_unentangled_qubits: If True, the single qubit gates are only applied to qubits
                that are entangled with another qubit. If False, the single qubit gates are applied
                to each qubit in the Ansatz. Defaults to False.
            skip_final_rotation_layer: If False, a rotation layer is added at the end of the
                ansatz. If True, no rotation layer is added.
            parameter_prefix: The parameterized gates require a parameter to be defined, for which
                we use :class:`~qiskit.circuit.ParameterVector`.
            insert_barriers: If True, barriers are inserted in between each layer. If False,
                no barriers are inserted.
            initial_state: A `QuantumCircuit` object to prepend to the circuit.
            flatten: Set this to ``True`` to output a flat circuit instead of nesting it inside
                multiple layers of gate objects. By default currently the contents of
                the output circuit will be wrapped in nested objects for
                cleaner visualization. However, if you're using this circuit
                for anything besides visualization its **strongly** recommended
                to set this flag to ``True`` to avoid a large performance
                overhead for parameter binding.
        """
        # Default rotation layer: an RY followed by an RZ on every qubit.
        if su2_gates is None:
            su2_gates = [RYGate, RZGate]
        # All structural work is delegated to TwoLocal with CX as the entangler.
        super().__init__(
            num_qubits=num_qubits,
            rotation_blocks=su2_gates,
            entanglement_blocks=CXGate,
            entanglement=entanglement,
            reps=reps,
            skip_unentangled_qubits=skip_unentangled_qubits,
            skip_final_rotation_layer=skip_final_rotation_layer,
            parameter_prefix=parameter_prefix,
            insert_barriers=insert_barriers,
            initial_state=initial_state,
            name=name,
            flatten=flatten,
        )

    @property
    def parameter_bounds(self) -> list[tuple[float, float]]:
        """Return the parameter bounds.

        Returns:
            The parameter bounds.
        """
        # Every parameter is a rotation angle, so [-pi, pi] covers all values.
        return self.num_parameters * [(-pi, pi)]
|
https://github.com/swe-train/qiskit__qiskit
|
swe-train
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, execute
from qiskit import Aer
import numpy as np
import sys
# CLI arguments: qubit count and output CSV path
N=int(sys.argv[1])
filename = sys.argv[2]
# unitary_simulator returns the full circuit unitary instead of counts
backend = Aer.get_backend('unitary_simulator')
def GHZ(n):
    """Return an n-qubit GHZ-state circuit, or None when n <= 0.

    Structure: a Hadamard on qubit 0 followed by a CX chain that copies the
    superposition down the register.
    """
    if n <= 0:
        return None
    circuit = QuantumCircuit(n)
    circuit.h(0)
    # chain of CNOTs: qubit k-1 controls qubit k
    for control, target in zip(range(n - 1), range(1, n)):
        circuit.cx(control, target)
    return circuit
circuit = GHZ(N)
# NOTE(review): shots has no effect on the unitary simulator -- confirm intent
job = execute(circuit, backend, shots=8192)
result = job.result()
array = result.get_unitary(circuit,3)
# write the (rounded) unitary as CSV
np.savetxt(filename, array, delimiter=",", fmt = "%0.3f")
|
https://github.com/QPower-Research/QPowerAlgo
|
QPower-Research
|
import cirq
import numpy as np
from qiskit import QuantumCircuit, execute, Aer
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (15,10)
# --- Cirq version of the teleportation protocol ---------------------------
q0, q1, q2 = [cirq.LineQubit(i) for i in range(3)]
circuit = cirq.Circuit()
# entangling the 2 qubits in different laboratories
# and preparing the qubit to send
circuit.append(cirq.H(q0))
circuit.append(cirq.H(q1))
circuit.append(cirq.CNOT(q1, q2))
# entangling the qubit we want to send to the one in the first laboratory
circuit.append(cirq.CNOT(q0, q1))
circuit.append(cirq.H(q0))
# measurements
circuit.append(cirq.measure(q0, q1))
# last transformations to obtain the qubit information
circuit.append(cirq.CNOT(q1, q2))
circuit.append(cirq.CZ(q0, q2))
# measure of the qubit in the receiving laboratory along z axis
circuit.append(cirq.measure(q2, key = 'Z'))
circuit
# starting simulation
sim = cirq.Simulator()
results = sim.run(circuit, repetitions=100)
sns.histplot(results.measurements['Z'], discrete = True)
# counts of 0 and 1 outcomes over the 100 repetitions
100 - np.count_nonzero(results.measurements['Z']), np.count_nonzero(results.measurements['Z'])
# --- Qiskit version -------------------------------------------------------
# in qiskit the qubits are integrated in the circuit
qc = QuantumCircuit(3, 1)
# entangling
qc.h(0)
qc.h(1)
qc.cx(1, 2)
qc.cx(0, 1)
# setting for measurement
qc.h(0)
# NOTE(review): both qubits are measured into the same classical bit 0, so the
# first value is overwritten and never used -- presumably intentional, since
# the corrections below are applied as quantum gates (deferred measurement)
# and bit 0 is overwritten again by the final measure; verify.
qc.measure([0,1], [0,0])
# transformation to obtain qubit sent
qc.cx(1, 2)
qc.cz(0, 2)
qc.measure(2, 0)
print(qc)
# simulation
simulator = Aer.get_backend('qasm_simulator')
job = execute(qc, simulator, shots=100)
res = job.result().get_counts(qc)
plt.bar(res.keys(), res.values())
res
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# DIMACS-CNF encoding of a 3-variable, 5-clause 3-SAT instance
input_3sat_instance = '''
c example DIMACS-CNF 3-SAT
p cnf 3 5
-1 -2 -3 0
1 -2 3 0
1 2 -3 0
1 -2 -3 0
-1 2 3 0
'''
import os
import tempfile
from qiskit.exceptions import MissingOptionalLibraryError
from qiskit.circuit.library.phase_oracle import PhaseOracle
# write the instance to a temp file so PhaseOracle can parse it, then clean up
fp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
fp.write(input_3sat_instance)
file_name = fp.name
fp.close()
oracle = None
try:
    oracle = PhaseOracle.from_dimacs_file(file_name)
except MissingOptionalLibraryError as ex:
    # PhaseOracle requires the optional tweedledum package
    print(ex)
finally:
    os.remove(file_name)
from qiskit.algorithms import AmplificationProblem
problem = None
if oracle is not None:
    # the oracle doubles as the good-state checker
    problem = AmplificationProblem(oracle, is_good_state=oracle.evaluate_bitstring)
from qiskit.algorithms import Grover
from qiskit.primitives import Sampler
grover = Grover(sampler=Sampler())
result = None
if problem is not None:
    result = grover.amplify(problem)
    print(result.assignment)
from qiskit.tools.visualization import plot_histogram
if result is not None:
    # `display` is provided by the IPython/Jupyter environment
    display(plot_histogram(result.circuit_results[0]))
# same workflow, with the oracle built from a boolean expression instead
expression = '(w ^ x) & ~(y ^ z) & (x & y & z)'
try:
    oracle = PhaseOracle(expression)
    problem = AmplificationProblem(oracle, is_good_state=oracle.evaluate_bitstring)
    grover = Grover(sampler=Sampler())
    result = grover.amplify(problem)
    display(plot_histogram(result.circuit_results[0]))
except MissingOptionalLibraryError as ex:
    print(ex)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit
# conditional bit-flip: apply X only when the classical bit equals 0
qc = QuantumCircuit(1, 1)
qc.h(0)
qc.measure(0,0)
qc.x(0).c_if(0, 0)
qc.draw(output='mpl')
from qiskit import QuantumRegister, ClassicalRegister
q = QuantumRegister(3, 'q')
c = ClassicalRegister(3, 'c')
qc = QuantumCircuit(q, c)
qc.h([0, 1, 2])
qc.barrier()
qc.measure(q, c)
qc.draw('mpl')
# a classical register condition compares its integer value
print(bin(3))
print(bin(7))
qc.x(2).c_if(c, 3) # for the 011 case
qc.x(2).c_if(c, 7) # for the 111 case
qc.draw(output='mpl')
# --- Iterative phase estimation of the S gate with m = 2 bits -------------
nq = 2
m = 2
q = QuantumRegister(nq, 'q')
c = ClassicalRegister(m, 'c')
qc_S = QuantumCircuit(q,c)
# auxiliary qubit in |+>, system qubit in the S eigenstate |1>
qc_S.h(0)
qc_S.x(1)
qc_S.draw('mpl')
from math import pi
# controlled-S is a controlled phase of pi/2
cu_circ = QuantumCircuit(2)
cu_circ.cp(pi/2, 0, 1)
cu_circ.draw('mpl')
# apply c-U 2^(m-1) times to extract the least significant phase bit
for _ in range(2 ** (m - 1)):
    qc_S.cp(pi/2, 0, 1)
qc_S.draw('mpl')
def x_measurement(qc, qubit, cbit):
    """Measure 'qubit' in the X-basis, and store the result in 'cbit'"""
    # H rotates the X eigenbasis onto the computational (Z) basis
    qc.h(qubit)
    qc.measure(qubit, cbit)
x_measurement(qc_S, q[0], c[0])
qc_S.draw('mpl')
# reset and reuse the auxiliary qubit for the next phase bit
qc_S.reset(0)
qc_S.h(0)
qc_S.draw('mpl')
# phase correction conditioned on the bit measured so far
qc_S.p(-pi/2, 0).c_if(c, 1)
qc_S.draw('mpl')
## 2^t c-U operations (with t=m-2)
for _ in range(2 ** (m - 2)):
    qc_S.cp(pi/2, 0, 1)
x_measurement(qc_S, q[0], c[1])
qc_S.draw('mpl')
import matplotlib.pyplot as plt
from qiskit.tools.visualization import plot_histogram
from qiskit_aer.primitives import Sampler
sampler = Sampler()
job = sampler.run(qc_S)
result = job.result()
dist0 = result.quasi_dists[0]
# rescale measured integers to phase estimates in [0, 1)
key_new = [str(key/2**m) for key in list(dist0.keys())]
dist1 = dict(zip(key_new, dist0.values()))
fig, ax = plt.subplots(1,2)
plot_histogram(dist0, ax=ax[0])
plot_histogram(dist1, ax=ax[1])
plt.tight_layout()
# --- Same protocol with a multi-controlled phase on 3 qubits, m = 3 bits ---
nq = 3 # number of qubits
m = 3 # number of classical bits
q = QuantumRegister(nq,'q')
c = ClassicalRegister(m,'c')
qc = QuantumCircuit(q,c)
qc.h(0)
qc.x([1, 2])
qc.draw('mpl')
# controlled-U is a multi-controlled phase of pi/4
cu_circ = QuantumCircuit(nq)
cu_circ.mcp(pi/4, [0, 1], 2)
cu_circ.draw('mpl')
# first (least significant) bit: apply c-U 2^(m-1) times
for _ in range(2 ** (m - 1)):
    qc.mcp(pi/4, [0, 1], 2)
qc.draw('mpl')
x_measurement(qc, q[0], c[0])
qc.draw('mpl')
# re-initialize the auxiliary qubit
qc.reset(0)
qc.h(0)
qc.draw('mpl')
qc.p(-pi/2, 0).c_if(c, 1)
qc.draw('mpl')
# second bit
for _ in range(2 ** (m - 2)):
    qc.mcp(pi/4, [0, 1], 2)
x_measurement(qc, q[0], c[1])
qc.draw('mpl')
# initialization of qubit q0
qc.reset(0)
qc.h(0)
# phase correction
qc.p(-pi/4, 0).c_if(c, 1)
qc.p(-pi/2, 0).c_if(c, 2)
qc.p(-3*pi/2, 0).c_if(c, 3)
# c-U operations
for _ in range(2 ** (m - 3)):
    qc.mcp(pi/4, [0, 1], 2)
# X measurement
qc.h(0)
qc.measure(0, 2)
qc.draw('mpl')
result = sampler.run(qc).result()
dist0 = result.quasi_dists[0]
key_new = [str(key/2**m) for key in list(dist0.keys())]
dist1 = dict(zip(key_new, dist0.values()))
fig, ax = plt.subplots(1,2)
plot_histogram(dist0, ax=ax[0])
plot_histogram(dist1, ax=ax[1])
plt.tight_layout()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
import qiskit_experiments as qe
rb = qe.randomized_benchmarking
from scipy.optimize import curve_fit
# import the bayesian packages
import pymc3 as pm
import arviz as az
import bayesian_fitter as bf
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
# choice of "simulation, "real", "retrieve"
option = "retrieve"
# Determine the backend
if option == "simulation":
from qiskit.test.mock import FakeAthens
backend = FakeAthens()
hardware = 'ibmq_athens' # hardware reference
else:
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
device = provider.get_backend('ibmq_athens')
backend = device
hardware = device.name() # hardware used
# RB design
q_list = [0,1]
lengths = [1, 20, 40, 60, 80, 100, 150, 200, 250, 300, 350, 400, 450, 500]
num_samples = 5
seed = 1010
num_qubits = len(q_list)
scale = (2 ** num_qubits - 1) / (2 ** num_qubits)
shots = 1024
if option != "retrieve":
exp = rb.RBExperiment(q_list, lengths, num_samples=num_samples, seed=seed)
eda= exp.run(backend)
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    """Convert an RB decay parameter alpha to error per Clifford (EPC)."""
    return (1 - alpha) * scale
# one or two qubits for retrieve and for the plots
list_bitstring = ['0', '00']
# obtain the current count values
Y_list = []
if option == "simulation":
    for i_sample in range(num_samples*len(lengths)):
        Y_list.append(eda.data[i_sample]['counts']\
            [eda.data[i_sample]['metadata']['ylabel']])
else: # specify job (fresh job or run some time ago)
    job = backend.retrieve_job('6097aed0a4885edfb19508fa') # athens 01'
    for rbseed, result in enumerate(job.result().get_counts()):
        total_counts = 0
        # sum ground-state counts over the accepted bitstrings
        for key,val in result.items():
            if key in list_bitstring:
                total_counts += val
        Y_list.append(total_counts)
# rows = RB seeds, columns = sequence lengths
Y = np.array(Y_list).reshape(num_samples, len(lengths))
#get LSF EPC and priors
if option != "retrieve":
    popt = eda._analysis_results[0]['popt']
    pcov = eda._analysis_results[0]['pcov']
else: # manual entry (here for job ''6097aed0a4885edfb19508fa')
    popt = [0.7207075, 0.95899375, 0.2545933 ]
    pcov = [[ 2.53455272e-04, -9.10001034e-06, -1.29839515e-05],
            [-9.10001034e-06, 9.55193998e-06, -7.07822420e-06],
            [-1.29839515e-05, -7.07822420e-06, 2.07452483e-05]]
# popt is (A, alpha, B) of the RB decay model A*alpha^m + B
alpha_ref=popt[1]
mu_AB= [popt[0],popt[2]]
alpha_ref_err = np.sqrt(pcov[1][1])
EPC = scale*(1-alpha_ref)
EPC_err = scale*alpha_ref_err
# prior hyper-parameters for the Bayesian models
cov_AB= [0.0001, 0.0001]
alpha_lower=0.8
alpha_upper=0.999
sigma_theta = .004
pooled_model = bf.get_bayesian_model(model_type="pooled",
    Y=Y,shots=1024,m_gates=lengths,
    alpha_ref = alpha_ref,
    mu_AB=mu_AB,cov_AB=cov_AB)
pm.model_to_graphviz(pooled_model)
with pooled_model:
    # NUTS sampling; long tune phase with a high acceptance target
    trace_p= pm.sample(draws = 2000, tune= 10000, target_accept=0.97,
        return_inferencedata=True)
az.plot_trace(trace_p)
with pooled_model:
    azp_summary = az.summary(trace_p, hdi_prob=.94, round_to=6, kind="all")
azp_summary
# posterior-mean EPC and its standard deviation for the pooled model
epc_p =scale*(1 - azp_summary['mean']['alpha'])
epc_p_err = scale* (azp_summary['sd']['alpha'])
with pooled_model:
    ax = az.plot_posterior(trace_p, var_names=['alpha'], round_to=4, point_estimate=None,
        transform = alpha_to_EPC, textsize = 10.0, color='b')
ax.set_title("RB_process: standard "+str(q_list)+", backend: "+backend.name(),
    fontsize=12)
Bayes_legend ="Bayesian Pooled Model:\n EPC {0:1.3e} ± {1:1.3e}".format(epc_p, epc_p_err)
# red dotted line marks the frequentist (least-squares) estimate
ax.axvline(x=EPC,color='r',ls=":")
Fitter_legend ="Frequentist model:\n EPC {0:1.3e} ± {1:1.3e}".format(EPC, EPC_err)
ax.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
    Fitter_legend),fontsize=10 )
# same analysis with the hierarchical (per-seed) model
hierarchical_model = bf.get_bayesian_model(model_type="hierarchical",
    Y=Y,shots=1024,m_gates=lengths,
    alpha_ref = alpha_ref,
    mu_AB=mu_AB,cov_AB=cov_AB)
pm.model_to_graphviz(hierarchical_model)
with hierarchical_model:
    trace_h= pm.sample(draws = 2000, tune= 10000, target_accept=0.97,
        return_inferencedata=True)
az.plot_trace(trace_h)
with hierarchical_model:
    azh_summary = az.summary(trace_h, hdi_prob=.94, round_to=6,
        kind="all", var_names=["~GSP"])
azh_summary
epc_h =scale*(1 - azh_summary['mean']['alpha'])
epc_h_err = scale* (azh_summary['sd']['alpha'])
with hierarchical_model:
    ax = az.plot_posterior(trace_h, var_names=['alpha'], round_to=4, point_estimate=None,
        transform = alpha_to_EPC, textsize = 10.0, color='b')
ax.set_title("RB_process: standard "+str(q_list)+", backend: "+backend.name(),
    fontsize=12)
Bayes_legend ="Bayesian Hierarchical Model:\n EPC {0:1.3e} ± {1:1.3e}".format(epc_h, epc_h_err)
ax.axvline(x=EPC,color='r',ls=":")
Fitter_legend ="Frequentist model:\n EPC {0:1.3e} ± {1:1.3e}".format(EPC, EPC_err)
ax.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
    Fitter_legend),fontsize=10 )
# compare LSF and SMC
print("Model: Frequentist Bayesian")
print(" LSF pooled hierarchical")
print("EPC {0:.5f} {1:.5f} {2:.5f}"
    .format(EPC, epc_p, epc_h))
print("ERROR ± {0:.5f} ± {1:.5f} ± {2:.5f}"
    .format(EPC_err, epc_p_err, epc_h_err))
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
#fig, plt = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
# NOTE(review): `plt` is deliberately rebound to the Axes object here (WIP),
# shadowing the pyplot module for the rest of the script.
fig, plt = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
plt.set_ylabel("Ground State Population")
plt.set_xlabel("Number of Cliffords")
# raw counts (normalized by shots) for every RB seed
for i_seed in range(num_samples):
    plt.scatter(lengths, Y[i_seed,:]/1024, label = "data",
        marker="+",color="purple")
# fitted decay curves, slightly offset horizontally/vertically for visibility
plt.plot(np.array(lengths)-0.5,azp_summary['mean']['AB[0]']\
    *azp_summary['mean']['alpha']**lengths+\
    azp_summary['mean']['AB[1]']-0.002,'o-',color="c")
plt.plot(np.array(lengths)+0.5,azh_summary['mean']['AB[0]']\
    *azh_summary['mean']['alpha']**\
    lengths+azh_summary['mean']['AB[1]']+0.002,'o-',color="orange")
plt.legend(("Pooled Model",
    "Hierarchical Model"))
plt.set_title("RB_process: standard "+str(q_list)+\
    ", device: "+hardware+', backend: '+backend.name(),
    fontsize=14);
# Leave-one-out Cross-validation (LOO) comparison
df_comp_loo = az.compare({"hierarchical": trace_h, "pooled": trace_p})
df_comp_loo
az.plot_compare(df_comp_loo, insample_dev=False);
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit

# Build a three-qubit GHZ state: H creates the superposition on qubit 0,
# then two CNOTs from qubit 0 spread it to qubits 1 and 2.
circ = QuantumCircuit(3)
circ.h(0)
for target in (1, 2):
    circ.cx(0, target)
# Render the circuit with the matplotlib drawer
circ.draw('mpl')
|
https://github.com/Hayatto9217/Qiskit2
|
Hayatto9217
|
import matplotlib.pyplot as plt
import numpy as np
from math import pi
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, transpile
from qiskit.tools.visualization import circuit_drawer
from qiskit.quantum_info import state_fidelity
from qiskit import BasicAer
# unitary_simulator returns the full unitary matrix of the circuit
backend = BasicAer.get_backend('unitary_simulator')
q = QuantumRegister(1)
qc = QuantumCircuit(q)
# generic single-qubit rotation U(theta, phi, lambda)
qc.u(pi/2, pi/4,pi/8,q)
qc.draw()
job =backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
#Pゲート
qc =QuantumCircuit(q)
qc.p(pi/2, q)
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
#恒等ゲートId = p(0)
qc = QuantumCircuit(q)
qc.id(q)
qc.draw()
job =backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
#パウリゲート
qc = QuantumCircuit(q)
qc.x(q)
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
#ビットアンドフェーズフリップゲート
qc = QuantumCircuit(q)
qc.y(q)
qc.draw()
job =backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
#S=√Zフェーズ
qc = QuantumCircuit(q)
qc.s(q)
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
#H アダマールゲート
qc = QuantumCircuit(q)
qc.h(q)
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
#S転置共役
qc = QuantumCircuit(q)
qc.sdg(q)
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
qc = QuantumCircuit(q)
qc.t(q)
qc.draw()
job =backend.run(transpile(qc,backend))
job.result().get_unitary(qc, decimals=3)
# Standard rotations: rotation about the X axis
qc = QuantumCircuit(q)
qc.rx(pi/2, q)
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# Rotation about the Y axis
qc = QuantumCircuit(q)
qc.ry(pi/2, q)
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# Rotation about the Z axis
qc= QuantumCircuit(q)
qc.rz(pi/2,q)
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# Two-qubit case
q= QuantumRegister(2)
# Controlled Pauli-X (CNOT) gate
qc = QuantumCircuit(q)
qc.cx(q[0],q[1])
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# Controlled-Y gate
qc = QuantumCircuit(q)
qc.cy(q[0],q[1])
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# Controlled-Z gate
qc = QuantumCircuit(q)
qc.cz(q[0],q[1])
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# Controlled-RZ rotation gate
qc = QuantumCircuit(q)
qc.crz(pi/2,q[0],q[1])
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# Controlled phase rotation
qc = QuantumCircuit(q)
qc.cp(pi/2,q[0],q[1])
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# Controlled-U rotation
qc = QuantumCircuit(q)
qc.cu(pi/2,pi/2,pi/2,0,q[0],q[1])
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# SWAP gate (exchanges the states of the two register qubits)
qc = QuantumCircuit(q)
# Fix: the second argument was the bare list [1]; pass the register qubit
# q[1], consistent with every other two-qubit example in this script.
qc.swap(q[0], q[1])
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
q = QuantumRegister(3)
# Toffoli (CCX) gate
qc = QuantumCircuit(q)
qc.ccx(q[0],q[1],q[2])
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# CSWAP (Fredkin) gate
qc = QuantumCircuit(q)
qc.cswap(q[0],q[1],q[2])
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_unitary(qc, decimals=3)
# Non-unitary operations
q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
qc.measure(q,c)
qc.draw()
backend =BasicAer.get_backend('qasm_simulator')
job = backend.run(transpile(qc,backend))
job.result().get_counts(qc)
qc = QuantumCircuit(q,c)
qc.h(q)
qc.measure(q,c)
qc.draw()
job = backend.run(transpile(qc,backend))
job.result().get_counts(qc)
#reset
qc = QuantumCircuit(q,c)
qc.h(q)
qc.reset(q[0])
qc.measure(q,c)
qc.draw()
job = backend.run(transpile(qc, backend))
job.result().get_counts(qc)
qc = QuantumCircuit(q,c)
qc.h(q)
qc.measure(q,c)
qc.x(q[0]).c_if(c,0)
qc.measure(q, c)
qc.draw()
import math
desired_vector = [
1 / math.sqrt(16) * complex(0, 1),
1 / math.sqrt(8) * complex(1, 0),
1 / math.sqrt(16) * complex(1, 1),
0,
0,
1 / math.sqrt(8) * complex(1, 2),
1 / math.sqrt(16) * complex(1, 0),
0]
q = QuantumRegister(3)
qc = QuantumCircuit(q)
qc.initialize(desired_vector, [q[0],q[1],q[2]])
qc.draw()
backend = BasicAer.get_backend('statevector_simulator')
job =backend.run(transpile(qc, backend))
qc_state = job.result().get_statevector(qc)
qc_state
state_fidelity(desired_vector, qc_state)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from qiskit.algorithms import IterativeAmplitudeEstimation, EstimationProblem
from qiskit.circuit.library import LinearAmplitudeFunction
from qiskit_aer.primitives import Sampler
from qiskit_finance.circuit.library import LogNormalDistribution
# number of qubits used to discretize the spot-price uncertainty
num_uncertainty_qubits = 3
# parameters of the underlying's random distribution
S = 2.0  # initial spot price
vol = 0.4  # volatility of 40%
r = 0.05  # annual interest rate of 5% (comment fixed: the value is 0.05)
T = 40 / 365  # 40 days to maturity, in years
# resulting parameters for the log-normal distribution of S_T
mu = (r - 0.5 * vol**2) * T + np.log(S)
sigma = vol * np.sqrt(T)
mean = np.exp(mu + sigma**2 / 2)
variance = (np.exp(sigma**2) - 1) * np.exp(2 * mu + sigma**2)
stddev = np.sqrt(variance)
# lowest and highest value considered for the spot price; in between,
# an equidistant discretization (over 2**num_uncertainty_qubits points) is used.
low = np.maximum(0, mean - 3 * stddev)
high = mean + 3 * stddev
# construct A operator for QAE for the payoff function by
# composing the uncertainty model and the objective
uncertainty_model = LogNormalDistribution(
num_uncertainty_qubits, mu=mu, sigma=sigma**2, bounds=(low, high)
)
# plot probability distribution
x = uncertainty_model.values
y = uncertainty_model.probabilities
plt.bar(x, y, width=0.2)
plt.xticks(x, size=15, rotation=90)
plt.yticks(size=15)
plt.grid()
plt.xlabel("Spot Price at Maturity $S_T$ (\$)", size=15)
plt.ylabel("Probability ($\%$)", size=15)
plt.show()
# set the strike price (should be within the low and the high value of the uncertainty)
strike_price = 2.126
# set the approximation scaling for the payoff function
rescaling_factor = 0.25
# set up the piecewise-linear objective function
breakpoints = [low, strike_price]
slopes = [-1, 0]
offsets = [strike_price - low, 0]
f_min = 0
f_max = strike_price - low
european_put_objective = LinearAmplitudeFunction(
num_uncertainty_qubits,
slopes,
offsets,
domain=(low, high),
image=(f_min, f_max),
breakpoints=breakpoints,
rescaling_factor=rescaling_factor,
)
# construct A operator for QAE for the payoff function by
# composing the uncertainty model and the objective
european_put = european_put_objective.compose(uncertainty_model, front=True)
# plot exact payoff function (evaluated on the grid of the uncertainty model)
x = uncertainty_model.values
y = np.maximum(0, strike_price - x)
plt.plot(x, y, "ro-")
plt.grid()
plt.title("Payoff Function", size=15)
plt.xlabel("Spot Price", size=15)
plt.ylabel("Payoff", size=15)
plt.xticks(x, size=15, rotation=90)
plt.yticks(size=15)
plt.show()
# evaluate exact expected value (normalized to the [0, 1] interval)
exact_value = np.dot(uncertainty_model.probabilities, y)
exact_delta = -sum(uncertainty_model.probabilities[x <= strike_price])
print("exact expected value:\t%.4f" % exact_value)
print("exact delta value: \t%.4f" % exact_delta)
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
problem = EstimationProblem(
state_preparation=european_put,
objective_qubits=[num_uncertainty_qubits],
post_processing=european_put_objective.post_processing,
)
# construct amplitude estimation
ae = IterativeAmplitudeEstimation(
epsilon_target=epsilon, alpha=alpha, sampler=Sampler(run_options={"shots": 100})
)
result = ae.estimate(problem)
conf_int = np.array(result.confidence_interval_processed)
print("Exact value: \t%.4f" % exact_value)
print("Estimated value: \t%.4f" % (result.estimation_processed))
print("Confidence interval:\t[%.4f, %.4f]" % tuple(conf_int))
# set up the piecewise-linear objective function for the delta
breakpoints = [low, strike_price]
slopes = [0, 0]
offsets = [1, 0]
f_min = 0
f_max = 1
european_put_delta_objective = LinearAmplitudeFunction(
num_uncertainty_qubits,
slopes,
offsets,
domain=(low, high),
image=(f_min, f_max),
breakpoints=breakpoints,
)
# construct circuit for payoff function
european_put_delta = european_put_delta_objective.compose(uncertainty_model, front=True)
# set target precision and confidence level for the delta estimation
epsilon = 0.01
alpha = 0.05

problem = EstimationProblem(
    state_preparation=european_put_delta, objective_qubits=[num_uncertainty_qubits]
)
# construct amplitude estimation
ae_delta = IterativeAmplitudeEstimation(
    epsilon_target=epsilon, alpha=alpha, sampler=Sampler(run_options={"shots": 100})
)
result_delta = ae_delta.estimate(problem)

# The estimated amplitude equals -delta, so negate it (and reverse the
# interval bounds so the confidence interval stays ordered).
conf_int = -np.array(result_delta.confidence_interval)[::-1]
print("Exact delta: \t%.4f" % exact_delta)
# Fix: the output label was misspelled "Esimated" in the original.
print("Estimated value: \t%.4f" % -result_delta.estimation)
print("Confidence interval: \t[%.4f, %.4f]" % tuple(conf_int))
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/noamsgl/IBMAscolaChallenge
|
noamsgl
|
from qiskit import IBMQ
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.device import thermal_relaxation_values, gate_param_values
from qiskit.providers.aer.noise.noiseerror import NoiseError

# Survey every IBMQ backend on the account: build its device noise model and
# print readout error, thermal-relaxation parameters and gate parameters.
# Backends whose calibration data cannot be turned into a noise model are skipped.
provider = IBMQ.load_account()
print("Available Backends: ", provider.backends())
for be in provider.backends():
    try:
        print()
        print("*" * 20 + be.name() + "*" * 20)
        noise_model = NoiseModel.from_backend(be)
        # readout error on qubit 0
        # NOTE(review): _local_readout_errors is a private attribute and its key
        # format ('0' here) is version-dependent -- verify against the installed
        # qiskit-aer release.
        print("Readout Error on q_0:", noise_model._local_readout_errors['0'])
        # thermal relaxation and depolarizing parameters from calibration data
        properties = be.properties()
        relax_params = thermal_relaxation_values(properties) # T1, T2 (microS) and frequency values (GHz)
        t1, t2, freq = relax_params[0]  # values for qubit 0
        u3_length = properties.gate_length('u3', 0)  # duration of u3 on qubit 0
        print("t1: " + str(t1))
        print("t2: " + str(t2))
        print("freq: " + str(freq))
        print("universal_error gate length: " + str(u3_length))
        # per-gate depolarizing parameters (computed but not printed)
        device_gate_params = gate_param_values(properties)
    except NoiseError:
        pass
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit

# Bell-state demo: entangle two qubits and measure both.
qc = QuantumCircuit(2, 2)
qc.h(0)  # put qubit 0 into (|0> + |1>)/sqrt(2)
qc.cx(0, 1)  # entangle: state becomes (|00> + |11>)/sqrt(2)
qc.measure([0, 1], [0, 1])  # measure qubits 0,1 into classical bits 0,1
qc.draw('mpl')
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit_aer.utils import approximate_quantum_error, approximate_noise_model
import numpy as np
# Import Aer QuantumError functions that will be used
from qiskit_aer.noise import amplitude_damping_error, reset_error, pauli_error
from qiskit.quantum_info import Kraus

# Approximate an amplitude-damping channel by a reset channel, then compare
# against the probabilities computed analytically below.
gamma = 0.23
error = amplitude_damping_error(gamma)
results = approximate_quantum_error(error, operator_string="reset")
print(results)
# Analytic reference: reset-to-|0> with probability p, reset-to-|1> with q = 0.
p = (1 + gamma - np.sqrt(1 - gamma)) / 2
q = 0
print("")
print("Expected results:")
print("P(0) = {}".format(1-(p+q)))  # probability of no reset
print("P(1) = {}".format(p))
print("P(2) = {}".format(q))
# Same approximation, but supplying the channel as explicit Kraus operators
# of amplitude damping instead of a prebuilt QuantumError.
gamma = 0.23
K0 = np.array([[1,0],[0,np.sqrt(1-gamma)]])
K1 = np.array([[0,np.sqrt(gamma)],[0,0]])
results = approximate_quantum_error(Kraus([K0, K1]), operator_string="reset")
print(results)
# Approximation onto a custom operator list: reset-to-0 / reset-to-1 Kraus maps.
reset_to_0 = Kraus([np.array([[1,0],[0,0]]), np.array([[0,1],[0,0]])])
reset_to_1 = Kraus([np.array([[0,0],[1,0]]), np.array([[0,0],[0,1]])])
reset_kraus = [reset_to_0, reset_to_1]
gamma = 0.23
error = amplitude_damping_error(gamma)
results = approximate_quantum_error(error, operator_list=reset_kraus)
print(results)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/swe-train/qiskit__qiskit
|
swe-train
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Multiple-Control, Multiple-Target Gate."""
from __future__ import annotations
from collections.abc import Callable
from qiskit import circuit
from qiskit.circuit import ControlledGate, Gate, Qubit, QuantumRegister, QuantumCircuit
from qiskit.exceptions import QiskitError
from ..standard_gates import XGate, YGate, ZGate, HGate, TGate, TdgGate, SGate, SdgGate
class MCMT(QuantumCircuit):
    """The multi-controlled multi-target gate, for an arbitrary singly controlled target gate.

    For example, the H gate controlled on 3 qubits and acting on 2 target qubits is represented as:

    .. parsed-literal::

        ───■────
           │
        ───■────
           │
        ───■────
        ┌──┴───┐
        ┤0     ├
        │  2-H │
        ┤1     ├
        └──────┘

    This default implementation requires no ancilla qubits, by broadcasting the target gate
    to the number of target qubits and using Qiskit's generic control routine to control the
    broadcasted target on the control qubits. If ancilla qubits are available, a more efficient
    variant using the so-called V-chain decomposition can be used. This is implemented in
    :class:`~qiskit.circuit.library.MCMTVChain`.
    """

    def __init__(
        self,
        gate: Gate | Callable[[QuantumCircuit, Qubit, Qubit], circuit.Instruction],
        num_ctrl_qubits: int,
        num_target_qubits: int,
    ) -> None:
        """Create a new multi-control multi-target gate.

        Args:
            gate: The gate to be applied controlled on the control qubits and applied to the
                target qubits. Can be either a Gate or a circuit method.
                If it is a callable, it will be cast to a Gate.
            num_ctrl_qubits: The number of control qubits.
            num_target_qubits: The number of target qubits.

        Raises:
            AttributeError: If the gate cannot be cast to a controlled gate.
            AttributeError: If the number of controls or targets is 0.
        """
        if num_ctrl_qubits == 0 or num_target_qubits == 0:
            raise AttributeError("Need at least one control and one target qubit.")
        # set the internal properties and determine the number of qubits
        self.gate = self._identify_gate(gate)
        self.num_ctrl_qubits = num_ctrl_qubits
        self.num_target_qubits = num_target_qubits
        # num_ancilla_qubits is 0 here; MCMTVChain overrides the property
        num_qubits = num_ctrl_qubits + num_target_qubits + self.num_ancilla_qubits
        # initialize the circuit object
        super().__init__(num_qubits, name="mcmt")
        # label shown on the broadcasted multi-target box, e.g. "2-H"
        self._label = f"{num_target_qubits}-{self.gate.name.capitalize()}"
        # build the circuit
        self._build()

    def _build(self):
        """Define the MCMT gate without ancillas."""
        if self.num_target_qubits == 1:
            # no broadcasting needed (makes for better circuit diagrams)
            broadcasted_gate = self.gate
        else:
            # apply the base gate to every target qubit, then control the whole box
            broadcasted = QuantumCircuit(self.num_target_qubits, name=self._label)
            for target in list(range(self.num_target_qubits)):
                broadcasted.append(self.gate, [target], [])
            broadcasted_gate = broadcasted.to_gate()
        mcmt_gate = broadcasted_gate.control(self.num_ctrl_qubits)
        self.append(mcmt_gate, self.qubits, [])

    @property
    def num_ancilla_qubits(self):
        """Return the number of ancillas."""
        return 0

    def _identify_gate(self, gate):
        """Cast the gate input to a single-qubit base :class:`Gate`."""
        # shorthand string/method names accepted in place of a Gate instance
        valid_gates = {
            "ch": HGate(),
            "cx": XGate(),
            "cy": YGate(),
            "cz": ZGate(),
            "h": HGate(),
            "s": SGate(),
            "sdg": SdgGate(),
            "x": XGate(),
            "y": YGate(),
            "z": ZGate(),
            "t": TGate(),
            "tdg": TdgGate(),
        }
        if isinstance(gate, ControlledGate):
            base_gate = gate.base_gate
        elif isinstance(gate, Gate):
            if gate.num_qubits != 1:
                raise AttributeError("Base gate must act on one qubit only.")
            base_gate = gate
        elif isinstance(gate, QuantumCircuit):
            if gate.num_qubits != 1:
                raise AttributeError(
                    "The circuit you specified as control gate can only have one qubit!"
                )
            base_gate = gate.to_gate()  # raises error if circuit contains non-unitary instructions
        else:
            if callable(gate):  # identify via name of the passed function
                name = gate.__name__
            elif isinstance(gate, str):
                name = gate
            else:
                raise AttributeError(f"Invalid gate specified: {gate}")
            base_gate = valid_gates[name]
        return base_gate

    def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):
        """Return the controlled version of the MCMT circuit."""
        if ctrl_state is None:  # TODO add ctrl state implementation by adding X gates
            # open controls not supported yet: simply grow the control register
            return MCMT(self.gate, self.num_ctrl_qubits + num_ctrl_qubits, self.num_target_qubits)
        return super().control(num_ctrl_qubits, label, ctrl_state)

    def inverse(self):
        """Return the inverse MCMT circuit, which is itself."""
        return MCMT(self.gate, self.num_ctrl_qubits, self.num_target_qubits)
class MCMTVChain(MCMT):
    """The MCMT implementation using the CCX V-chain.

    This implementation requires ancillas but is decomposed into a much shallower circuit
    than the default implementation in :class:`~qiskit.circuit.library.MCMT`.

    **Expanded Circuit:**

    .. plot::

        from qiskit.circuit.library import MCMTVChain, ZGate
        from qiskit.tools.jupyter.library import _generate_circuit_library_visualization
        circuit = MCMTVChain(ZGate(), 2, 2)
        _generate_circuit_library_visualization(circuit.decompose())

    **Examples:**

        >>> from qiskit.circuit.library import HGate
        >>> MCMTVChain(HGate(), 3, 2).draw()

        q_0: ──■────────────────────────■──
               │                        │
        q_1: ──■────────────────────────■──
               │                        │
        q_2: ──┼────■──────────────■────┼──
               │    │  ┌───┐       │    │
        q_3: ──┼────┼──┤ H ├───────┼────┼──
               │    │  └─┬─┘┌───┐  │    │
        q_4: ──┼────┼────┼──┤ H ├──┼────┼──
             ┌─┴─┐  │    │  └─┬─┘  │  ┌─┴─┐
        q_5: ┤ X ├──■────┼────┼────■──┤ X ├
             └───┘┌─┴─┐  │    │  ┌─┴─┐└───┘
        q_6: ─────┤ X ├──■────■──┤ X ├─────
                  └───┘          └───┘
    """

    def _build(self):
        """Define the MCMT gate using the V-chain decomposition."""
        # qubit layout: [controls | targets | ancillas]
        control_qubits = self.qubits[: self.num_ctrl_qubits]
        target_qubits = self.qubits[
            self.num_ctrl_qubits : self.num_ctrl_qubits + self.num_target_qubits
        ]
        ancilla_qubits = self.qubits[self.num_ctrl_qubits + self.num_target_qubits :]
        if len(ancilla_qubits) > 0:
            # the last ancilla holds the AND of all controls after the chain
            master_control = ancilla_qubits[-1]
        else:
            # single control qubit: no chain needed, control directly
            master_control = control_qubits[0]
        # compute the AND of the controls into the ancilla chain ...
        self._ccx_v_chain_rule(control_qubits, ancilla_qubits, reverse=False)
        for qubit in target_qubits:
            self.append(self.gate.control(), [master_control, qubit], [])
        # ... then uncompute to restore all ancillas to |0>
        self._ccx_v_chain_rule(control_qubits, ancilla_qubits, reverse=True)

    @property
    def num_ancilla_qubits(self):
        """Return the number of ancilla qubits required."""
        return max(0, self.num_ctrl_qubits - 1)

    def _ccx_v_chain_rule(
        self,
        control_qubits: QuantumRegister | list[Qubit],
        ancilla_qubits: QuantumRegister | list[Qubit],
        reverse: bool = False,
    ) -> None:
        """Get the rule for the CCX V-chain.

        The CCX V-chain progressively computes the CCX of the control qubits and puts the final
        result in the last ancillary qubit.

        Args:
            control_qubits: The control qubits.
            ancilla_qubits: The ancilla qubits.
            reverse: If True, apply the chain in reverse order to uncompute the ancillas.
                If False, compute the chain forwards.

        Raises:
            QiskitError: If an insufficient number of ancilla qubits was provided.
        """
        if len(ancilla_qubits) == 0:
            return
        if len(ancilla_qubits) < len(control_qubits) - 1:
            raise QiskitError("Insufficient number of ancilla qubits.")
        iterations = list(enumerate(range(2, len(control_qubits))))
        if not reverse:
            # forward: fold controls pairwise into the ancilla chain
            self.ccx(control_qubits[0], control_qubits[1], ancilla_qubits[0])
            for i, j in iterations:
                self.ccx(control_qubits[j], ancilla_qubits[i], ancilla_qubits[i + 1])
        else:
            # reverse: undo the chain in opposite order, restoring the ancillas
            for i, j in reversed(iterations):
                self.ccx(control_qubits[j], ancilla_qubits[i], ancilla_qubits[i + 1])
            self.ccx(control_qubits[0], control_qubits[1], ancilla_qubits[0])

    def inverse(self):
        """Return the inverse MCMT V-chain circuit, which is itself."""
        return MCMTVChain(self.gate, self.num_ctrl_qubits, self.num_target_qubits)
|
https://github.com/ronitd2002/IBM-Quantum-challenge-2024
|
ronitd2002
|
# transpile_parallel.py
from qiskit import QuantumCircuit
from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager
from qiskit_transpiler_service.transpiler_service import TranspilerService
from qiskit_serverless import get_arguments, save_result, distribute_task, get
from qiskit_ibm_runtime import QiskitRuntimeService
from timeit import default_timer as timer
@distribute_task(target={"cpu": 2})
def transpile_parallel(circuit: QuantumCircuit, config):
    """Distributed transpilation of an abstract circuit into an ISA circuit.

    ``config`` is anything exposing ``run(circuit)`` -- a preset PassManager or
    a TranspilerService -- and the call executes remotely as a qiskit-serverless
    task (2 CPUs reserved per task via the decorator).
    """
    transpiled_circuit = config.run(circuit)
    return transpiled_circuit
# Get program arguments
arguments = get_arguments()
circuits = arguments.get("circuits")
backend_name = arguments.get("backend_name")
# Get backend
service = QiskitRuntimeService(channel="ibm_quantum")
backend = service.get_backend(backend_name)
# Define the transpilation configurations to benchmark: local preset pass
# managers at optimization levels 1-3, plus the cloud transpiler service
# with AI-assisted transpilation disabled and enabled.
optimization_levels = [1, 2, 3]
pass_managers = [
    generate_preset_pass_manager(optimization_level=level, backend=backend)
    for level in optimization_levels
]
transpiler_services = [
    # Fix: both entries previously constructed the service with ai="false",
    # contradicting the second entry's 'ai': True metadata. Also use the
    # backend selected via the program arguments instead of hard-coding it.
    {
        "service": TranspilerService(backend_name=backend_name, ai="false", optimization_level=3),
        "ai": False,
        "optimization_level": 3,
    },
    {
        "service": TranspilerService(backend_name=backend_name, ai="true", optimization_level=3),
        "ai": True,
        "optimization_level": 3,
    },
]
configs = pass_managers + transpiler_services
# Start process
print("Starting timer")
start = timer()
# run distributed tasks as async function
# we get task references as a return type
sample_task_references = []
for circuit in circuits:
sample_task_references.append([transpile_parallel(circuit, config) for config in configs])
# now we need to collect results from task references
results = get([task for subtasks in sample_task_references for task in subtasks])
end = timer()
# Record execution time
execution_time_serverless = end-start
print("Execution time: ", execution_time_serverless)
save_result({
"transpiled_circuits": results,
"execution_time" : execution_time_serverless
})
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit

# Tensor-product composition demo: combine a 1-qubit and a 2-qubit circuit
# into a single 3-qubit circuit.
top = QuantumCircuit(1)
top.x(0);
bottom = QuantumCircuit(2)
bottom.cry(0.2, 0, 1);
# bottom.tensor(top) places `top` on the lower-indexed qubit of the result.
tensored = bottom.tensor(top)
tensored.draw('mpl')
|
https://github.com/mentesniker/Quantum-Cryptography
|
mentesniker
|
%matplotlib inline
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from random import randint
import hashlib
#These two functions were taken from https://stackoverflow.com/questions/10237926/convert-string-to-list-of-bits-and-viceversa
def tobits(s):
    """Return the 8-bit big-endian binary form of each character of ``s``,
    concatenated into a single '0'/'1' string.

    Characters above U+00FF (more than 8 bits) keep all their bits, matching
    the original slice-padding behavior.
    """
    # format(ord(c), '08b') zero-pads to 8 bits, replacing the manual
    # '00000000'[len(bits):] padding of the original implementation.
    return ''.join(format(ord(c), '08b') for c in s)
def frombits(bits):
    """Inverse of ``tobits``: decode a sequence of bits ('0'/'1' characters
    or integers) back into text, consuming eight bits per character.
    Trailing bits beyond the last full byte are ignored.
    """
    decoded = []
    for start in range(0, 8 * (len(bits) // 8), 8):
        octet = ''.join(str(bit) for bit in bits[start:start + 8])
        decoded.append(chr(int(octet, 2)))
    return ''.join(decoded)
# Alice cell; Bob can't see what's going on in here.
# Alice picks one of two messages and encodes each bit into a qubit using a
# randomly chosen basis per bit (BB84-style encoding).
m0 = tobits("Yes I want to go to the cinema with you!!!")
m1 = tobits("No I dont want to go to the cinema with you...")
messageToSend = m1
# One random basis per message bit: 0 = computational, 1 = Hadamard.
Alice_bases = [randint(0,1) for x in range(len(messageToSend))]
qubits = list()
for i in range(len(Alice_bases)):
    mycircuit = QuantumCircuit(1,1)
    if(Alice_bases[i] == 0):
        # Computational basis: encode '1' as |1>, leave '0' as |0>.
        if(messageToSend[i] == "1"):
            mycircuit.x(0)
    else:
        # Hadamard basis: '0' -> |+>, '1' -> |->.
        if(messageToSend[i] == "0"):
            mycircuit.h(0)
        else:
            mycircuit.x(0)
            mycircuit.h(0)
    qubits.append(mycircuit)
# Bob cell; Alice can't see what's going on in here.
# NOTE(review): Bob measures in Alice's exact bases (Alice_bases is shared in
# this demo), so every bit is recovered deterministically -- the basis-sifting
# step of full BB84 is deliberately omitted here.
backend = Aer.get_backend('qasm_simulator')
measurements = list()
for i in range(len(Alice_bases)):
    qubit = qubits[i]
    if(Alice_bases[i] == 0):
        qubit.measure(0,0)
    else:
        # Undo the Hadamard encoding before a computational-basis measurement.
        qubit.h(0)
        qubit.measure(0,0)
    # Single shot with memory=True so the individual outcome bit is readable.
    result = execute(qubit, backend, shots=1, memory=True).result()
    measurements.append(int(result.get_memory()[0]))
print("Alice message was: " + frombits(measurements))
|
https://github.com/AnkRaw/Quantum-Convolutional-Neural-Network
|
AnkRaw
|
# Importing Libraries
import torch
from torch import cat, no_grad, manual_seed
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.optim as optim
from torch.nn import (
Module,
Conv2d,
Linear,
Dropout2d,
NLLLoss
)
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from qiskit_machine_learning.neural_networks import EstimatorQNN
from qiskit_machine_learning.connectors import TorchConnector
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit import QuantumCircuit
from qiskit.visualization import circuit_drawer
# Imports for CIFAR-10s
from torchvision.datasets import CIFAR10
from torchvision import transforms
def prepare_data(X, labels_to_keep, batch_size):
    """Restrict dataset ``X`` in place to the given labels and wrap it in a
    shuffled DataLoader.

    ``X`` must expose ``data`` (indexable by a list) and ``targets`` (a list
    of labels), as torchvision datasets do.
    """
    # Indices of samples whose label survives the filter.
    keep = [idx for idx, label in enumerate(X.targets) if label in labels_to_keep]
    X.data = X.data[keep]
    X.targets = [X.targets[idx] for idx in keep]
    return DataLoader(X, batch_size=batch_size, shuffle=True)
# Set seed for reproducibility
manual_seed(42)
# CIFAR-10 data transformation
transform = transforms.Compose([
transforms.ToTensor(), # convert the images to tensors
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # Normalization usign mean and std.
])
labels_to_keep = [0, 1]
batch_size = 1
# Preparing Train Data
X_train = CIFAR10(root="./data", train=True, download=True, transform=transform)
train_loader = prepare_data(X_train, labels_to_keep, batch_size)
# Preparing Test Data
X_test = CIFAR10(root="./data", train=False, download=True, transform=transform)
test_loader = prepare_data(X_test, labels_to_keep, batch_size)
print(f"Training dataset size: {len(train_loader.dataset)}")
print(f"Test dataset size: {len(test_loader.dataset)}")
# Defining and creating QNN
def create_qnn():
    """Build a 2-qubit EstimatorQNN: a ZZFeatureMap for data encoding followed
    by a RealAmplitudes ansatz whose rotation angles are the trainable weights."""
    feature_map = ZZFeatureMap(2)  # entangles qubits based on pairwise products of the 2 input features
    ansatz = RealAmplitudes(2, reps=1)  # trainable angles, optimized during training
    qc = QuantumCircuit(2)
    qc.compose(feature_map, inplace=True)
    qc.compose(ansatz, inplace=True)
    qnn = EstimatorQNN(
        circuit=qc,
        input_params=feature_map.parameters,
        weight_params=ansatz.parameters,
        # input gradients are required for backpropagation through TorchConnector
        input_gradients=True,
    )
    return qnn
qnn = create_qnn()
# Visualizing the QNN circuit
circuit_drawer(qnn.circuit, output='mpl')
# Defining torch NN module
class Net(Module):
    """Hybrid CNN whose 2-dim bottleneck feeds a quantum layer.

    Pipeline: conv/pool x2 -> dropout -> fc1 -> fc2 (2 outputs, the QNN's
    inputs) -> QNN (1 output) -> fc3 -> the pair (x, 1 - x).
    """
    def __init__(self, qnn):
        super().__init__()
        self.conv1 = Conv2d(3, 16, kernel_size=3, padding=1)
        self.conv2 = Conv2d(16, 32, kernel_size=3, padding=1)
        self.dropout = Dropout2d()
        # 32 channels * 8 * 8 spatial positions after two 2x2 poolings of 32x32 input
        self.fc1 = Linear(32 * 8 * 8, 64)
        self.fc2 = Linear(64, 2)  # 2-dimensional input to QNN
        self.qnn = TorchConnector(qnn)  # wraps the QNN as a differentiable torch layer
        self.fc3 = Linear(1, 1)  # 1-dimensional output from QNN
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout(x)
        x = x.view(x.shape[0], -1)  # flatten per sample
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        x = self.qnn(x)
        x = self.fc3(x)
        # Returns the pair (x, 1 - x). NOTE(review): this is fed to NLLLoss,
        # which normally expects log-probabilities -- confirm that is intended.
        return cat((x, 1 - x), -1)
# Creating model
model = Net(qnn)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Defining model, optimizer, and loss function
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = NLLLoss()
# Starting training
epochs = 10
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device) # Move data to GPU
optimizer.zero_grad(set_to_none=True)
output = model(data)
loss = loss_func(output, target)
loss.backward()
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss) / len(total_loss))
print("Training [{:.0f}%]\tLoss: {:.4f}".format(100.0 * (epoch + 1) / epochs, loss_list[-1]))
# Plotting loss convergence
plt.plot(loss_list)
plt.title("Hybrid NN Training Convergence")
plt.xlabel("Training Iterations")
plt.ylabel("Neg. Log Likelihood Loss")
plt.show()
# Persist the trained weights and reload them into a fresh hybrid network.
checkpoint_path = "model_cifar10_10EPOCHS.pt"
torch.save(model.state_dict(), checkpoint_path)
# Fix: the original loaded "model_cifar10.pt", a file this script never
# writes (it saves "model_cifar10_10EPOCHS.pt" above), so a clean run would
# raise FileNotFoundError. Load the checkpoint that was just saved.
qnn_cifar10 = create_qnn()
model_cifar10 = Net(qnn_cifar10)
model_cifar10.load_state_dict(torch.load(checkpoint_path))
correct = 0
total = 0
model_cifar10.eval()
with torch.no_grad():
for data, target in test_loader:
output = model_cifar10(data)
_, predicted = torch.max(output.data, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
# Calculating and print test accuracy
test_accuracy = correct / total * 100
print(f"Test Accuracy: {test_accuracy:.2f}%")
# Plotting predicted labels
n_samples_show = 6
count = 0
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
model_cifar10.eval()
with no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
if count == n_samples_show:
break
output = model_cifar10(data)
if len(output.shape) == 1:
output = output.reshape(1, *output.shape)
pred = output.argmax(dim=1, keepdim=True)
axes[count].imshow(np.transpose(data[0].numpy(), (1, 2, 0)))
axes[count].set_xticks([])
axes[count].set_yticks([])
axes[count].set_title("Predicted {0}\n Actual {1}".format(pred.item(), target.item()))
count += 1
plt.show()
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit, transpile
from qiskit.providers.fake_provider import FakeAuckland

backend = FakeAuckland()

# 15-qubit GHZ circuit: Hadamard on qubit 0, CNOT fan-out to all others.
ghz = QuantumCircuit(15)
ghz.h(0)
ghz.cx(0, range(1, 15))

# Transpile the same circuit 100 times and record the resulting depths;
# stochastic passes can yield a different depth on each run.
depths = [
    transpile(ghz, backend, layout_method='trivial').depth()
    for _ in range(100)
]

# Histogram of the observed depths.
plt.figure(figsize=(8, 6))
plt.hist(depths, align='left', color='#AC557C')
plt.xlabel('Depth', fontsize=14)
plt.ylabel('Counts', fontsize=14);
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
import numpy as np
def gcd(a, b):
    """Return the greatest common divisor of ``a`` and ``b`` via the
    iterative Euclidean algorithm.

    Improvement over the original: ``b == 0`` now returns ``a`` (the
    conventional gcd(a, 0) == a) instead of raising ZeroDivisionError.
    """
    while b != 0:
        a, b = b, a % b
    return a
print(gcd(630, 56))
N = 20
a = 7
r_vals = np.arange(2, N)
a_power_r = np.power(a, r_vals)
print(a_power_r)
a_r_mod_N = a_power_r%N
import matplotlib.pyplot as plt
plt.plot(r_vals, a_r_mod_N)
def modulo(a, r, N):
    """Return ``a**r mod N``.

    Uses Python's three-argument :func:`pow` (fast modular exponentiation,
    O(log r) multiplications) instead of the original r-1 sequential
    multiplications. This also fixes the ``r == 1`` case, where the original
    returned ``a`` unreduced even when ``a >= N``.
    """
    return pow(a, r, N)
print(modulo(7, 4, 15))
def period(a, N):
    """Return the smallest exponent ``i`` in [2, N) with a**i == 1 (mod N),
    or ``None`` if no such exponent exists in that range."""
    power = a
    i = 2
    while i < N:
        power = (power * a) % N
        if power == 1:
            return i
        i += 1
    return None
print(period(11, 31))
from qiskit import *
n = 4
circ = QuantumCircuit(n, n)
def swap(circuit, i, j):
    """Exchange qubits ``i`` and ``j`` of ``circuit`` using the standard
    three-CNOT SWAP construction, and return the circuit."""
    for control, target in ((i, j), (j, i), (i, j)):
        circuit.cx(control, target)
    return circuit
def unitary_7(circuit):
    """Apply one round of the hand-built unitary for this period-finding demo.

    NOTE(review): relies on the module-level qubit count ``n`` and the
    ``swap`` helper defined above; presumably implements multiplication
    by 7 modulo a small N on the register -- verify against the demo's
    measured period of 4.
    """
    # Bit-flip every qubit, then permute the wires with three swaps.
    for i in range(n):
        circuit.x(i)
    circuit = swap(circuit, 1, 2)
    circuit = swap(circuit, 2, 3)
    circuit = swap(circuit, 0, 3)
    circuit.barrier()
    return circuit
# circ.x(0)
# circ.x(1)
# circ.barrier()
r = 4
for j in range(r):
circ = unitary_7(circ)
circ.draw()
backend = Aer.get_backend('statevector_simulator')
job = execute(circ, backend)
results = job.result()
psi = results.get_statevector(circ)
for i in range(len(psi)):
print(f"{bin(i)} :" + str(abs(psi[i]**2)))
|
https://github.com/shesha-raghunathan/DATE2019-qiskit-tutorial
|
shesha-raghunathan
|
from datasets import *
from qiskit import Aer
from qiskit_aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name
from qiskit_aqua import run_algorithm, QuantumInstance
from qiskit_aqua.input import SVMInput
from qiskit_aqua.components.feature_maps import SecondOrderExpansion
from qiskit_aqua.algorithms import QSVMKernel
feature_dim = 2 # dimension of each data point
training_dataset_size = 20
testing_dataset_size = 10
random_seed = 10598
shots = 1024
sample_Total, training_input, test_input, class_labels = ad_hoc_data(training_size=training_dataset_size,
test_size=testing_dataset_size,
n=feature_dim, gap=0.3, PLOT_DATA=False)
datapoints, class_to_label = split_dataset_to_data_and_labels(test_input)
print(class_to_label)
params = {
'problem': {'name': 'svm_classification', 'random_seed': random_seed},
'algorithm': {
'name': 'QSVM.Kernel'
},
'backend': {'shots': shots},
'feature_map': {'name': 'SecondOrderExpansion', 'depth': 2, 'entanglement': 'linear'}
}
backend = Aer.get_backend('qasm_simulator')
algo_input = SVMInput(training_input, test_input, datapoints[0])
result = run_algorithm(params, algo_input, backend=backend)
print("kernel matrix during the training:")
kernel_matrix = result['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r')
plt.show()
print("testing success ratio: ", result['testing_accuracy'])
print("predicted classes:", result['predicted_classes'])
backend = Aer.get_backend('qasm_simulator')
feature_map = SecondOrderExpansion(num_qubits=feature_dim, depth=2, entangler_map={0: [1]})
svm = QSVMKernel(feature_map, training_input, test_input, None)# the data for prediction can be feeded later.
svm.random_seed = random_seed
quantum_instance = QuantumInstance(backend, shots=shots, seed=random_seed, seed_mapper=random_seed)
result = svm.run(quantum_instance)
print("kernel matrix during the training:")
kernel_matrix = result['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r')
plt.show()
print("testing success ratio: ", result['testing_accuracy'])
# Predict on the held-out datapoints and compare with the ground truth.
predicted_labels = svm.predict(datapoints[0])
predicted_classes = map_label_to_class_name(predicted_labels, svm.label_to_class)
print("ground truth: {}".format(datapoints[1]))
# Fix: the output label was misspelled "preduction" in the original.
print("prediction: {}".format(predicted_labels))
|
https://github.com/jatin-47/QGSS-2021
|
jatin-47
|
import networkx as nx
import numpy as np
import plotly.graph_objects as go
import matplotlib as mpl
import pandas as pd
from IPython.display import clear_output
from plotly.subplots import make_subplots
from matplotlib import pyplot as plt
from qiskit import Aer
from qiskit import QuantumCircuit
from qiskit.visualization import plot_state_city
from qiskit.algorithms.optimizers import COBYLA, SLSQP, ADAM
from time import time
from copy import copy
from typing import List
from qc_grader.graph_util import display_maxcut_widget, QAOA_widget, graphs
mpl.rcParams['figure.dpi'] = 300
from qiskit.circuit import Parameter, ParameterVector
# Parameters are initialized with a simple string identifier.
parameter_0 = Parameter('θ[0]')
parameter_1 = Parameter('θ[1]')

circuit = QuantumCircuit(1)
# Pass the initialized parameters as the rotation-angle argument of the
# Ry and Rx gates; the circuit stays symbolic until values are bound.
circuit.ry(theta = parameter_0, qubit = 0)
circuit.rx(theta = parameter_1, qubit = 0)
circuit.draw('mpl')

# A single Parameter may appear in several gates; binding it later assigns
# the same value to every occurrence.
parameter = Parameter('θ')
circuit = QuantumCircuit(1)
circuit.ry(theta = parameter, qubit = 0)
circuit.rx(theta = parameter, qubit = 0)
circuit.draw('mpl')
# Set the number of layers and qubits.
n=3
num_layers = 2

# ParameterVectors are initialized with a string identifier and an integer
# length: one Ry angle per qubit per layer, plus one final Ry layer,
# i.e. n*(num_layers+1) parameters in total.
parameters = ParameterVector('θ', n*(num_layers+1))

circuit = QuantumCircuit(n, n)
for layer in range(num_layers):
    # Parameterized Ry gates, one per qubit, drawn from the vector above.
    for i in range(n):
        circuit.ry(parameters[n*layer+i], i)
    circuit.barrier()
    # Entangling CNOTs: qubit i is targeted by every lower-indexed qubit j.
    for i in range(n):
        for j in range(i):
            circuit.cx(j,i)
    circuit.barrier()
# One additional layer of parameterized Ry gates after the last entangler.
for i in range(n):
    circuit.ry(parameters[n*num_layers+i], i)
circuit.barrier()
circuit.draw('mpl')
print(circuit.parameters)

# Create a parameter dictionary with random values to bind.
param_dict = {parameter: np.random.random() for parameter in parameters}
print(param_dict)

# Assign parameters using the assign_parameters method (returns a new,
# fully bound circuit; the symbolic one is untouched).
bound_circuit = circuit.assign_parameters(parameters = param_dict)
bound_circuit.draw('mpl')

# Parameters can also be replaced by expressions of *new* parameters.
# NOTE(review): 'i' below is the leftover loop index from the loops above,
# so every entry uses the same Ψ[i] — confirm this is intended.
new_parameters = ParameterVector('Ψ',9)
new_circuit = circuit.assign_parameters(parameters = [k*new_parameters[i] for k in range(9)])
new_circuit.draw('mpl')
# Run the circuit with assigned parameters on Aer's statevector simulator
# and visualize the resulting state.
simulator = Aer.get_backend('statevector_simulator')
result = simulator.run(bound_circuit).result()
statevector = result.get_statevector(bound_circuit)
plot_state_city(statevector)

# The following line produces an error when run because 'circuit' still contains non-assigned parameters
#result = simulator.run(circuit).result()

# List the example graphs shipped with the grader utilities.
for key in graphs.keys():
    print(key)

# Build a custom weighted graph for the MaxCut examples below.
graph = nx.Graph()
# Add nodes 0..5 and weighted edges (u, v, weight).
graph.add_nodes_from(np.arange(0,6,1))
edges = [(0,1,2.0),(0,2,3.0),(0,3,2.0),(0,4,4.0),(0,5,1.0),(1,2,4.0),(1,3,1.0),(1,4,1.0),(1,5,3.0),(2,4,2.0),(2,5,3.0),(3,4,5.0),(3,5,1.0)]
graph.add_weighted_edges_from(edges)
graphs['custom'] = graph
# Display widget
display_maxcut_widget(graphs['custom'])
def maxcut_cost_fn(graph: nx.Graph, bitstring: List[int]) -> float:
    """Compute the value of the cut of `graph` described by `bitstring`.

    Args:
        graph: The graph whose cut value is evaluated.
        bitstring: Integers '0'/'1', one per node, assigning each node a side.

    Returns:
        The total weight of all edges crossing the cut.
    """
    weights = nx.adjacency_matrix(graph).toarray()
    order = weights.shape[0]
    # An ordered pair (u, v) contributes w_uv exactly when u sits on the
    # '1' side and v on the '0' side, so each cut edge is counted once
    # even though the adjacency matrix is symmetric.
    total = 0.
    for u in range(order):
        for v in range(order):
            total += weights[u, v] * bitstring[u] * (1 - bitstring[v])
    return total
def plot_maxcut_histogram(graph: nx.Graph) -> None:
    """
    Plots a bar diagram with the values for all possible cuts of a given graph.
    Args:
        graph: The graph to compute cut values for
    """
    num_vars = graph.number_of_nodes()
    # Enumerate every bitstring of length num_vars; the '[::-1]' reversal
    # presumably matches Qiskit's little-endian bit order — verify against
    # how measurement results are labelled.
    bitstrings = ['{:b}'.format(i).rjust(num_vars, '0')[::-1] for i in range(2**num_vars)]
    values = [maxcut_cost_fn(graph = graph, bitstring = [int(x) for x in bitstring]) for bitstring in bitstrings]
    # Sort both lists jointly so bars appear in increasing cut-value order.
    values, bitstrings = zip(*sorted(zip(values, bitstrings)))
    # Plot bar diagram; bars are colored by their cut value.
    bar_plot = go.Bar(x = bitstrings, y = values, marker=dict(color=values, colorscale = 'plasma', colorbar=dict(title='Cut Value')))
    fig = go.Figure(data=bar_plot, layout = dict(xaxis=dict(type = 'category'), width = 1500, height = 600))
    fig.show()
# Show the exhaustive cut-value histogram for the custom graph.
plot_maxcut_histogram(graph = graphs['custom'])

from qc_grader import grade_lab2_ex1
bitstring = [1, 0, 1, 1, 0, 0] #DEFINE THE CORRECT MAXCUT BITSTRING HERE
# Note that the grading function is expecting a list of integers '0' and '1'
grade_lab2_ex1(bitstring)
from qiskit_optimization import QuadraticProgram

# An empty quadratic program: no variables and no objective yet.
quadratic_program = QuadraticProgram('sample_problem')
print(quadratic_program.export_as_lp_string())

# Add one variable of each supported kind.
quadratic_program.binary_var(name = 'x_0')
quadratic_program.integer_var(name = 'x_1')
quadratic_program.continuous_var(name = 'x_2', lowerbound = -2.5, upperbound = 1.8)

# Objective: minimize x^T Q x + c^T x + constant.
quadratic = [[0,1,2],[3,4,5],[0,1,2]]
linear = [10,20,30]
quadratic_program.minimize(quadratic = quadratic, linear = linear, constant = -5)
print(quadratic_program.export_as_lp_string())
def quadratic_program_from_graph(graph: nx.Graph) -> QuadraticProgram:
    """Constructs a quadratic program from a given graph for a MaxCut problem instance.

    The cut value sum_{ij} W_ij x_i (1 - x_j) expands into the quadratic
    form -x^T W x plus a linear term of weighted node degrees, which is
    maximized over binary variables x_i.

    Args:
        graph: Underlying graph of the problem.

    Returns:
        QuadraticProgram
    """
    weights = nx.adjacency_matrix(graph).toarray().astype(float)
    num_nodes = weights.shape[0]
    # Quadratic part: the negated weight matrix.
    qubo_matrix = -weights
    # Linear part: row sums of the weight matrix (weighted degrees).
    qubo_vector = weights.sum(axis=1)
    # Assemble the program with one binary variable per node.
    program = QuadraticProgram('sample_problem')
    for node in range(num_nodes):
        program.binary_var(name='x_{}'.format(node))
    program.maximize(quadratic=qubo_matrix, linear=qubo_vector)
    return program
# Build and display the MaxCut quadratic program for the custom graph.
quadratic_program = quadratic_program_from_graph(graphs['custom'])
print(quadratic_program.export_as_lp_string())

from qc_grader import grade_lab2_ex2
# Note that the grading function is expecting a quadratic program
grade_lab2_ex2(quadratic_program)
def qaoa_circuit(qubo: QuadraticProgram, p: int = 1):
    """Build the parameterized p-layer QAOA circuit for a QUBO instance.

    Args:
        qubo: The quadratic program instance (binary variables only).
        p: The number of alternating cost/mixer layers.

    Returns:
        The parameterized QAOA circuit, with parameters gamma[0..p-1]
        (cost layers) and beta[0..p-1] (mixer layers).
    """
    num_qubits = len(qubo.variables)
    quadratic = qubo.objective.quadratic.to_array(symmetric=True)
    linear = qubo.objective.linear.to_array()

    circuit = QuantumCircuit(num_qubits, num_qubits)
    # Start from the uniform superposition |+...+>.
    circuit.h(range(num_qubits))

    # One (gamma, beta) parameter pair per layer.
    gammas = ParameterVector('gamma', p)
    betas = ParameterVector('beta', p)

    for layer in range(p):
        gamma = gammas[layer]
        # Cost layer, single-qubit part: the RZ angle combines the linear
        # coefficient with the row sum of the symmetric quadratic matrix.
        for q in range(num_qubits):
            circuit.rz(gamma*(linear[q]+sum(quadratic[q])), q)
        # Cost layer, two-qubit part: each unordered pair appears twice in
        # the symmetric matrix, hence the factor 0.5 on every RZZ.
        for q1 in range(num_qubits):
            for q2 in range(num_qubits):
                if q1 != q2:
                    circuit.rzz(gamma*quadratic[q1][q2]*0.5, q1, q2)
        # Mixer layer: single-qubit X rotations by angle 2*beta.
        for q in range(num_qubits):
            circuit.rx(2*betas[layer], q)
    return circuit
# Sanity check: build the QAOA circuit for the custom graph and bind dummy
# parameter values so it can be drawn and executed.
quadratic_program = quadratic_program_from_graph(graphs['custom'])
custom_circuit = qaoa_circuit(qubo = quadratic_program)
test = custom_circuit.assign_parameters(parameters=[1.0]*len(custom_circuit.parameters))

from qc_grader import grade_lab2_ex3
# Note that the grading function is expecting a quantum circuit
grade_lab2_ex3(test)

from qiskit.algorithms import QAOA
from qiskit_optimization.algorithms import MinimumEigenOptimizer

# Solve the MaxCut QUBO with Qiskit's built-in QAOA (one layer, ADAM
# optimizer) wrapped in a MinimumEigenOptimizer.
backend = Aer.get_backend('statevector_simulator')
qaoa = QAOA(optimizer = ADAM(), quantum_instance = backend, reps=1, initial_point = [0.1,0.1])
eigen_optimizer = MinimumEigenOptimizer(min_eigen_solver = qaoa)
quadratic_program = quadratic_program_from_graph(graphs['custom'])
result = eigen_optimizer.solve(quadratic_program)
print(result)
def plot_samples(samples):
    """
    Plots a bar diagram for the samples of a quantum algorithm
    Args:
        samples: sequence of samples exposing .probability, .fval and .x
    """
    # Sort samples by ascending probability (most likely drawn last).
    samples = sorted(samples, key = lambda x: x.probability)
    # Extract probabilities, objective values and bitstring labels.
    probabilities = [sample.probability for sample in samples]
    values = [sample.fval for sample in samples]
    bitstrings = [''.join([str(int(i)) for i in sample.x]) for sample in samples]
    # Bars: height = sampling probability, color = objective value.
    sample_plot = go.Bar(x = bitstrings, y = probabilities, marker=dict(color=values, colorscale = 'plasma',colorbar=dict(title='Function Value')))
    fig = go.Figure(
        data=sample_plot,
        layout = dict(
            xaxis=dict(
                type = 'category'
            )
        )
    )
    fig.show()
plot_samples(result.samples)

# Record the optimizer trajectory (beta_0, gamma_0, energy) while QAOA runs,
# so it can be overlaid on the precomputed energy landscape below.
graph_name = 'custom'
quadratic_program = quadratic_program_from_graph(graph=graphs[graph_name])
trajectory={'beta_0':[], 'gamma_0':[], 'energy':[]}
# Constant offset added to the (negated) eigenvalue — presumably the shift
# from the QUBO <-> Ising transformation; confirm against the solver docs.
offset = 1/4*quadratic_program.objective.quadratic.to_array(symmetric = True).sum() + 1/2*quadratic_program.objective.linear.to_array().sum()

def callback(eval_count, params, mean, std_dev):
    # NOTE(review): assumes params is ordered [gamma_0, beta_0] — confirm
    # against QAOA's parameter ordering.
    trajectory['beta_0'].append(params[1])
    trajectory['gamma_0'].append(params[0])
    trajectory['energy'].append(-mean + offset)

# Classical optimizers available for the experiments below.
optimizers = {
    'cobyla': COBYLA(),
    'slsqp': SLSQP(),
    'adam': ADAM()
}

qaoa = QAOA(optimizer = optimizers['cobyla'], quantum_instance = backend, reps=1, initial_point = [6.2,1.8],callback = callback)
eigen_optimizer = MinimumEigenOptimizer(min_eigen_solver = qaoa)
result = eigen_optimizer.solve(quadratic_program)
fig = QAOA_widget(landscape_file=f'./resources/energy_landscapes/{graph_name}.csv', trajectory = trajectory, samples = result.samples)
fig.show()
# Study how the circuit depth p affects final energy, wall-clock runtime and
# the number of objective evaluations the optimizer needs.
graph_name = 'custom'
quadratic_program = quadratic_program_from_graph(graphs[graph_name])

# Create callback to record total number of evaluations.
max_evals = 0
def callback(eval_count, params, mean, std_dev):
    global max_evals
    max_evals = eval_count

# Metrics gathered for each value of p.
energies = []
runtimes = []
num_evals=[]

# Run QAOA for p = 1..9.
for p in range(1,10):
    print(f'Evaluating for p = {p}...')
    qaoa = QAOA(optimizer = optimizers['cobyla'], quantum_instance = backend, reps=p, callback=callback)
    eigen_optimizer = MinimumEigenOptimizer(min_eigen_solver = qaoa)
    start = time()
    result = eigen_optimizer.solve(quadratic_program)
    runtimes.append(time()-start)
    num_evals.append(max_evals)
    # Expected cut value of the final sampled distribution.
    avg_value = 0.
    for sample in result.samples:
        avg_value += sample.probability*sample.fval
    energies.append(avg_value)

# Create and display plots, one subplot per metric.
energy_plot = go.Scatter(x = list(range(1,10)), y =energies, marker=dict(color=energies, colorscale = 'plasma'))
runtime_plot = go.Scatter(x = list(range(1,10)), y =runtimes, marker=dict(color=runtimes, colorscale = 'plasma'))
num_evals_plot = go.Scatter(x = list(range(1,10)), y =num_evals, marker=dict(color=num_evals, colorscale = 'plasma'))
fig = make_subplots(rows = 1, cols = 3, subplot_titles = ['Energy value', 'Runtime', 'Number of evaluations'])
fig.update_layout(width=1800,height=600, showlegend=False)
fig.add_trace(energy_plot, row=1, col=1)
fig.add_trace(runtime_plot, row=1, col=2)
fig.add_trace(num_evals_plot, row=1, col=3)
clear_output()
fig.show()
def plot_qaoa_energy_landscape(graph: nx.Graph, cvar: float = None):
    """Grid-search the p=1 QAOA energy landscape of a graph and plot it.

    Sweeps beta over [0, pi] and gamma over [0, 4*pi] on a 50x50 grid,
    estimates the (CVaR-)energy from sampled cuts at every grid point,
    renders a plotly surface plot, and returns the best point found.

    Args:
        graph: Underlying MaxCut problem graph.
        cvar: If given, the fraction (0 < cvar <= 1) of best shots used for
            the CVaR expectation; if None, the plain sample mean is used.

    Returns:
        Dict with keys 'beta', 'gamma' and 'energy' of the best grid point.
    """
    num_shots = 1000
    seed = 42
    simulator = Aer.get_backend('qasm_simulator')
    # Fix: pass the `seed` variable instead of a second hard-coded 42,
    # which left `seed` unused.
    simulator.set_options(seed_simulator = seed)
    # Generate the p=1 QAOA circuit and measure every qubit.
    circuit = qaoa_circuit(qubo = quadratic_program_from_graph(graph), p=1)
    circuit.measure(range(graph.number_of_nodes()),range(graph.number_of_nodes()))
    # Precompute the cut value of every bitstring so each measured sample
    # is scored by a dictionary lookup instead of recomputation.
    cut_values = {}
    size = graph.number_of_nodes()
    for i in range(2**size):
        bitstr = '{:b}'.format(i).rjust(size, '0')[::-1]
        x = [int(bit) for bit in bitstr]
        cut_values[bitstr] = maxcut_cost_fn(graph, x)
    # Perform a grid search over all (beta, gamma) pairs.
    data_points = []
    max_energy = None
    optimum = None
    for beta in np.linspace(0,np.pi, 50):
        for gamma in np.linspace(0, 4*np.pi, 50):
            # Binding order [beta, gamma] follows circuit.parameters —
            # presumably alphabetical ordering puts beta first; verify.
            bound_circuit = circuit.assign_parameters([beta, gamma])
            result = simulator.run(bound_circuit, shots = num_shots).result()
            counts = result.get_counts(bound_circuit)
            # Expand the counts into one cut value per shot.
            measured_cuts = []
            for bitstring, count in counts.items():
                measured_cuts = measured_cuts + [cut_values[bitstring]]*count
            if cvar is None:
                # Plain expectation: mean over all sampled cut values.
                energy = sum(measured_cuts)/num_shots
            else:
                # CVaR expectation: mean over the best cvar-fraction of
                # shots. max(1, ...) guards against a zero-shot tail (and
                # the division by zero the original would have hit) when
                # cvar*num_shots < 1.
                measured_cuts = sorted(measured_cuts, reverse = True)
                num_top = max(1, int(cvar*num_shots))
                energy = sum(measured_cuts[:num_top])/num_top
            # Track the best grid point seen so far.
            if max_energy is None or energy > max_energy:
                max_energy = energy
                optimum = {'beta': beta, 'gamma': gamma, 'energy': energy}
            data_points.append({'beta': beta, 'gamma': gamma, 'energy': energy})
    # Pivot the samples into a (beta x gamma) matrix for the surface plot.
    df = pd.DataFrame(data_points)
    df = df.pivot(index='beta', columns='gamma', values='energy')
    matrix = df.to_numpy()
    beta_values = df.index.tolist()
    gamma_values = df.columns.tolist()
    surface_plot = go.Surface(
        x=gamma_values,
        y=beta_values,
        z=matrix,
        coloraxis = 'coloraxis'
    )
    fig = go.Figure(data = surface_plot)
    fig.show()
    # Return the best grid point.
    return optimum
# Plain-expectation landscape and its best grid point.
graph = graphs['custom']
optimal_parameters = plot_qaoa_energy_landscape(graph = graph)
print('Optimal parameters:')
print(optimal_parameters)

# CVaR variant (alpha = 0.2) of the same landscape.
optimal_parameters = plot_qaoa_energy_landscape(graph = graph, cvar = 0.2)
print(optimal_parameters)

from qc_grader import grade_lab2_ex4
# Note that the grading function is expecting a python dictionary
# with the entries 'beta', 'gamma' and 'energy'
grade_lab2_ex4(optimal_parameters)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.