repo
stringclasses 900
values | file
stringclasses 754
values | content
stringlengths 4
215k
|
|---|---|---|
https://github.com/scaleway/qiskit-scaleway
|
scaleway
|
import os
import random
from qiskit import QuantumCircuit
from qiskit_scaleway import ScalewayProvider
def test_qsim_simple_circuit():
    """End-to-end smoke test: run a 4-qubit GHZ circuit on a Scaleway qsim backend.

    Requires QISKIT_SCALEWAY_PROJECT_ID / _API_TOKEN / _API_URL in the environment.
    """
    scw_provider = ScalewayProvider(
        project_id=os.environ["QISKIT_SCALEWAY_PROJECT_ID"],
        secret_key=os.environ["QISKIT_SCALEWAY_API_TOKEN"],
        url=os.environ["QISKIT_SCALEWAY_API_URL"],
    )
    backend = scw_provider.get_backend("qsim_simulation_pop_c16m128")
    assert backend is not None

    # Randomized deduplication id so repeated runs do not collide.
    dedup_id = f"my-qsim-session-autotest-{random.randint(1, 1000)}"
    session_id = backend.start_session(
        name="my-qsim-session-autotest",
        deduplication_id=dedup_id,
        max_duration="2m",
    )
    assert session_id is not None

    try:
        # Build a 4-qubit GHZ state and measure every qubit.
        circuit = QuantumCircuit(4)
        circuit.h(0)
        for target in (1, 2, 3):
            circuit.cx(0, target)
        circuit.measure_all()

        n_shots = 1000
        job = backend.run(circuit, shots=n_shots, session_id=session_id)

        qiskit_result = job.result(format="qiskit")
        assert qiskit_result is not None
        assert qiskit_result.success
        assert qiskit_result.results[0].shots == n_shots
    finally:
        # Always release the remote session, even when an assertion fails.
        backend.stop_session(session_id)
|
https://github.com/scaleway/qiskit-scaleway
|
scaleway
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for Sampler."""
import unittest
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter
from qiskit.circuit.library import RealAmplitudes
from qiskit.exceptions import QiskitError
from qiskit.extensions.unitary import UnitaryGate
from qiskit.primitives import Sampler, SamplerResult
from qiskit.providers import JobStatus, JobV1
from qiskit.test import QiskitTestCase
class TestSampler(QiskitTestCase):
    """Tests for the reference Sampler primitive."""

    def setUp(self):
        super().setUp()
        # Fixture circuits: a 1-qubit Hadamard test and a 2-qubit Bell pair.
        hadamard = QuantumCircuit(1, 1, name="Hadamard")
        hadamard.h(0)
        hadamard.measure(0, 0)
        bell = QuantumCircuit(2, name="Bell")
        bell.h(0)
        bell.cx(0, 1)
        bell.measure_all()
        self._circuit = [hadamard, bell]
        # Expected quasi-probability distributions for the fixtures above
        # (integer keys are measured bitstrings read as integers).
        self._target = [
            {0: 0.5, 1: 0.5},
            {0: 0.5, 3: 0.5, 1: 0, 2: 0},
        ]
        # Parameterized fixtures plus parameter sets and expected distributions.
        self._pqc = RealAmplitudes(num_qubits=2, reps=2)
        self._pqc.measure_all()
        self._pqc2 = RealAmplitudes(num_qubits=2, reps=3)
        self._pqc2.measure_all()
        self._pqc_params = [[0.0] * 6, [1.0] * 6]
        self._pqc_target = [{0: 1}, {0: 0.0148, 1: 0.3449, 2: 0.0531, 3: 0.5872}]
        self._theta = [
            [0, 1, 1, 2, 3, 5],
            [1, 2, 3, 4, 5, 6],
            [0, 1, 2, 3, 4, 5, 6, 7],
        ]

    def _generate_circuits_target(self, indices):
        """Return (circuits, targets) for the given fixture indices (list only)."""
        if isinstance(indices, list):
            circuits = [self._circuit[j] for j in indices]
            target = [self._target[j] for j in indices]
        else:
            raise ValueError(f"invalid index {indices}")
        return circuits, target

    def _generate_params_target(self, indices):
        """Return (params, targets) for the parameterized fixture indices (int or list)."""
        if isinstance(indices, int):
            params = self._pqc_params[indices]
            target = self._pqc_target[indices]
        elif isinstance(indices, list):
            params = [self._pqc_params[j] for j in indices]
            target = [self._pqc_target[j] for j in indices]
        else:
            raise ValueError(f"invalid index {indices}")
        return params, target

    def _compare_probs(self, prob, target):
        """Assert each quasi-distribution matches its target to one decimal place."""
        if not isinstance(prob, list):
            prob = [prob]
        if not isinstance(target, list):
            target = [target]
        self.assertEqual(len(prob), len(target))
        for p, targ in zip(prob, target):
            for key, t_val in targ.items():
                if key in p:
                    self.assertAlmostEqual(p[key], t_val, places=1)
                else:
                    # A key absent from the result means its probability is ~0.
                    self.assertAlmostEqual(t_val, 0, places=1)

    def test_sampler_run(self):
        """Test Sampler.run()."""
        bell = self._circuit[1]
        sampler = Sampler()
        job = sampler.run(circuits=[bell])
        self.assertIsInstance(job, JobV1)
        result = job.result()
        self.assertIsInstance(result, SamplerResult)
        # print([q.binary_probabilities() for q in result.quasi_dists])
        self._compare_probs(result.quasi_dists, self._target[1])

    def test_sample_run_multiple_circuits(self):
        """Test Sampler.run() with multiple circuits."""
        # executes three Bell circuits
        # Argument `parameters` is optional.
        bell = self._circuit[1]
        sampler = Sampler()
        result = sampler.run([bell, bell, bell]).result()
        # print([q.binary_probabilities() for q in result.quasi_dists])
        self._compare_probs(result.quasi_dists[0], self._target[1])
        self._compare_probs(result.quasi_dists[1], self._target[1])
        self._compare_probs(result.quasi_dists[2], self._target[1])

    def test_sampler_run_with_parameterized_circuits(self):
        """Test Sampler.run() with parameterized circuits."""
        # parameterized circuit
        pqc = self._pqc
        pqc2 = self._pqc2
        theta1, theta2, theta3 = self._theta
        sampler = Sampler()
        result = sampler.run([pqc, pqc, pqc2], [theta1, theta2, theta3]).result()
        # result of pqc(theta1)
        prob1 = {
            "00": 0.1309248462975777,
            "01": 0.3608720796028448,
            "10": 0.09324865232050054,
            "11": 0.41495442177907715,
        }
        self.assertDictAlmostEqual(result.quasi_dists[0].binary_probabilities(), prob1)
        # result of pqc(theta2)
        prob2 = {
            "00": 0.06282290651933871,
            "01": 0.02877144385576705,
            "10": 0.606654494132085,
            "11": 0.3017511554928094,
        }
        self.assertDictAlmostEqual(result.quasi_dists[1].binary_probabilities(), prob2)
        # result of pqc2(theta3)
        prob3 = {
            "00": 0.1880263994380416,
            "01": 0.6881971261189544,
            "10": 0.09326232720582443,
            "11": 0.030514147237179892,
        }
        self.assertDictAlmostEqual(result.quasi_dists[2].binary_probabilities(), prob3)

    def test_run_1qubit(self):
        """test for 1-qubit cases"""
        qc = QuantumCircuit(1)
        qc.measure_all()
        qc2 = QuantumCircuit(1)
        qc2.x(0)
        qc2.measure_all()
        sampler = Sampler()
        result = sampler.run([qc, qc2]).result()
        self.assertIsInstance(result, SamplerResult)
        self.assertEqual(len(result.quasi_dists), 2)
        # Circuit i deterministically yields outcome i with probability 1.
        for i in range(2):
            keys, values = zip(*sorted(result.quasi_dists[i].items()))
            self.assertTupleEqual(keys, (i,))
            np.testing.assert_allclose(values, [1])

    def test_run_2qubit(self):
        """test for 2-qubit cases"""
        qc0 = QuantumCircuit(2)
        qc0.measure_all()
        qc1 = QuantumCircuit(2)
        qc1.x(0)
        qc1.measure_all()
        qc2 = QuantumCircuit(2)
        qc2.x(1)
        qc2.measure_all()
        qc3 = QuantumCircuit(2)
        qc3.x([0, 1])
        qc3.measure_all()
        sampler = Sampler()
        result = sampler.run([qc0, qc1, qc2, qc3]).result()
        self.assertIsInstance(result, SamplerResult)
        self.assertEqual(len(result.quasi_dists), 4)
        # Circuit i deterministically yields outcome i with probability 1.
        for i in range(4):
            keys, values = zip(*sorted(result.quasi_dists[i].items()))
            self.assertTupleEqual(keys, (i,))
            np.testing.assert_allclose(values, [1])

    def test_run_single_circuit(self):
        """Test for single circuit case."""
        sampler = Sampler()
        with self.subTest("No parameter"):
            circuit = self._circuit[1]
            target = self._target[1]
            # Every spelling of "no parameters" must be accepted.
            param_vals = [None, [], [[]], np.array([]), np.array([[]])]
            for val in param_vals:
                with self.subTest(f"{circuit.name} w/ {val}"):
                    result = sampler.run(circuit, val).result()
                    self._compare_probs(result.quasi_dists, target)
                    self.assertEqual(len(result.metadata), 1)
        with self.subTest("One parameter"):
            circuit = QuantumCircuit(1, 1, name="X gate")
            param = Parameter("x")
            circuit.ry(param, 0)
            circuit.measure(0, 0)
            target = [{1: 1}]
            # Every spelling of a single parameter value must be accepted.
            param_vals = [
                [np.pi],
                [[np.pi]],
                np.array([np.pi]),
                np.array([[np.pi]]),
                [np.array([np.pi])],
            ]
            for val in param_vals:
                with self.subTest(f"{circuit.name} w/ {val}"):
                    result = sampler.run(circuit, val).result()
                    self._compare_probs(result.quasi_dists, target)
                    self.assertEqual(len(result.metadata), 1)
        with self.subTest("More than one parameter"):
            circuit = self._pqc
            target = [self._pqc_target[0]]
            param_vals = [
                self._pqc_params[0],
                [self._pqc_params[0]],
                np.array(self._pqc_params[0]),
                np.array([self._pqc_params[0]]),
                [np.array(self._pqc_params[0])],
            ]
            for val in param_vals:
                with self.subTest(f"{circuit.name} w/ {val}"):
                    result = sampler.run(circuit, val).result()
                    self._compare_probs(result.quasi_dists, target)
                    self.assertEqual(len(result.metadata), 1)

    def test_run_reverse_meas_order(self):
        """test for sampler with reverse measurement order"""
        x = Parameter("x")
        y = Parameter("y")
        qc = QuantumCircuit(3, 3)
        qc.rx(x, 0)
        qc.rx(y, 1)
        qc.x(2)
        # Qubit i is measured into classical bit (2 - i).
        qc.measure(0, 2)
        qc.measure(1, 1)
        qc.measure(2, 0)
        sampler = Sampler()
        result = sampler.run([qc] * 2, [[0, 0], [np.pi / 2, 0]]).result()
        self.assertIsInstance(result, SamplerResult)
        self.assertEqual(len(result.quasi_dists), 2)
        # qc({x: 0, y: 0})
        keys, values = zip(*sorted(result.quasi_dists[0].items()))
        self.assertTupleEqual(keys, (1,))
        np.testing.assert_allclose(values, [1])
        # qc({x: pi/2, y: 0})
        keys, values = zip(*sorted(result.quasi_dists[1].items()))
        self.assertTupleEqual(keys, (1, 5))
        np.testing.assert_allclose(values, [0.5, 0.5])

    def test_run_errors(self):
        """Test for errors with run method"""
        qc1 = QuantumCircuit(1)
        qc1.measure_all()
        qc2 = RealAmplitudes(num_qubits=1, reps=1)
        qc2.measure_all()
        qc3 = QuantumCircuit(1)
        qc4 = QuantumCircuit(1, 1)
        sampler = Sampler()
        with self.subTest("set parameter values to a non-parameterized circuit"):
            with self.assertRaises(ValueError):
                _ = sampler.run([qc1], [[1e2]])
        with self.subTest("missing all parameter values for a parameterized circuit"):
            with self.assertRaises(ValueError):
                _ = sampler.run([qc2], [[]])
        with self.subTest("missing some parameter values for a parameterized circuit"):
            with self.assertRaises(ValueError):
                _ = sampler.run([qc2], [[1e2]])
        with self.subTest("too many parameter values for a parameterized circuit"):
            with self.assertRaises(ValueError):
                _ = sampler.run([qc2], [[1e2]] * 100)
        with self.subTest("no classical bits"):
            with self.assertRaises(ValueError):
                _ = sampler.run([qc3], [[]])
        with self.subTest("no measurement"):
            with self.assertRaises(QiskitError):
                # The following raises QiskitError because this check is located in
                # `Sampler._preprocess_circuit`
                _ = sampler.run([qc4], [[]])

    def test_run_empty_parameter(self):
        """Test for empty parameter"""
        n = 5
        qc = QuantumCircuit(n, n - 1)
        qc.measure(range(n - 1), range(n - 1))
        sampler = Sampler()
        with self.subTest("one circuit"):
            result = sampler.run([qc], shots=1000).result()
            self.assertEqual(len(result.quasi_dists), 1)
            for q_d in result.quasi_dists:
                quasi_dist = {k: v for k, v in q_d.items() if v != 0.0}
                self.assertDictEqual(quasi_dist, {0: 1.0})
            self.assertEqual(len(result.metadata), 1)
        with self.subTest("two circuits"):
            result = sampler.run([qc, qc], shots=1000).result()
            self.assertEqual(len(result.quasi_dists), 2)
            for q_d in result.quasi_dists:
                quasi_dist = {k: v for k, v in q_d.items() if v != 0.0}
                self.assertDictEqual(quasi_dist, {0: 1.0})
            self.assertEqual(len(result.metadata), 2)

    def test_run_numpy_params(self):
        """Test for numpy array as parameter values"""
        qc = RealAmplitudes(num_qubits=2, reps=2)
        qc.measure_all()
        k = 5
        params_array = np.random.rand(k, qc.num_parameters)
        params_list = params_array.tolist()
        params_list_array = list(params_array)
        sampler = Sampler()
        # The plain-list run is the reference result the ndarray runs must match.
        target = sampler.run([qc] * k, params_list).result()
        with self.subTest("ndarrary"):
            result = sampler.run([qc] * k, params_array).result()
            self.assertEqual(len(result.metadata), k)
            for i in range(k):
                self.assertDictEqual(result.quasi_dists[i], target.quasi_dists[i])
        with self.subTest("list of ndarray"):
            result = sampler.run([qc] * k, params_list_array).result()
            self.assertEqual(len(result.metadata), k)
            for i in range(k):
                self.assertDictEqual(result.quasi_dists[i], target.quasi_dists[i])

    def test_run_with_shots_option(self):
        """test with shots option."""
        params, target = self._generate_params_target([1])
        sampler = Sampler()
        result = sampler.run(
            circuits=[self._pqc], parameter_values=params, shots=1024, seed=15
        ).result()
        self._compare_probs(result.quasi_dists, target)

    def test_run_with_shots_option_none(self):
        """test with shots=None option. Seed is ignored then."""
        sampler = Sampler()
        result_42 = sampler.run(
            [self._pqc], parameter_values=[[0, 1, 1, 2, 3, 5]], shots=None, seed=42
        ).result()
        result_15 = sampler.run(
            [self._pqc], parameter_values=[[0, 1, 1, 2, 3, 5]], shots=None, seed=15
        ).result()
        # With shots=None the result is the exact distribution, so the two
        # seeds must agree.
        self.assertDictAlmostEqual(result_42.quasi_dists, result_15.quasi_dists)

    def test_run_shots_result_size(self):
        """test with shots option to validate the result size"""
        n = 10
        shots = 100
        qc = QuantumCircuit(n)
        qc.h(range(n))
        qc.measure_all()
        sampler = Sampler()
        result = sampler.run(qc, [], shots=shots, seed=42).result()
        self.assertEqual(len(result.quasi_dists), 1)
        # At most one distinct outcome per shot, and the quasi-probabilities
        # must sum to 1.
        self.assertLessEqual(len(result.quasi_dists[0]), shots)
        self.assertAlmostEqual(sum(result.quasi_dists[0].values()), 1.0)

    def test_primitive_job_status_done(self):
        """test primitive job's status"""
        bell = self._circuit[1]
        sampler = Sampler()
        job = sampler.run(circuits=[bell])
        _ = job.result()
        self.assertEqual(job.status(), JobStatus.DONE)

    def test_options(self):
        """Test for options"""
        with self.subTest("init"):
            sampler = Sampler(options={"shots": 3000})
            self.assertEqual(sampler.options.get("shots"), 3000)
        with self.subTest("set_options"):
            sampler.set_options(shots=1024, seed=15)
            self.assertEqual(sampler.options.get("shots"), 1024)
            self.assertEqual(sampler.options.get("seed"), 15)
        with self.subTest("run"):
            params, target = self._generate_params_target([1])
            result = sampler.run([self._pqc], parameter_values=params).result()
            self._compare_probs(result.quasi_dists, target)
            self.assertEqual(result.quasi_dists[0].shots, 1024)

    def test_circuit_with_unitary(self):
        """Test for circuit with unitary gate."""
        # Identity unitary: the all-zeros outcome must have probability 1.
        gate = UnitaryGate(np.eye(2))
        circuit = QuantumCircuit(1)
        circuit.append(gate, [0])
        circuit.measure_all()
        sampler = Sampler()
        sampler_result = sampler.run([circuit]).result()
        self.assertDictAlmostEqual(sampler_result.quasi_dists[0], {0: 1, 1: 0})
if __name__ == "__main__":
unittest.main()
|
https://github.com/mgg39/qiskit-networks
|
mgg39
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=wrong-import-order
"""Main Qiskit public functionality."""
import pkgutil
# First, check for required Python and API version
from . import util
# qiskit errors operator
from .exceptions import QiskitError
# The main qiskit operators
from qiskit.circuit import ClassicalRegister
from qiskit.circuit import QuantumRegister
from qiskit.circuit import QuantumCircuit
# pylint: disable=redefined-builtin
from qiskit.tools.compiler import compile # TODO remove after 0.8
from qiskit.execute import execute
# The qiskit.extensions.x imports needs to be placed here due to the
# mechanism for adding gates dynamically.
import qiskit.extensions
import qiskit.circuit.measure
import qiskit.circuit.reset
# Allow extending this namespace. Please note that currently this line needs
# to be placed *before* the wrapper imports or any non-import code AND *before*
# importing the package you want to allow extensions for (in this case `backends`).
__path__ = pkgutil.extend_path(__path__, __name__)
# Please note these are global instances, not modules.
from qiskit.providers.basicaer import BasicAer
# Try to import the Aer provider if installed.
try:
from qiskit.providers.aer import Aer
except ImportError:
pass
# Try to import the IBMQ provider if installed.
try:
from qiskit.providers.ibmq import IBMQ
except ImportError:
pass
from .version import __version__
from .version import __qiskit_version__
|
https://github.com/mgg39/qiskit-networks
|
mgg39
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=wrong-import-order
"""Main Qiskit public functionality."""
import pkgutil
# First, check for required Python and API version
from . import util
# qiskit errors operator
from .exceptions import QiskitError
# The main qiskit operators
from qiskit.circuit import ClassicalRegister
from qiskit.circuit import QuantumRegister
from qiskit.circuit import QuantumCircuit
# pylint: disable=redefined-builtin
from qiskit.tools.compiler import compile # TODO remove after 0.8
from qiskit.execute import execute
# The qiskit.extensions.x imports needs to be placed here due to the
# mechanism for adding gates dynamically.
import qiskit.extensions
import qiskit.circuit.measure
import qiskit.circuit.reset
# Allow extending this namespace. Please note that currently this line needs
# to be placed *before* the wrapper imports or any non-import code AND *before*
# importing the package you want to allow extensions for (in this case `backends`).
__path__ = pkgutil.extend_path(__path__, __name__)
# Please note these are global instances, not modules.
from qiskit.providers.basicaer import BasicAer
# Try to import the Aer provider if installed.
try:
from qiskit.providers.aer import Aer
except ImportError:
pass
# Try to import the IBMQ provider if installed.
try:
from qiskit.providers.ibmq import IBMQ
except ImportError:
pass
from .version import __version__
from .version import __qiskit_version__
|
https://github.com/mgg39/qiskit-networks
|
mgg39
|
import numpy as np
import qiskit
class Node:
    """A node in a quantum network topology.

    Holds a name, a 2D/3D grid location, and the network elements and
    ports contained in the node.
    """

    def __init__(self, name: str, location: list, elements: list, ports: list):
        """
        Constructor of the Node.
        :param name: the name of the Node.
        :param location: defined location of the Node within a topology
            (a 2- or 3-element coordinate list).
        :param elements: list of network elements within the Node.
        :param ports: list of ports within the Node.
        """
        self.name = name          # display/lookup name of the Node
        self.location = location  # coordinate list (2D or 3D)
        # Bug fix: `elements` and `ports` were previously discarded by the
        # constructor, and the accessors of the same name shadowed the
        # (missing) attributes, so node.elements() returned the bound method
        # itself.  Store them under private names so the accessors work.
        self._elements = elements  # network elements inside the Node
        self._ports = ports        # ports inside the Node

    def get_name(self) -> str:
        """
        Return the name of the Node.
        """
        return self.name

    def get_location(self) -> list:
        """
        Return the location address of the Node.
        Should be described as a 2D or 3D array; also records the detected
        dimensionality on ``self.dimension_topology`` (np.nan when invalid).
        """
        if len(self.location) == 2:
            self.dimension_topology = 2
        elif len(self.location) == 3:
            self.dimension_topology = 3
        else:
            self.dimension_topology = np.nan
            print("Node.location has an incorrect dimensionality. Please define the location through the use of a 2D or a 3D grid system.")
        return self.location

    def elements(self) -> list:
        """
        Return the list of element within the Node.
        """
        return self._elements

    def ports(self) -> list:
        """
        Return ports within the Node.
        """
        return self._ports
|
https://github.com/mgg39/qiskit-networks
|
mgg39
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=wrong-import-order
"""Main Qiskit public functionality."""
import pkgutil
# First, check for required Python and API version
from . import util
# qiskit errors operator
from .exceptions import QiskitError
# The main qiskit operators
from qiskit.circuit import ClassicalRegister
from qiskit.circuit import QuantumRegister
from qiskit.circuit import QuantumCircuit
# pylint: disable=redefined-builtin
from qiskit.tools.compiler import compile # TODO remove after 0.8
from qiskit.execute import execute
# The qiskit.extensions.x imports needs to be placed here due to the
# mechanism for adding gates dynamically.
import qiskit.extensions
import qiskit.circuit.measure
import qiskit.circuit.reset
# Allow extending this namespace. Please note that currently this line needs
# to be placed *before* the wrapper imports or any non-import code AND *before*
# importing the package you want to allow extensions for (in this case `backends`).
__path__ = pkgutil.extend_path(__path__, __name__)
# Please note these are global instances, not modules.
from qiskit.providers.basicaer import BasicAer
# Try to import the Aer provider if installed.
try:
from qiskit.providers.aer import Aer
except ImportError:
pass
# Try to import the IBMQ provider if installed.
try:
from qiskit.providers.ibmq import IBMQ
except ImportError:
pass
from .version import __version__
from .version import __qiskit_version__
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
# Generate randomized-benchmarking (RB) circuit sequences with qiskit-ignis
# and export each circuit as a QASM file.
import os
import qiskit.ignis.verification.randomized_benchmarking as rb

# Number of seeds (random sequences)
nseeds = 2
# Number of Cliffords in the sequence
nCliffs = [1, 10, 20, 50]
# 2Q RB on Q0,Q2 and 1Q RB on Q1 etc., also defines the width of the circuit
# rb_pattern = [[0,2],[1],[3,4]]
rb_pattern = [[0,1],[2],[3,4]]
# Compact pattern id for file names, e.g. "0-1_2_3-4".
# NOTE(review): the comprehension variable `rb` shadows the imported `rb`
# module; in Python 3 the shadowing is confined to the comprehension scope,
# so the later rb.randomized_benchmarking_seq call still works — but the
# name choice is confusing.
rb_pattern_string = '_'.join(['-'.join([str(r) for r in rb]) for rb in rb_pattern])
# Do three times as many 1Q Cliffords
# length_multiplier = [1,3,1,2]
rb_opts = {}
rb_opts['length_vector'] = nCliffs
rb_opts['nseeds'] = nseeds
rb_opts['rb_pattern'] = rb_pattern
# rb_opts['length_multiplier'] = length_multiplier

# rb_circs[seed][length_index] is a generated RB circuit.
rb_circs, xdata = rb.randomized_benchmarking_seq(**rb_opts)

# store generated circuits in desired directory
count = 0
indexListElem = 0  # 1-based seed counter used in the output file name
for listElem in rb_circs:
    indexListElem += 1
    listElem_string = str(indexListElem)
    count += len(listElem)
    for indexElem, elem in enumerate(listElem):
        print("----------------------------------------")
        print(f"depth: {elem.depth()}")
        print(f"number of multi-qubit gates: {elem.num_nonlocal_gates()}")
        nCliffs_string = str(nCliffs[indexElem])
        file_name = 'pattern' + rb_pattern_string + 'nCliffs' + nCliffs_string + 'seed' + listElem_string + '.qasm'
        save_dir = '/Users/mariesalm/Downloads/temp/' # enter your directory here
        # NOTE(review): hard-coded, user-specific path; the directory must
        # already exist or open() below raises FileNotFoundError.
        with open(os.path.join(save_dir, file_name), 'w') as file:
            file.write(elem.qasm())
print("****************************************")
print(f"Generated {count} circuits")
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, execute, Aer

# Circuit exported from quantum-circuit.com; builds an oracle for the boolean
# formula below on inputs q0 (A) and q1 (B) with ancillas q2-q6 and the phase
# qubit q7, then measures the two inputs.
# https://quantum-circuit.com/app_details/about/66bpe6Jf5mgQMahgd
# oracle = '(A | B) & (A | ~B) & (~A | B)'
qc = QuantumCircuit()
q = QuantumRegister(8, 'q')
c = ClassicalRegister(2, 'c')
qc.add_register(q)
qc.add_register(c)
# Input superposition and ancilla initialization.
qc.h(q[0])
qc.h(q[1])
qc.x(q[2])
qc.x(q[3])
qc.x(q[4])
qc.x(q[7])
qc.x(q[0])
qc.x(q[1])
# q7 in |-> so the marked input picks up a phase kickback.
qc.h(q[7])
# Clause computation into ancillas q2-q6.
qc.ccx(q[0], q[1], q[2])
qc.x(q[0])
qc.x(q[1])
qc.x(q[1])
qc.ccx(q[0], q[1], q[3])
qc.x(q[0])
qc.x(q[1])
qc.ccx(q[1], q[0], q[4])
qc.x(q[0])
qc.ccx(q[3], q[2], q[5])
qc.x(q[0])
qc.ccx(q[5], q[4], q[6])
# Kick the phase onto q7 when all clauses are satisfied.
qc.cx(q[6], q[7])
# Uncompute the ancillas (mirror of the clause computation above).
qc.ccx(q[4], q[5], q[6])
qc.ccx(q[2], q[3], q[5])
qc.x(q[4])
qc.ccx(q[0], q[1], q[4])
qc.x(q[0])
qc.x(q[1])
qc.x(q[3])
qc.ccx(q[0], q[1], q[3])
qc.x(q[0])
qc.x(q[1])
qc.x(q[2])
qc.x(q[1])
qc.ccx(q[0], q[1], q[2])
qc.x(q[0])
qc.x(q[1])
# H-X-CZ-X-H on both inputs: the standard two-qubit diffuser pattern.
qc.h(q[0])
qc.h(q[1])
qc.x(q[0])
qc.x(q[1])
qc.cz(q[0], q[1])
qc.x(q[0])
qc.x(q[1])
qc.h(q[0])
qc.h(q[1])
# Read out the two input qubits.
qc.measure(q[0], c[0])
qc.measure(q[1], c[1])
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit

# Circuit exported from quantum-circuit.com.
# https://quantum-circuit.com/app_details/about/bw5r9HTiTHvQHtCB5
qc = QuantumCircuit()
q = QuantumRegister(5, 'q')
c = ClassicalRegister(3, 'c')
qc.add_register(q)
qc.add_register(c)
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[1])
qc.cx(q[2], q[3])
# NOTE(review): all cu1 rotations below use angle 0, i.e. they are
# identities as written — presumably placeholders from the export.
qc.cu1(0, q[1], q[0])
qc.cx(q[2], q[4])
qc.h(q[0])
qc.cu1(0, q[1], q[2])
qc.cu1(0, q[0], q[2])
qc.h(q[2])
# Measure the three counting qubits.
qc.measure(q[0], c[0])
qc.measure(q[1], c[1])
qc.measure(q[2], c[2])

def get_circuit(**kwargs):
    """Get circuit of Shor with input 15."""
    return qc
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from typing import List
from qiskit import QuantumCircuit, transpile, QuantumRegister, ClassicalRegister
from qiskit import BasicAer,Aer,execute, IBMQ
from qiskit.providers.aer import QasmSimulator
from qiskit.circuit.library.arithmetic import DraperQFTAdder, RGQFTMultiplier
from qiskit.circuit.library import IntegerComparator
from qiskit.algorithms import IterativeAmplitudeEstimation, AmplitudeEstimation
from qiskit.algorithms import EstimationProblem
from qiskit.utils import QuantumInstance
from qiskit.providers.fake_provider import FakeMontreal
def encode_input(x: List[int]) -> "Gate":
    """Build a basis-state encoding circuit for the bit list *x*.

    Bit ``x[i]`` (MSB first) flips qubit ``n - 1 - i`` when it equals 1.
    """
    width = len(x)
    circuit = QuantumCircuit(width, name="encoding")
    for position, bit in enumerate(x):
        if bit == 1:
            circuit.x(width - position - 1)
    return circuit
def encode_hadamard_copy(num_qubits: int) -> "Gate":
    """CNOT fan-out copying qubits 0..n-1 onto qubits n..2n-1 (computational basis)."""
    circuit = QuantumCircuit(num_qubits * 2, name="extension")
    for source in range(num_qubits):
        circuit.cx(source, source + num_qubits)
    return circuit
def encode_hadamard(num_qubits: int) -> "Gate":
    """Apply a Hadamard to every qubit, preparing a uniform superposition."""
    circuit = QuantumCircuit(num_qubits, name="encoding")
    for wire in range(num_qubits):
        circuit.h(wire)
    return circuit
def classical(x: List[int], y: List[int]) -> int:
    """Classical reference for the quantum in-circle check.

    Interprets *x* and *y* as binary fractions (MSB first), i.e.
    ``num = sum(bit_i * 2**-(i + 1))``, and returns 1 if the point
    (num1, num2) lies strictly inside the unit circle, else 0.
    """
    num1 = 0
    for i in range(len(x)):
        num1 += x[i] * 0.5 ** (i + 1)
    num2 = 0
    for i in range(len(y)):
        # Bug fix: this previously read `x[i]`, which ignored y entirely and
        # raised IndexError whenever y was longer than x.
        num2 += y[i] * 0.5 ** (i + 1)
    result = num1 ** 2 + num2 ** 2
    if result < 1:
        return 1
    return 0
def operator(bits_per_input, x1, x2, useQAE):
    """Build the circle-check circuit: square and add two binary inputs, then
    compare against the circle radius.

    :param bits_per_input: number of qubits per input number.
    :param x1: bit list for input 1; empty list means "prepare a superposition".
    :param x2: bit list for input 2; empty list means "prepare a superposition".
    :param useQAE: when False, add a classical register and a final measurement
        of the comparator result; when True, return the bare operator for
        amplitude estimation.
    """
    num_bits_after_mult = 2 * bits_per_input      # width of each squared value
    num_bits_comparer = num_bits_after_mult + 1   # comparator needs sum width + 1
    nQubits = 4 * bits_per_input + 2 * num_bits_after_mult + 1 + num_bits_comparer
    nClassical = 1
    # Each input is held twice so RGQFTMultiplier can square it (a * a).
    input_register_1 = QuantumRegister(size=bits_per_input)
    input_register_1_copy = QuantumRegister(size=bits_per_input)
    input_register_2 = QuantumRegister(size=bits_per_input)
    input_register_2_copy = QuantumRegister(size=bits_per_input)
    if len(x1) == 0 and len(x2) == 0:
        # No concrete inputs: uniform superposition plus CNOT fan-out copies.
        input_circuit_1 = encode_hadamard(bits_per_input)
        input_circuit_1_copy = encode_hadamard_copy(bits_per_input)
        input_circuit_2 = encode_hadamard(bits_per_input)
        input_circuit_2_copy = encode_hadamard_copy(bits_per_input)
    else:
        # Concrete inputs: encode the same bit pattern into both registers.
        input_circuit_1 = encode_input(x1)
        input_circuit_1_copy = encode_input(x1)
        input_circuit_2 = encode_input(x2)
        input_circuit_2_copy = encode_input(x2)
    carry_qubits_mult_1 = QuantumRegister(size=num_bits_after_mult)
    carry_qubits_mult_2 = QuantumRegister(size=num_bits_after_mult)
    carry_qubits_comparer = QuantumRegister(size=num_bits_comparer)
    carry_qubit_addition = QuantumRegister(size=1)  # 1 additional qubit for addition
    if useQAE == False:
        output_register = ClassicalRegister(size=nClassical)
        circuit = QuantumCircuit(
            input_register_1,
            input_register_1_copy,
            input_register_2,
            input_register_2_copy,
            carry_qubits_mult_1,
            carry_qubits_mult_2,
            carry_qubit_addition,
            carry_qubits_comparer,
            output_register
        )
    else:
        circuit = QuantumCircuit(
            input_register_1,
            input_register_1_copy,
            input_register_2,
            input_register_2_copy,
            carry_qubits_mult_1,
            carry_qubits_mult_2,
            carry_qubit_addition,
            carry_qubits_comparer
        )
    # encoding
    circuit.append(input_circuit_1, input_register_1[:])
    if len(x1) == 0 and len(x2) == 0:
        # Fan-out circuit spans the original register and its copy.
        circuit.append(input_circuit_1_copy, input_register_1[:] + input_register_1_copy[:])
    else:
        circuit.append(input_circuit_1_copy, input_register_1_copy[:])
    circuit.append(input_circuit_2, input_register_2[:])
    if len(x1) == 0 and len(x2) == 0:
        circuit.append(input_circuit_2_copy, input_register_2[:] + input_register_2_copy[:])
    else:
        circuit.append(input_circuit_2_copy, input_register_2_copy[:])
    # multiplication: square each input into its carry register
    multiplicator = RGQFTMultiplier(num_state_qubits=bits_per_input)
    circuit.append(multiplicator, input_register_1[:] + input_register_1_copy[:] + carry_qubits_mult_1[:])
    circuit.append(multiplicator, input_register_2[:] + input_register_2_copy[:] + carry_qubits_mult_2[:])
    # addition: x1^2 + x2^2 accumulated into carry_qubits_mult_2 (+ carry qubit)
    adder = DraperQFTAdder(num_bits_after_mult, kind="half")
    circuit.append(adder, carry_qubits_mult_1[:] + carry_qubits_mult_2[:] + carry_qubit_addition[:])
    # inequality check if in circle
    s = 2 ** (bits_per_input) - 1
    comparer = IntegerComparator(num_bits_after_mult + 1, s * s + 1, False)
    circuit.append(comparer, carry_qubits_mult_2[:] + carry_qubit_addition[:] + carry_qubits_comparer[:])
    # readout: first comparator qubit holds the in-circle flag
    if useQAE == False:
        circuit.measure(nQubits - num_bits_comparer, 0)
    return circuit
def get_circuit(**kwargs):
    """Build the circle-check operator and either run amplitude estimation
    (useQAE=True, returning the AE circuit) or execute it directly on the
    qasm simulator (useQAE=False, returning None)."""
    bits_per_input = 1  # num qubits per number
    useQAE = True
    circuit = operator(bits_per_input, [], [], useQAE)
    #circuit.draw('mpl')
    #backend = FakeMontreal()
    #circuit_transpiled = transpile(circuit,backend)
    #circuit_transpiled.draw('mpl')
    #circuit_transpiled.depth()
    backend = Aer.get_backend('qasm_simulator')
    circuit_to_return = None
    if useQAE == True:
        quantum_instance = Aer.get_backend('qasm_simulator')
        # Recompute the same widths used inside operator() to locate the
        # comparator's result qubit.
        num_bits_after_mult = 2 * bits_per_input
        num_bits_comparer = num_bits_after_mult + 1
        nQubits = 4*bits_per_input + 2*num_bits_after_mult + 1 + num_bits_comparer
        problem = EstimationProblem(
            state_preparation=circuit.decompose(),  # A operator
            objective_qubits=[nQubits - num_bits_comparer],  # the "good" state Psi1 is identified as measuring |1> in qubit 0
        )
        print(f"Depth: ", circuit.depth())
        print(f"Width: ", circuit.num_qubits)
        ae = AmplitudeEstimation(
            num_eval_qubits=3,  # the number of evaluation qubits specifies circuit width and accuracy
            quantum_instance=quantum_instance,
        )
        # NOTE(review): iae is constructed but never used below.
        iae = IterativeAmplitudeEstimation(
            epsilon_target=0.01,  # target accuracy
            alpha=0.05,  # width of the confidence interval
            quantum_instance=quantum_instance,
        )
        circuit_to_return = ae.construct_circuit(problem, measurement=True).decompose()
        print(circuit_to_return)
        result = ae.estimate(problem)
        print(result)
    else:
        shots = 1000
        job = execute(circuit, backend=backend, shots=shots)
        counts = job.result().get_counts()
        print(counts)
        #print(4 * counts['1'] /shots )
    return circuit_to_return
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from typing import List
from qiskit import QuantumCircuit, transpile, QuantumRegister, ClassicalRegister
from qiskit import BasicAer,Aer,execute, IBMQ
from qiskit.providers.aer import QasmSimulator
from qiskit.circuit.library.arithmetic import DraperQFTAdder, RGQFTMultiplier
from qiskit.circuit.library import IntegerComparator
from qiskit.algorithms import IterativeAmplitudeEstimation, AmplitudeEstimation
from qiskit.algorithms import EstimationProblem
from qiskit.utils import QuantumInstance
from qiskit.providers.fake_provider import FakeMontreal
def encode_input(x: List[int]) -> "Gate":
    """Return a circuit that flips qubit ``n - 1 - i`` for every 1-bit ``x[i]``
    (basis-state encoding, MSB first)."""
    num_qubits = len(x)
    qc = QuantumCircuit(num_qubits, name="encoding")
    for i in range(num_qubits):
        if x[i] == 1:
            qc.x(num_qubits - i - 1)
    return qc
def encode_hadamard_copy(num_qubits: int) -> "Gate":
    """CNOT fan-out copying qubits 0..n-1 onto qubits n..2n-1 (computational basis)."""
    qc = QuantumCircuit(num_qubits * 2, name="extension")
    for i in range(num_qubits):
        qc.cx(i, i + num_qubits)
    return qc
def encode_hadamard(num_qubits: int) -> "Gate":
    """Apply a Hadamard to every qubit, preparing a uniform superposition."""
    qc = QuantumCircuit(num_qubits, name="encoding")
    for i in range(num_qubits):
        qc.h(i)
    return qc
def classical(x: List[int], y: List[int]) -> int:
    """Classical reference for the quantum in-circle check.

    Interprets *x* and *y* as binary fractions (MSB first), i.e.
    ``num = sum(bit_i * 2**-(i + 1))``, and returns 1 if the point
    (num1, num2) lies strictly inside the unit circle, else 0.
    """
    num1 = 0
    for i in range(len(x)):
        num1 += x[i] * 0.5 ** (i + 1)
    num2 = 0
    for i in range(len(y)):
        # Bug fix: this previously read `x[i]`, which ignored y entirely and
        # raised IndexError whenever y was longer than x.
        num2 += y[i] * 0.5 ** (i + 1)
    result = num1 ** 2 + num2 ** 2
    if result < 1:
        return 1
    return 0
def operator(bits_per_input, x1, x2, useQAE):
    """Build the circle-membership circuit: x1^2 + x2^2 < 1 test.

    Encodes two binary-fraction inputs (or uniform superpositions when
    both x1 and x2 are empty), squares each via a QFT multiplier, adds
    the squares, and compares the sum against the circle bound.  When
    ``useQAE`` is False the comparison flag is measured into one
    classical bit; otherwise the circuit is returned unmeasured for use
    as an amplitude-estimation A operator.
    """
    num_bits_after_mult = 2 * bits_per_input
    num_bits_comparer = num_bits_after_mult + 1
    # total width: 4 input registers + 2 product registers + 1 carry + comparer workspace
    nQubits = 4 * bits_per_input + 2 * num_bits_after_mult + 1 + num_bits_comparer
    nClassical = 1
    input_register_1 = QuantumRegister(size=bits_per_input)
    input_register_1_copy = QuantumRegister(size=bits_per_input)
    input_register_2 = QuantumRegister(size=bits_per_input)
    input_register_2_copy = QuantumRegister(size=bits_per_input)
    if len(x1) == 0 and len(x2) == 0:
        # no fixed inputs: superpose each register and CNOT-copy it so the
        # multiplier computes a square of the same (entangled) value
        input_circuit_1 = encode_hadamard(bits_per_input)
        input_circuit_1_copy = encode_hadamard_copy(bits_per_input)
        input_circuit_2 = encode_hadamard(bits_per_input)
        input_circuit_2_copy = encode_hadamard_copy(bits_per_input)
    else:
        # fixed inputs: encode the same bit string twice per coordinate
        input_circuit_1 = encode_input(x1)
        input_circuit_1_copy = encode_input(x1)
        input_circuit_2 = encode_input(x2)
        input_circuit_2_copy = encode_input(x2)
    carry_qubits_mult_1 = QuantumRegister(size=num_bits_after_mult)
    carry_qubits_mult_2 = QuantumRegister(size=num_bits_after_mult)
    carry_qubits_comparer = QuantumRegister(size=num_bits_comparer)
    carry_qubit_addition = QuantumRegister(size=1) # 1 additional qubit for addition
    if useQAE == False:
        # sampling mode needs a classical bit for the comparison result
        output_register = ClassicalRegister(size=nClassical)
        circuit = QuantumCircuit(
            input_register_1,
            input_register_1_copy,
            input_register_2,
            input_register_2_copy,
            carry_qubits_mult_1,
            carry_qubits_mult_2,
            carry_qubit_addition,
            carry_qubits_comparer,
            output_register
        )
    else:
        circuit = QuantumCircuit(
            input_register_1,
            input_register_1_copy,
            input_register_2,
            input_register_2_copy,
            carry_qubits_mult_1,
            carry_qubits_mult_2,
            carry_qubit_addition,
            carry_qubits_comparer
        )
    # encoding
    circuit.append(input_circuit_1, input_register_1[:])
    if len(x1) == 0 and len(x2) == 0:
        # copy gate spans source + destination registers
        circuit.append(input_circuit_1_copy, input_register_1[:] + input_register_1_copy[:])
    else:
        circuit.append(input_circuit_1_copy, input_register_1_copy[:])
    circuit.append(input_circuit_2, input_register_2[:])
    if len(x1) == 0 and len(x2) == 0:
        circuit.append(input_circuit_2_copy, input_register_2[:] + input_register_2_copy[:])
    else:
        circuit.append(input_circuit_2_copy, input_register_2_copy[:])
    # multiplication
    multiplicator = RGQFTMultiplier(num_state_qubits=bits_per_input)
    circuit.append(multiplicator, input_register_1[:] + input_register_1_copy[:] + carry_qubits_mult_1[:])
    circuit.append(multiplicator, input_register_2[:] + input_register_2_copy[:] + carry_qubits_mult_2[:])
    # addition
    adder = DraperQFTAdder(num_bits_after_mult, kind="half")
    circuit.append(adder, carry_qubits_mult_1[:] + carry_qubits_mult_2[:] + carry_qubit_addition[:])
    # inequality check if in circle
    s = 2 ** (bits_per_input) - 1
    comparer = IntegerComparator(num_bits_after_mult + 1, s * s + 1, False)
    circuit.append(comparer, carry_qubits_mult_2[:] + carry_qubit_addition[:] + carry_qubits_comparer[:])
    # readout
    if useQAE == False:
        # the comparer writes its flag to the first comparer qubit
        circuit.measure(nQubits - num_bits_comparer, 0)
    return circuit
def get_circuit(**kwargs):
    """Build the in-circle estimation circuit (pi estimation via QAE).

    With ``useQAE`` True, wraps the :func:`operator` circuit in an
    IterativeAmplitudeEstimation problem and returns the constructed
    (decomposed) estimation circuit; otherwise samples the operator
    directly on the qasm simulator and returns None.
    """
    bits_per_input =1 # num qubits per number
    useQAE =True
    # empty input lists -> uniform superposition over both coordinates
    circuit =operator(bits_per_input,[],[],useQAE)
    #circuit.draw('mpl')
    #backend = FakeMontreal()
    #circuit_transpiled = transpile(circuit,backend)
    #circuit_transpiled.draw('mpl')
    #circuit_transpiled.depth()
    backend =Aer.get_backend('qasm_simulator')
    circuit_to_return = None
    if useQAE == True:
        quantum_instance = Aer.get_backend('qasm_simulator')
        # recompute the operator's register layout to locate the flag qubit
        num_bits_after_mult = 2 * bits_per_input
        num_bits_comparer =num_bits_after_mult+1
        nQubits=4*bits_per_input + 2*num_bits_after_mult+1 +num_bits_comparer
        problem = EstimationProblem(
            state_preparation=circuit.decompose(), # A operator
            objective_qubits=[nQubits-num_bits_comparer], # the "good" state Psi1 is identified as measuring |1> in qubit 0
        )
        print(f"Depth: ", circuit.depth())
        print(f"Width: ", circuit.num_qubits)
        # canonical QAE instance (constructed for comparison, not used below)
        ae = AmplitudeEstimation(
            num_eval_qubits=3, # the number of evaluation qubits specifies circuit width and accuracy
            quantum_instance=quantum_instance,
        )
        iae = IterativeAmplitudeEstimation(
            epsilon_target=0.01, # target accuracy
            alpha=0.05, # width of the confidence interval
            quantum_instance=quantum_instance,
        )
        circuit_to_return = iae.construct_circuit(problem, measurement=True).decompose()
        print(circuit_to_return)
        result =iae.estimate(problem)
        print(result)
    else:
        # direct sampling path: measure the comparison flag 1000 times
        shots =1000
        job= execute(circuit,backend=backend,shots=shots)
        counts =job.result().get_counts()
        print(counts)
        #print(4 * counts['1'] /shots )
    return circuit_to_return
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumCircuit
# n = 3 # number of qubits used to represent s
# s = '011' # the hidden binary string
# https://qiskit.org/textbook/ch-algorithms/bernstein-vazirani.html
def get_circuit(**kwargs):
    """Build the Bernstein-Vazirani circuit for a hidden bit string.

    kwargs:
        number_of_qubits (int): length n of the hidden string.
        s (str): the hidden binary string, MSB first.

    Returns an (n+1)-qubit circuit (last qubit is the |-> ancilla) with
    the n data qubits measured into n classical bits.
    """
    n = kwargs["number_of_qubits"]
    s = kwargs["s"]
    # We need a circuit with n qubits, plus one ancilla qubit
    # Also need n classical bits to write the output to
    bv_circuit = QuantumCircuit(n + 1, n)
    # put ancilla in state |->  (H then Z)
    bv_circuit.h(n)
    bv_circuit.z(n)
    # Apply Hadamard gates before querying the oracle
    for i in range(n):
        bv_circuit.h(i)
    # Apply barrier
    bv_circuit.barrier()
    # Apply the inner-product oracle
    s = s[::-1]  # reverse s to fit qiskit's qubit ordering
    for q in range(n):
        if s[q] == '0':
            # Fix: QuantumCircuit.iden() was deprecated and removed from
            # Qiskit; the identity gate is QuantumCircuit.id().
            bv_circuit.id(q)
        else:
            bv_circuit.cx(q, n)
    # Apply barrier
    bv_circuit.barrier()
    # Apply Hadamard gates after querying the oracle
    for i in range(n):
        bv_circuit.h(i)
    # Measurement
    for i in range(n):
        bv_circuit.measure(i, i)
    return bv_circuit
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
# https://github.com/Qiskit/qiskit-community-tutorials/blob/master/algorithms/bernstein_vazirani.ipynb
def get_circuit(**kwargs):
    """Bernstein-Vazirani circuit with the hidden string given as integer ``a``.

    kwargs:
        number_of_qubits (int): register width n.
        a (int): hidden string; reduced mod 2^n, bit i selects a Z-oracle gate.

    Returns the n-qubit circuit with all qubits measured.
    """
    number_of_qubits = kwargs["number_of_qubits"]
    a = kwargs["a"]
    a = a % 2**(number_of_qubits)  # a = a mod 2^(number_of_qubits)
    print(a)
    qr = QuantumRegister(number_of_qubits)
    cr = ClassicalRegister(number_of_qubits)
    qc = QuantumCircuit(qr, cr)
    # hadamard gates
    for i in range(number_of_qubits):
        qc.h(qr[i])
    qc.barrier()
    # inner product oracle
    for i in range(number_of_qubits):
        if (a & (1 << i)):  # if bin(a)[i] = 1 then use Z gate
            qc.z(qr[i])
        else:
            # Fix: QuantumCircuit.iden() was deprecated and removed from
            # Qiskit; the identity gate is QuantumCircuit.id().
            qc.id(qr[i])
    qc.barrier()
    # hadamard gates
    for i in range(number_of_qubits):
        qc.h(qr[i])
    # measurement
    qc.barrier(qr)
    qc.measure(qr, cr)
    return qc
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
# https://github.com/Qiskit/qiskit-community-tutorials/blob/master/algorithms/bernstein_vazirani.ipynb
# https://doi.org/10.1073/pnas.1618020114
def get_circuit(**kwargs):
    """Bernstein-Vazirani circuit with the hidden string given as integer ``s``.

    Uses an explicit |-> ancilla (qubit n) and a CNOT for every set bit
    of ``s``; only the n data qubits are measured.
    """
    n = kwargs["number_of_qubits"]
    s = kwargs["s"]
    s = s % 2 ** (n)  # reduce the hidden string modulo 2^n
    print(s)
    ancilla = n
    qr = QuantumRegister(n + 1)
    cr = ClassicalRegister(n)
    qc = QuantumCircuit(qr, cr)
    # Hadamards everywhere, then Z turns the ancilla |+> into |->
    for qubit in range(n + 1):
        qc.h(qr[qubit])
    qc.z(qr[ancilla])
    # inner-product oracle: a CNOT onto the ancilla for every set bit of s
    for bit in range(n):
        if s & (1 << bit):
            qc.cx(qr[bit], qr[ancilla])
    # closing Hadamards on the data qubits reveal s
    for qubit in range(n):
        qc.h(qr[qubit])
    # read out the data register only
    for qubit in range(n):
        qc.measure(qr[qubit], cr[qubit])
    return qc
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit
# Static 5-qubit Bernstein-Vazirani instance built at import time.
qc = QuantumCircuit()
q = QuantumRegister(5, 'q')
c = ClassicalRegister(5, 'c')
qc.add_register(q)
qc.add_register(c)
# uniform superposition over the four data qubits
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[3])
# add identity gates to the circuit to enable replacing the oracle after 2 gates per qubit
qc.i(q[0])
qc.i(q[1])
qc.i(q[2])
qc.i(q[3])
# ancilla qubit: H then Z prepares |->
qc.h(q[4])
qc.z(q[4])
qc.barrier()
# searched bit string: s = 00110 (first bit is ancilla and using qiskit's reverse qubit ordering)
qc.cx(q[1], q[4])
qc.cx(q[2], q[4])
qc.barrier()
# closing Hadamards reveal s on the data qubits
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[3])
qc.i(q[4])
qc.measure([0, 1, 2, 3], [0, 1, 2, 3])
def get_circuit(**kwargs):
    """Get base circuit of the Bernstein-Vazirani algorithm.

    kwargs are accepted for interface compatibility but ignored; the
    module-level circuit above is returned unchanged.
    """
    return qc
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, execute, Aer
# https://quantum-circuit.com/app_details/about/66bpe6Jf5mgQMahgd
# oracle = '(A | B) & (A | ~B) & (~A | B)'
# Grover search for the SAT oracle '(A | B) & (A | ~B) & (~A | B)',
# built at import time; qubits 2-6 are clause/AND workspace, qubit 7 is
# the |-> phase-kickback ancilla, qubits 0-1 hold A and B.
qc = QuantumCircuit()
q = QuantumRegister(8, 'q')
c = ClassicalRegister(2, 'c')
qc.add_register(q)
qc.add_register(c)
# superpose the two variable qubits
qc.h(q[0])
qc.h(q[1])
# initialize clause workspace and ancilla
qc.x(q[2])
qc.x(q[3])
qc.x(q[4])
qc.x(q[7])
qc.x(q[0])
qc.x(q[1])
qc.h(q[7])
# clause evaluation via Toffolis (each clause flips its workspace qubit)
qc.ccx(q[0], q[1], q[2])
qc.x(q[0])
qc.x(q[1])
qc.x(q[1])
qc.ccx(q[0], q[1], q[3])
qc.x(q[0])
qc.x(q[1])
qc.ccx(q[1], q[0], q[4])
qc.x(q[0])
# AND the clause results together
qc.ccx(q[3], q[2], q[5])
qc.x(q[0])
qc.ccx(q[5], q[4], q[6])
# kick the phase onto the |-> ancilla when all clauses hold
qc.cx(q[6], q[7])
# uncompute the workspace (mirror of the forward sequence)
qc.ccx(q[4], q[5], q[6])
qc.ccx(q[2], q[3], q[5])
qc.x(q[4])
qc.ccx(q[0], q[1], q[4])
qc.x(q[0])
qc.x(q[1])
qc.x(q[3])
qc.ccx(q[0], q[1], q[3])
qc.x(q[0])
qc.x(q[1])
qc.x(q[2])
qc.x(q[1])
qc.ccx(q[0], q[1], q[2])
qc.x(q[0])
qc.x(q[1])
# Grover diffuser on the two variable qubits
qc.h(q[0])
qc.h(q[1])
qc.x(q[0])
qc.x(q[1])
qc.cz(q[0], q[1])
qc.x(q[0])
qc.x(q[1])
qc.h(q[0])
qc.h(q[1])
qc.measure(q[0], c[0])
qc.measure(q[1], c[1])
def get_circuit(**kwargs):
    """Return the prebuilt Grover-SAT circuit; kwargs are ignored."""
    return qc
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit
# https://quantum-circuit.com/app_details/about/LFQv9PKwerh3EzrLw
# searched oracle element is '0010'
# Grover search with a phase oracle marking '0010', built at import time.
# The oracle and diffuser are each realized as an X-conjugated cascade of
# controlled-RZ rotations (multi-controlled phase decomposition).
qc = QuantumCircuit()
q = QuantumRegister(4, 'q')
ro = ClassicalRegister(4, 'ro')
qc.add_register(q)
qc.add_register(ro)
# uniform superposition
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[3])
# oracle: X gates select the marked element, then the CRZ cascade
qc.x(q[0])
qc.x(q[2])
qc.x(q[3])
qc.crz(0.785398163397, q[0], q[3])
qc.cx(q[0], q[1])
qc.crz(-0.785398163397, q[1], q[3])
qc.cx(q[0], q[1])
qc.crz(0.785398163397, q[1], q[3])
qc.cx(q[1], q[2])
qc.crz(-0.785398163397, q[2], q[3])
qc.cx(q[0], q[2])
qc.crz(0.785398163397, q[2], q[3])
qc.cx(q[1], q[2])
qc.crz(-0.785398163397, q[2], q[3])
qc.cx(q[0], q[2])
qc.crz(0.785398163397, q[2], q[3])
qc.x(q[0])
qc.x(q[2])
qc.x(q[3])
# diffuser: H, X, the same multi-controlled-phase cascade, X, H
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[3])
qc.x(q[0])
qc.x(q[1])
qc.x(q[2])
qc.x(q[3])
qc.crz(0.785398163397, q[0], q[3])
qc.cx(q[0], q[1])
qc.crz(-0.785398163397, q[1], q[3])
qc.cx(q[0], q[1])
qc.crz(0.785398163397, q[1], q[3])
qc.cx(q[1], q[2])
qc.crz(-0.785398163397, q[2], q[3])
qc.cx(q[0], q[2])
qc.crz(0.785398163397, q[2], q[3])
qc.cx(q[1], q[2])
qc.crz(-0.785398163397, q[2], q[3])
qc.cx(q[0], q[2])
qc.crz(0.785398163397, q[2], q[3])
qc.x(q[0])
qc.x(q[1])
qc.x(q[2])
qc.x(q[3])
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[3])
# read out the full register
qc.measure(q[0], ro[0])
qc.measure(q[1], ro[1])
qc.measure(q[2], ro[2])
qc.measure(q[3], ro[3])
def get_circuit(**kwargs):
    """Return the prebuilt Grover circuit; kwargs are ignored."""
    return qc
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
import requests
from qiskit import QuantumCircuit
import json
def get_circuit(**kwargs):
    """Request a QAOA MaxCut circuit from the circuit-generator service.

    Posts the adjacency matrix and the beta/gamma angle lists as JSON,
    then rebuilds the returned OpenQASM string into a QuantumCircuit.
    Returns None when the service sends no circuit.
    """
    payload = json.dumps({
        "adj_matrix": kwargs["adj_matrix"],
        "betas": kwargs["betas"],
        "gammas": kwargs["gammas"],
    })
    response = requests.post(
        'http://quantum-circuit-generator:5073/algorithms/qaoa/maxcut',
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    qasm = json.loads(response.text)['circuit']
    if qasm is None:
        return None
    return QuantumCircuit.from_qasm_str(qasm)
def post_processing(**kwargs):
    """Send measurement counts to the objective-evaluation service.

    kwargs:
        adj_matrix: adjacency matrix of the MaxCut instance.
        counts: measurement counts from the executed circuit.

    Returns the service response body re-serialized with json.dumps.
    """
    adj_matrix = kwargs["adj_matrix"]
    counts = kwargs["counts"]
    data = json.dumps({"adj_matrix": adj_matrix, "counts": counts, "objFun": "Expectation", "visualization": "True"})
    headers = {"Content-Type": "application/json"}
    response = requests.post('http://objective-evaluation-service:5072/objective/max-cut', data=data, headers=headers)
    # NOTE(review): json.dumps(response.text) double-encodes the JSON body
    # (the caller receives a quoted string literal); json.loads may have
    # been intended -- confirm against the consumer of this result.
    return json.dumps(response.text)
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit
# https://quantum-circuit.com/app_details/about/bw5r9HTiTHvQHtCB5
# Compiled Shor circuit for factoring N = 15, built at import time.
# NOTE(review): cu1() is the legacy controlled-phase gate (renamed cp()
# in later Qiskit releases); here all angles are 0, matching the
# simplified period-finding instance from the linked reference.
qc = QuantumCircuit()
q = QuantumRegister(5, 'q')
c = ClassicalRegister(3, 'c')
qc.add_register(q)
qc.add_register(c)
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[1])
qc.cx(q[2], q[3])
qc.cu1(0, q[1], q[0])
qc.cx(q[2], q[4])
qc.h(q[0])
qc.cu1(0, q[1], q[2])
qc.cu1(0, q[0], q[2])
qc.h(q[2])
# measure the three counting qubits
qc.measure(q[0], c[0])
qc.measure(q[1], c[1])
qc.measure(q[2], c[2])
def get_circuit(**kwargs):
    """Get circuit of Shor with input 15.  kwargs are ignored."""
    return qc
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit
# Static 5-qubit Bernstein-Vazirani instance built at import time.
qc = QuantumCircuit()
q = QuantumRegister(5, 'q')
c = ClassicalRegister(5, 'c')
qc.add_register(q)
qc.add_register(c)
# uniform superposition over the four data qubits
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[3])
# add identity gates to the circuit to enable replacing the oracle after 2 gates per qubit
qc.i(q[0])
qc.i(q[1])
qc.i(q[2])
qc.i(q[3])
# ancilla qubit: H then Z prepares |->
qc.h(q[4])
qc.z(q[4])
qc.barrier()
# searched bit string: s = 00110 (first bit is ancilla and using qiskit's reverse qubit ordering)
qc.cx(q[1], q[4])
qc.cx(q[2], q[4])
qc.barrier()
# closing Hadamards reveal s on the data qubits
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[3])
qc.i(q[4])
qc.measure([0, 1, 2, 3], [0, 1, 2, 3])
def get_circuit(**kwargs):
    """Get base circuit of the Bernstein-Vazirani algorithm.

    kwargs are accepted for interface compatibility but ignored; the
    module-level circuit above is returned unchanged.
    """
    return qc
|
https://github.com/UST-QuAntiL/nisq-analyzer-content
|
UST-QuAntiL
|
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit
# https://quantum-circuit.com/app_details/about/bw5r9HTiTHvQHtCB5
# Compiled Shor circuit for factoring N = 15, built at import time.
# NOTE(review): cu1() is the legacy controlled-phase gate (renamed cp()
# in later Qiskit releases); here all angles are 0, matching the
# simplified period-finding instance from the linked reference.
qc = QuantumCircuit()
q = QuantumRegister(5, 'q')
c = ClassicalRegister(3, 'c')
qc.add_register(q)
qc.add_register(c)
qc.h(q[0])
qc.h(q[1])
qc.h(q[2])
qc.h(q[1])
qc.cx(q[2], q[3])
qc.cu1(0, q[1], q[0])
qc.cx(q[2], q[4])
qc.h(q[0])
qc.cu1(0, q[1], q[2])
qc.cu1(0, q[0], q[2])
qc.h(q[2])
# measure the three counting qubits
qc.measure(q[0], c[0])
qc.measure(q[1], c[1])
qc.measure(q[2], c[2])
def get_circuit(**kwargs):
    """Get circuit of Shor with input 15.  kwargs are ignored."""
    return qc
|
https://github.com/tula3and/qoupang
|
tula3and
|
from qiskit import *
# from qiskit import IBMQ
# IBMQ.save_account('<API Token>')
# provider = IBMQ.load_account()
# backend = IBMQ.get_provider(hub='ibm-q-kaist', group='internal', project='default').backends.ibmq_manhattan
# Classical simulator backend used to run the hashing circuit.
backend = Aer.get_backend('qasm_simulator')
# 48 qubits in uniform superposition; each is then CNOT-ed onto the last
# qubit so a single shot yields a 48-bit (pseudo)random measurement string.
q = QuantumRegister(48)
c = ClassicalRegister(48)
circuit = QuantumCircuit(q,c)
circuit.h(q)
for i in range(47):
    circuit.cx(q[i], q[47])
circuit.measure(q,c)
import string
# 62-character alphabet (A-Z, a-z, 0-9) used to render the hash.
table = string.ascii_uppercase + string.ascii_lowercase + string.digits
def hash8():
    """Produce an 8-character hash from one shot of the 48-qubit circuit.

    The measured 48-bit string is cut into eight 6-bit chunks; each chunk
    indexes (mod 62) into the module-level alphanumeric ``table``.
    """
    outcome = execute(circuit, backend, shots=1).result()
    counts = outcome.get_counts(circuit)
    # a single shot yields exactly one outcome; max() extracts its key
    bits = max(counts, key=lambda bitstring: counts[bitstring])
    chars = []
    for offset in range(0, 48, 6):
        chunk = bits[offset:offset + 6]
        chars.append(table[int(chunk, 2) % len(table)])
    return ''.join(chars)
|
https://github.com/ashishpatel26/IBM-Quantum-Challenge-Fall-2021
|
ashishpatel26
|
# IBM Quantum Challenge Fall 2021, exercise 1: portfolio optimization.
# NOTE(review): this is a Jupyter-notebook dump -- the '%matplotlib inline'
# magic and the bare expression statements ('stocks', 'data._tickers') are
# IPython-only; the file will not run as a plain Python module.
#Let us begin by importing necessary libraries.
from qiskit import Aer
from qiskit.algorithms import VQE, QAOA, NumPyMinimumEigensolver
from qiskit.algorithms.optimizers import *
from qiskit.circuit.library import TwoLocal
from qiskit.utils import QuantumInstance
from qiskit.utils import algorithm_globals
from qiskit_finance import QiskitFinanceError
from qiskit_finance.applications.optimization import PortfolioOptimization
from qiskit_finance.data_providers import *
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit_optimization.applications import OptimizationApplication
from qiskit_optimization.converters import QuadraticProgramToQubo
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import datetime
import warnings
from sympy.utilities.exceptions import SymPyDeprecationWarning
warnings.simplefilter("ignore", SymPyDeprecationWarning)
### Set parameters for assets and risk factor ###
num_assets = 4 # Number of assets to n = 4
q = 0.5 # Risk factor to q= 0.5
budget = 2 # set budget as defined in the problem (2 stands for budget)
seed = 132 # set random seed for reproducibility
### Generate time series data ###
stocks = [ ("STOCK%s" % i) for i in range(num_assets)]
data = RandomDataProvider(tickers=stocks,
                 start=datetime.datetime(1955,11,5),
                 end=datetime.datetime(1985,10,26),
                 seed=seed)
data.run()
# Stocks
stocks
data._tickers
### Let's plot our financial data ###
for (cnt, s) in enumerate(data._tickers):
    plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.xlabel('days')
plt.ylabel('stock value')
plt.show()
### Let's calculate the expected return for our problem data ###
# Returns a vector containing the mean value of each asset's expected return.
mu = data.get_period_return_mean_vector()
print(mu)
### Let's plot our covariance matrix Σ(sigma)###
sigma = data.get_period_return_covariance_matrix() #Returns the covariance matrix of the four assets
print(sigma)
fig, ax = plt.subplots(1,1)
im = plt.imshow(sigma, extent=[-1,1,-1,1])
x_label_list = ['stock3', 'stock2', 'stock1', 'stock0']
y_label_list = ['stock3', 'stock2', 'stock1', 'stock0']
ax.set_xticks([-0.75,-0.25,0.25,0.75])
ax.set_yticks([0.75,0.25,-0.25,-0.75])
ax.set_xticklabels(x_label_list)
ax.set_yticklabels(y_label_list)
plt.colorbar()
plt.clim(-0.000002, 0.00001)
plt.show()
##############################
### Provide your code here ###
# Exercise 1a: build the mean-variance portfolio model as a QuadraticProgram.
from qiskit_finance.applications.optimization import PortfolioOptimization
mu = data.get_period_return_mean_vector() # Expected Return
sigma = data.get_period_return_covariance_matrix() # Covariance
q = 0.5 # Risk factor to q= 0.5
budget = 2 # set budget as defined in the problem (2 stands for budget)
portfolio_ = PortfolioOptimization(expected_returns=mu,
                                   covariances=sigma,
                                   risk_factor=q,budget=budget)
# Transform Portfolio into Quadratic Program
qp = portfolio_.to_quadratic_program()
##############################
print(qp)
# Check your answer and submit using the following code
from qc_grader import grade_ex1a
grade_ex1a(qp)
# Classical reference solution via exact eigensolver
exact_mes = NumPyMinimumEigensolver()
exact_eigensolver = MinimumEigenOptimizer(exact_mes)
result = exact_eigensolver.solve(qp)
print(result)
optimizer = SLSQP(maxiter=1000)
algorithm_globals.random_seed = 1234
backend = Aer.get_backend('statevector_simulator')
##############################
# Provide your code here
# Exercise 1b: solve the same program with VQE.
from qiskit.algorithms import VQE
vqe = VQE(optimizer=optimizer, quantum_instance=backend)
##############################
vqe_meo = MinimumEigenOptimizer(vqe) #please do not change this code
result = vqe_meo.solve(qp) #please do not change this code
print(result) #please do not change this code
# Check your answer and submit using the following code
from qc_grader import grade_ex1b
grade_ex1b(vqe, qp)
#Step 1: Let us begin by importing necessary libraries
import qiskit
from qiskit import Aer
from qiskit.algorithms import VQE, QAOA, NumPyMinimumEigensolver
from qiskit.algorithms.optimizers import *
from qiskit.circuit.library import TwoLocal
from qiskit.utils import QuantumInstance
from qiskit.utils import algorithm_globals
from qiskit_finance import QiskitFinanceError
from qiskit_finance.applications.optimization import *
from qiskit_finance.data_providers import *
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit_optimization.applications import OptimizationApplication
from qiskit_optimization.converters import QuadraticProgramToQubo
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import datetime
import warnings
from sympy.utilities.exceptions import SymPyDeprecationWarning
warnings.simplefilter("ignore",SymPyDeprecationWarning)
# Step 2. Generate time series data for four assets.
# Do not change start/end dates specified to generate problem data.
seed = 132
num_assets = 4
stocks = [("STOCK%s" % i) for i in range(num_assets)]
data = RandomDataProvider(tickers=stocks,
                 start=datetime.datetime(1955,11,5),
                 end=datetime.datetime(1985,10,26),
                 seed=seed)
data.run()
# Let's plot our financial data (We are generating the same time series data as in the previous example.)
for (cnt, s) in enumerate(data._tickers):
    plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.xlabel('days')
plt.ylabel('stock value')
plt.show()
# Step 3. Calculate mu and sigma for this problem
mu2 = data.get_period_return_mean_vector() #Returns a vector containing the mean value of each asset.
sigma2 = data.get_period_return_covariance_matrix() #Returns the covariance matrix associated with the assets.
print(mu2, sigma2)
# Step 4. Set parameters and constraints based on this challenge 1c
##############################
# Provide your code here
q2 = 0.5 #Set risk factor to 0.5
budget2 = 3 #Set budget to 3
##############################
# Step 5. Complete code to generate the portfolio instance
##############################
# Provide your code here
#The bounds must be a list of tuples of integers.
portfolio2 = PortfolioOptimization(expected_returns=mu2,
                                   covariances=sigma2,
                                   risk_factor=q2,budget=budget2,
                                   bounds = [(0,2), (0,2), (0,2), (0,2)]
                                  )
qp2 = portfolio2.to_quadratic_program()
##############################
# Step 6. Now let's use QAOA to solve this problem.
optimizer = SLSQP(maxiter=1000)
algorithm_globals.random_seed = 1234
backend = Aer.get_backend('statevector_simulator')
##############################
# Provide your code here
qaoa = QAOA(optimizer =optimizer, reps=3, quantum_instance=backend)
##############################
qaoa_meo = MinimumEigenOptimizer(qaoa) #please do not change this code
result2 = qaoa_meo.solve(qp2) #please do not change this code
print(result2) #please do not change this code
# Check your answer and submit using the following code
from qc_grader import grade_ex1c
grade_ex1c(qaoa, qp2)
|
https://github.com/ashishpatel26/IBM-Quantum-Challenge-Fall-2021
|
ashishpatel26
|
# IBM Quantum Challenge Fall 2021, exercise 2: band-gap calculation of the
# PSPCz OLED molecule with Qiskit Nature.
# NOTE(review): this is a Jupyter-notebook dump; it depends on challenge-only
# packages (qc_grader, jupyterplot) and local HDF5 resources.
from qiskit_nature.drivers import Molecule
from qiskit_nature.drivers.second_quantization import ElectronicStructureDriverType, ElectronicStructureMoleculeDriver
# PSPCz molecule
geometry = [['C', [ -0.2316640, 1.1348450, 0.6956120]],
            ['C', [ -0.8886300, 0.3253780, -0.2344140]],
            ['C', [ -0.1842470, -0.1935670, -1.3239330]],
            ['C', [ 1.1662930, 0.0801450, -1.4737160]],
            ['C', [ 1.8089230, 0.8832220, -0.5383540]],
            ['C', [ 1.1155860, 1.4218050, 0.5392780]],
            ['S', [ 3.5450920, 1.2449890, -0.7349240]],
            ['O', [ 3.8606900, 1.0881590, -2.1541690]],
            ['C', [ 4.3889120, -0.0620730, 0.1436780]],
            ['O', [ 3.8088290, 2.4916780, -0.0174650]],
            ['C', [ 4.6830900, 0.1064460, 1.4918230]],
            ['C', [ 5.3364470, -0.9144080, 2.1705280]],
            ['C', [ 5.6895490, -2.0818670, 1.5007820]],
            ['C', [ 5.4000540, -2.2323130, 0.1481350]],
            ['C', [ 4.7467230, -1.2180160, -0.5404770]],
            ['N', [ -2.2589180, 0.0399120, -0.0793330]],
            ['C', [ -2.8394600, -1.2343990, -0.1494160]],
            ['C', [ -4.2635450, -1.0769890, 0.0660760]],
            ['C', [ -4.5212550, 0.2638010, 0.2662190]],
            ['C', [ -3.2669630, 0.9823890, 0.1722720]],
            ['C', [ -2.2678900, -2.4598950, -0.3287380]],
            ['C', [ -3.1299420, -3.6058560, -0.3236210]],
            ['C', [ -4.5179520, -3.4797390, -0.1395160]],
            ['C', [ -5.1056310, -2.2512990, 0.0536940]],
            ['C', [ -5.7352450, 1.0074800, 0.5140960]],
            ['C', [ -5.6563790, 2.3761270, 0.6274610]],
            ['C', [ -4.4287740, 3.0501460, 0.5083650]],
            ['C', [ -3.2040560, 2.3409470, 0.2746950]],
            ['H', [ -0.7813570, 1.5286610, 1.5426490]],
            ['H', [ -0.7079140, -0.7911480, -2.0611600]],
            ['H', [ 1.7161320, -0.2933710, -2.3302930]],
            ['H', [ 1.6308220, 2.0660550, 1.2427990]],
            ['H', [ 4.4214900, 1.0345500, 1.9875450]],
            ['H', [ 5.5773000, -0.7951290, 3.2218590]],
            ['H', [ 6.2017810, -2.8762260, 2.0345740]],
            ['H', [ 5.6906680, -3.1381740, -0.3739110]],
            ['H', [ 4.5337010, -1.3031330, -1.6001680]],
            ['H', [ -1.1998460, -2.5827750, -0.4596910]],
            ['H', [ -2.6937370, -4.5881470, -0.4657540]],
            ['H', [ -5.1332290, -4.3740010, -0.1501080]],
            ['H', [ -6.1752900, -2.1516170, 0.1987120]],
            ['H', [ -6.6812260, 0.4853900, 0.6017680]],
            ['H', [ -6.5574610, 2.9529350, 0.8109620]],
            ['H', [ -4.3980410, 4.1305040, 0.5929440]],
            ['H', [ -2.2726630, 2.8838620, 0.1712760]]]
molecule = Molecule(geometry=geometry, charge=0, multiplicity=1)
driver = ElectronicStructureMoleculeDriver(molecule=molecule,
                                    basis='631g*',
                                    driver_type=ElectronicStructureDriverType.PYSCF)
# Exercise 2a: count atoms per element and the resulting orbital totals.
C_counter = 0
H_counter = 0
N_counter = 0
O_counter = 0
S_counter = 0
for i in range(len(molecule.geometry)):
    if molecule.geometry[i][0] == "C":
        C_counter = C_counter + 1
    elif molecule.geometry[i][0] == "H":
        H_counter = H_counter + 1
    elif molecule.geometry[i][0] == "N":
        N_counter = N_counter + 1
    elif molecule.geometry[i][0] == "O":
        O_counter = O_counter + 1
    elif molecule.geometry[i][0] == "S":
        S_counter = S_counter + 1
# atomic orbitals per element in the 6-31G* basis
num_ao = {
    'C': 14,
    'H': 2,
    'N': 14,
    'O': 14,
    'S': 18,
}
##############################
# Provide your code here
num_C_atom = C_counter
num_H_atom = H_counter
num_N_atom = N_counter
num_O_atom = O_counter
num_S_atom = S_counter
# NOTE(review): Molecule may not expose an ``atoms`` attribute --
# len(molecule.geometry) is used everywhere else; verify.
num_atoms_total = len(molecule.atoms)
num_AO_total = (14*num_C_atom)+(2*num_H_atom)+(14*num_N_atom)+(14*num_O_atom)+(18*num_S_atom)
num_MO_total = num_AO_total
##############################
answer_ex2a ={
    'C': num_C_atom,
    'H': num_H_atom,
    'N': num_N_atom,
    'O': num_O_atom,
    'S': num_S_atom,
    'atoms': num_atoms_total,
    'AOs': num_AO_total,
    'MOs': num_MO_total
}
print(answer_ex2a)
# Check your answer and submit using the following code
from qc_grader import grade_ex2a
grade_ex2a(answer_ex2a)
# Load the pre-reduced (active-space) problem from HDF5.
from qiskit_nature.drivers.second_quantization import HDF5Driver
driver_reduced = HDF5Driver("resources/PSPCz_reduced.hdf5")
properties = driver_reduced.run()
#print(properties)
from qiskit_nature.properties.second_quantization.electronic import ElectronicEnergy
electronic_energy = properties.get_property(ElectronicEnergy)
print(electronic_energy)
from qiskit_nature.results import ElectronicStructureResult
import numpy as np
# some dummy result
result = ElectronicStructureResult()
result.eigenenergies = np.asarray([-1])
result.computed_energies = np.asarray([-1])
# now, let's interpret it
electronic_energy.interpret(result)
print(result)
from qiskit_nature.properties.second_quantization.electronic import ParticleNumber
particle = (ParticleNumber(4, (2,2)))
print( particle)
from qiskit_nature.properties.second_quantization.electronic import ParticleNumber
##############################
# Provide your code here
# Exercise 2b: problem size after the active-space reduction.
#particle_number = electronic_energy.
num_electron = 2 # 2 electrons
num_MO = 2 # 2 molecular orbitals: Alpha-Beta
num_SO = 4 # 4 SOs
num_qubits = 4 # 4 qubits = 16 combinations: AlphaAlpha, AlphaBeta, BetaAlpha, BetaBeta
##############################
answer_ex2b = {
    'electrons': num_electron,
    'MOs': num_MO,
    'SOs': num_SO,
    'qubits': num_qubits
}
print(answer_ex2b)
# Check your answer and submit using the following code
from qc_grader import grade_ex2b
grade_ex2b(answer_ex2b)
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
##############################
# Provide your code here
es_problem = ElectronicStructureProblem(driver_reduced)
##############################
second_q_op = es_problem.second_q_ops()
print(second_q_op[0])
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper, ParityMapper, BravyiKitaevMapper
##############################
# Provide your code here
# map the fermionic Hamiltonian to qubits with Jordan-Wigner
qubit_converter = QubitConverter(mapper=JordanWignerMapper())
##############################
qubit_op = qubit_converter.convert(second_q_op[0])
print(qubit_op)
from qiskit_nature.circuit.library import HartreeFock
##############################
# Provide your code here
init_state = HartreeFock(num_spin_orbitals= num_SO,
                         num_particles= es_problem.num_particles,
                         qubit_converter= qubit_converter)
##############################
init_state.draw()
from qiskit.circuit.library import EfficientSU2, TwoLocal, NLocal, PauliTwoDesign
from qiskit_nature.circuit.library import UCCSD, PUCCD, SUCCD
##############################
# Provide your code here
# hardware-efficient ansatz: H+RY rotations with linear CZ entanglement
ansatz = TwoLocal(num_qubits=num_qubits, rotation_blocks=(["h","ry"]),
                  entanglement_blocks="cz",entanglement="linear",
                  reps=2)
##############################
ansatz.decompose().draw()
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit_nature.algorithms import GroundStateEigensolver
##############################
# Provide your code here
# exact classical reference for the ground-state energy
numpy_solver = NumPyMinimumEigensolver()
numpy_ground_state_solver = GroundStateEigensolver(qubit_converter, numpy_solver)
numpy_results = numpy_ground_state_solver.solve(es_problem)
##############################
exact_energy = numpy_results.computed_energies[0]
print(f"Exact electronic energy: {exact_energy:.6f} Hartree\n")
print(numpy_results)
# Check your answer and submit using the following code
from qc_grader import grade_ex2c
grade_ex2c(numpy_results)
from qiskit.providers.aer import StatevectorSimulator, QasmSimulator
from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SPSA, SLSQP
##############################
# Provide your code here
backend = StatevectorSimulator()
optimizer = COBYLA(maxiter=1000)
##############################
from qiskit.algorithms import VQE
from qiskit_nature.algorithms import VQEUCCFactory, GroundStateEigensolver
from jupyterplot import ProgressPlot
import numpy as np
error_threshold = 10 # mHartree
np.random.seed(5) # fix seed for reproducibility
initial_point = np.random.random(ansatz.num_parameters)
# for live plotting
pp = ProgressPlot(plot_names=['Energy'],
                  line_names=['Runtime VQE', f'Target + {error_threshold}mH', 'Target'])
intermediate_info = {
    'nfev': [],
    'parameters': [],
    'energy': [],
    'stddev': []
}
def callback(nfev, parameters, energy, stddev):
    # records every VQE iteration and refreshes the live plot
    intermediate_info['nfev'].append(nfev)
    intermediate_info['parameters'].append(parameters)
    intermediate_info['energy'].append(energy)
    intermediate_info['stddev'].append(stddev)
    pp.update([[energy, exact_energy+error_threshold/1000, exact_energy]])
##############################
# Provide your code here
# Exercise 2d: VQE ground-state energy
vqe = VQE(ansatz = ansatz,
          quantum_instance = backend)
vqe_ground_state_solver = GroundStateEigensolver(qubit_converter, vqe)
vqe_results = vqe_ground_state_solver.solve(es_problem)
##############################
print(vqe_results)
error = (vqe_results.computed_energies[0] - exact_energy) * 1000 # mHartree
print(f'Error is: {error:.3f} mHartree')
# Check your answer and submit using the following code
from qc_grader import grade_ex2d
grade_ex2d(vqe_results)
from qiskit_nature.algorithms import QEOM
##############################
# Provide your code here
# Exercise 2e: excited states via qEOM with single+double excitations
qeom_excited_state_solver = QEOM(vqe_ground_state_solver, "sd")
qeom_results = qeom_excited_state_solver.solve(problem=es_problem)
##############################
print(qeom_results)
# Check your answer and submit using the following code
from qc_grader import grade_ex2e
grade_ex2e(qeom_results)
# band gap = first excited energy minus ground-state energy
bandgap = qeom_results.computed_energies[1] - qeom_results.computed_energies[0]
bandgap # in Hartree
from qiskit import IBMQ
IBMQ.load_account()
from qc_grader.util import get_challenge_provider
provider = get_challenge_provider()
if provider:
    backend = provider.get_backend('ibmq_qasm_simulator')
from qiskit_nature.runtime import VQEProgram
error_threshold = 10 # mHartree
# for live plotting
pp = ProgressPlot(plot_names=['Energy'],
                  line_names=['Runtime VQE', f'Target + {error_threshold}mH', 'Target'])
intermediate_info = {
    'nfev': [],
    'parameters': [],
    'energy': [],
    'stddev': []
}
# Provide your code here
# runtime optimizer is passed as a plain settings dict
optimizer = {
    'name': 'QN-SPSA', # leverage the Quantum Natural SPSA
    # 'name': 'SPSA', # set to ordinary SPSA
    'maxiter': 100,
}
def callback(nfev, parameters, energy, stddev):
    # same bookkeeping as above, for the runtime VQE job
    intermediate_info['nfev'].append(nfev)
    intermediate_info['parameters'].append(parameters)
    intermediate_info['energy'].append(energy)
    intermediate_info['stddev'].append(stddev)
    pp.update([[energy,exact_energy+error_threshold/1000, exact_energy]])
##############################
runtime_vqe = VQEProgram(ansatz=ansatz,
                         optimizer=optimizer,
                         initial_point=initial_point,
                         provider=provider,
                         backend=backend,
                         shots=1024,
                         callback=callback)
##############################
# Submit a runtime job using the following code
from qc_grader import prepare_ex2f
runtime_job = prepare_ex2f(runtime_vqe, qubit_converter, es_problem)
# Check your answer and submit using the following code
from qc_grader import grade_ex2f
grade_ex2f(runtime_job)
print(runtime_job.result().get("eigenvalue"))
# Please change backend to ibm_perth before running the following code
runtime_job_real_device = prepare_ex2f(runtime_vqe, qubit_converter, es_problem, real_device=True)
print(runtime_job_real_device.result().get("eigenvalue"))
|
https://github.com/ashishpatel26/IBM-Quantum-Challenge-Fall-2021
|
ashishpatel26
|
# General imports
import os
import gzip
import numpy as np
import matplotlib.pyplot as plt
from pylab import cm
import warnings
warnings.filterwarnings("ignore")
# scikit-learn imports
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Qiskit imports
from qiskit import Aer, execute
from qiskit.circuit import QuantumCircuit, Parameter, ParameterVector
from qiskit.circuit.library import PauliFeatureMap, ZFeatureMap, ZZFeatureMap
from qiskit.circuit.library import TwoLocal, NLocal, RealAmplitudes, EfficientSU2
from qiskit.circuit.library import HGate, RXGate, RYGate, RZGate, CXGate, CRXGate, CRZGate
from qiskit_machine_learning.kernels import QuantumKernel
# Load MNIST dataset
DATA_PATH = './resources/ch3_part1.npz'
data = np.load(DATA_PATH)
sample_train = data['sample_train']
labels_train = data['labels_train']
sample_test = data['sample_test']
# Split train data
sample_train, sample_val, labels_train, labels_val = train_test_split(
sample_train, labels_train, test_size=0.2, random_state=42)
# Visualize samples
fig = plt.figure()
LABELS = [4, 9]
num_labels = len(LABELS)
for i in range(num_labels):
ax = fig.add_subplot(1, num_labels, i+1)
img = sample_train[labels_train==LABELS[i]][0].reshape((28, 28))
ax.imshow(img, cmap="Greys")
# Standardize
ss = StandardScaler()
sample_train = ss.fit_transform(sample_train)
sample_val = ss.transform(sample_val)
sample_test = ss.transform(sample_test)
# Reduce dimensions
N_DIM = 5
pca = PCA(n_components=N_DIM)
sample_train = pca.fit_transform(sample_train)
sample_val = pca.transform(sample_val)
sample_test = pca.transform(sample_test)
# Normalize
mms = MinMaxScaler((-1, 1))
sample_train = mms.fit_transform(sample_train)
sample_val = mms.transform(sample_val)
sample_test = mms.transform(sample_test)
# 3 features, depth 2
map_z = ZFeatureMap(feature_dimension=3, reps=2)
map_z.decompose().draw('mpl')
# 3 features, depth 1, linear entanglement
map_zz = ZZFeatureMap(feature_dimension=3, reps=1, entanglement='linear')
map_zz.decompose().draw('mpl')
# 3 features, depth 1, circular entanglement
map_zz = ZZFeatureMap(feature_dimension=3, reps=1, entanglement='circular')
map_zz.decompose().draw('mpl')
# 3 features, depth 1
map_pauli = PauliFeatureMap(feature_dimension=3, reps=1, paulis = ['X', 'Y', 'ZZ'])
map_pauli.decompose().draw('mpl')
twolocal = TwoLocal(num_qubits=3, reps=2, rotation_blocks=['ry','rz'],
entanglement_blocks='cx', entanglement='circular', insert_barriers=True)
twolocal.decompose().draw('mpl')
twolocaln = NLocal(num_qubits=3, reps=2,
rotation_blocks=[RYGate(Parameter('a')), RZGate(Parameter('a'))],
entanglement_blocks=CXGate(),
entanglement='circular', insert_barriers=True)
twolocaln.decompose().draw('mpl')
print(f'First training data: {sample_train[0]}')
encode_map = PauliFeatureMap(feature_dimension=N_DIM, reps=1, paulis = ['X', 'Y', 'ZZ'])
encode_circuit = encode_map.bind_parameters(sample_train[0])
encode_circuit.decompose().draw(output='mpl')
##############################
# Provide your code here
ex3a_fmap = ZZFeatureMap(feature_dimension=5, reps=3, entanglement='circular')
##############################
# Check your answer and submit using the following code
from qc_grader import grade_ex3a
grade_ex3a(ex3a_fmap)
pauli_map = PauliFeatureMap(feature_dimension=N_DIM, reps=1, paulis = ['X', 'Y', 'ZZ'])
pauli_kernel = QuantumKernel(feature_map=pauli_map, quantum_instance=Aer.get_backend('statevector_simulator'))
print(f'First training data : {sample_train[0]}')
print(f'Second training data: {sample_train[1]}')
pauli_circuit = pauli_kernel.construct_circuit(sample_train[0], sample_train[1])
pauli_circuit.decompose().decompose().draw(output='mpl')
backend = Aer.get_backend('qasm_simulator')
job = execute(pauli_circuit, backend, shots=8192,
seed_simulator=1024, seed_transpiler=1024)
counts = job.result().get_counts(pauli_circuit)
print(f"Transition amplitude: {counts['0'*N_DIM]/sum(counts.values())}")
matrix_train = pauli_kernel.evaluate(x_vec=sample_train)
matrix_val = pauli_kernel.evaluate(x_vec=sample_val, y_vec=sample_train)
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(np.asmatrix(matrix_train),
interpolation='nearest', origin='upper', cmap='Blues')
axs[0].set_title("training kernel matrix")
axs[1].imshow(np.asmatrix(matrix_val),
interpolation='nearest', origin='upper', cmap='Reds')
axs[1].set_title("validation kernel matrix")
plt.show()
x = [-0.5, -0.4, 0.3, 0, -0.9]
y = [0, -0.7, -0.3, 0, -0.4]
##############################
# Provide your code here
zz = ZZFeatureMap(feature_dimension=N_DIM, reps=3, entanglement='circular')
zz_kernel = QuantumKernel(feature_map=zz, quantum_instance=Aer.get_backend('statevector_simulator'))
zz_circuit = zz_kernel.construct_circuit(x, y)
backend = Aer.get_backend('qasm_simulator')
job = execute(zz_circuit, backend, shots=8192,
seed_simulator=1024, seed_transpiler=1024)
counts = job.result().get_counts(zz_circuit)
ex3b_amp = counts['0'*N_DIM]/sum(counts.values())
##############################
# Check your answer and submit using the following code
from qc_grader import grade_ex3b
grade_ex3b(ex3b_amp)
pauli_svc = SVC(kernel='precomputed')
pauli_svc.fit(matrix_train, labels_train)
pauli_score = pauli_svc.score(matrix_val, labels_val)
print(f'Precomputed kernel classification test score: {pauli_score*100}%')
# Load MNIST dataset
DATA_PATH = './resources/ch3_part2.npz'
data = np.load(DATA_PATH)
sample_train = data['sample_train']
labels_train = data['labels_train']
sample_test = data['sample_test']
# Split train data
sample_train, sample_val, labels_train, labels_val = train_test_split(
sample_train, labels_train, test_size=0.2, random_state=42)
# Visualize samples
fig = plt.figure()
LABELS = [0, 2, 3]
num_labels = len(LABELS)
for i in range(num_labels):
ax = fig.add_subplot(1, num_labels, i+1)
img = sample_train[labels_train==LABELS[i]][0].reshape((28, 28))
ax.imshow(img, cmap="Greys")
# Standardize
standard_scaler = StandardScaler()
sample_train = standard_scaler.fit_transform(sample_train)
sample_val = standard_scaler.transform(sample_val)
sample_test = standard_scaler.transform(sample_test)
# Reduce dimensions
N_DIM = 5
pca = PCA(n_components=N_DIM)
sample_train = pca.fit_transform(sample_train)
sample_val = pca.transform(sample_val)
sample_test = pca.transform(sample_test)
# Normalize
min_max_scaler = MinMaxScaler((-1, 1))
sample_train = min_max_scaler.fit_transform(sample_train)
sample_val = min_max_scaler.transform(sample_val)
sample_test = min_max_scaler.transform(sample_test)
labels_train_0 = np.where(labels_train==0, 1, 0)
labels_val_0 = np.where(labels_val==0, 1, 0)
print(f'Original validation labels: {labels_val}')
print(f'Validation labels for 0 vs Rest: {labels_val_0}')
labels_train_2 = np.where(labels_train==2, 1, 0)
labels_val_2 = np.where(labels_val==2, 1, 0)
print(f'Original validation labels: {labels_val}')
print(f'Validation labels for 2 vs Rest: {labels_val_2}')
labels_train_3 = np.where(labels_train==3, 1, 0)
labels_val_3 = np.where(labels_val==3, 1, 0)
print(f'Original validation labels: {labels_val}')
print(f'Validation labels for 2 vs Rest: {labels_val_3}')
pauli_map_0 = ZZFeatureMap(feature_dimension=N_DIM, reps=1, entanglement = 'linear')
pauli_kernel_0 = QuantumKernel(feature_map=pauli_map_0, quantum_instance=Aer.get_backend('statevector_simulator'))
pauli_svc_0 = SVC(kernel='precomputed', probability=True)
matrix_train_0 = pauli_kernel_0.evaluate(x_vec=sample_train)
pauli_svc_0.fit(matrix_train_0, labels_train_0)
matrix_val_0 = pauli_kernel_0.evaluate(x_vec=sample_val, y_vec=sample_train)
pauli_score_0 = pauli_svc_0.score(matrix_val_0, labels_val_0)
print(f'Accuracy of discriminating between label 0 and others: {pauli_score_0*100}%')
pauli_map_2 = ZZFeatureMap(feature_dimension=N_DIM, reps=1, entanglement='linear')
pauli_kernel_2 = QuantumKernel(feature_map=pauli_map_2, quantum_instance=Aer.get_backend('statevector_simulator'))
pauli_svc_2 = SVC(kernel='precomputed', probability=True)
matrix_train_2 = pauli_kernel_2.evaluate(x_vec=sample_train)
pauli_svc_2.fit(matrix_train_2, labels_train_2)
matrix_val_2 = pauli_kernel_2.evaluate(x_vec=sample_val, y_vec=sample_train)
pauli_score_2 = pauli_svc_2.score(matrix_val_2, labels_val_2)
print(f'Accuracy of discriminating between label 2 and others: {pauli_score_2*100}%')
pauli_map_3 = ZZFeatureMap(feature_dimension=N_DIM, reps=1, entanglement = 'linear')
pauli_kernel_3 = QuantumKernel(feature_map=pauli_map_3, quantum_instance=Aer.get_backend('statevector_simulator'))
pauli_svc_3 = SVC(kernel='precomputed', probability=True)
matrix_train_3 = pauli_kernel_3.evaluate(x_vec=sample_train)
pauli_svc_3.fit(matrix_train_3, labels_train_3)
matrix_val_3 = pauli_kernel_3.evaluate(x_vec=sample_val, y_vec=sample_train)
pauli_score_3 = pauli_svc_3.score(matrix_val_3, labels_val_3)
print(f'Accuracy of discriminating between label 3 and others: {pauli_score_3*100}%')
matrix_test_0 = pauli_kernel_0.evaluate(x_vec=sample_test, y_vec=sample_train)
pred_0 = pauli_svc_0.predict_proba(matrix_test_0)[:, 1]
print(f'Probability of label 0: {np.round(pred_0, 2)}')
matrix_test_2 = pauli_kernel_2.evaluate(x_vec=sample_test, y_vec=sample_train)
pred_2 = pauli_svc_2.predict_proba(matrix_test_2)[:, 1]
print(f'Probability of label 2: {np.round(pred_2, 2)}')
matrix_test_3 = pauli_kernel_3.evaluate(x_vec=sample_test, y_vec=sample_train)
pred_3 = pauli_svc_3.predict_proba(matrix_test_3)[:, 1]
print(f'Probability of label 3: {np.round(pred_3, 2)}')
##############################
# Provide your code here
pred_2 = pauli_svc_2.predict_proba(matrix_test_2)[:, 1]
##############################
##############################
# Provide your code here
pred_3 = pauli_svc_3.predict_proba(matrix_test_3)[:, 1]
##############################
sample_pred = np.load('./resources/ch3_part2_sub.npy')
print(f'Sample prediction: {sample_pred}')
pred_2_ex = np.array([0.7])
pred_3_ex = np.array([0.2])
pred_test_ex = np.where((pred_2_ex > pred_3_ex), 2, 3)
print(f'Prediction: {pred_test_ex}')
pred_2_ex = np.array([0.7, 0.1])
pred_3_ex = np.array([0.2, 0.6])
pred_test_ex = np.where((pred_2_ex > pred_3_ex), 2, 3)
print(f'Prediction: {pred_test_ex}')
##############################
# Provide your code here
prob_0 = np.array(np.round(pred_0,2))
prob_2 = np.array(np.round(pred_2,2))
prob_3 = np.array(np.round(pred_3,2))
def pred(pred_0, pred_2, pred_3):
    """Fuse three one-vs-rest probability arrays into final labels (0, 2 or 3).

    A sample is labelled 0 (resp. 2) only when its probability strictly
    exceeds both rivals; every tie or remaining case falls through to 3.
    Returns a numpy array of the same length as the inputs.
    """
    labels = []
    for idx, p0 in enumerate(pred_0):
        p2 = pred_2[idx]
        p3 = pred_3[idx]
        if p0 > p2 and p0 > p3:
            labels.append(0)
        elif p2 > p0 and p2 > p3:
            labels.append(2)
        else:
            labels.append(3)
    return np.array(labels)
test = pred(prob_0, prob_2, prob_3)
pred_test = np.array(test)
print(pred_test)
##############################
print(f'Sample prediction: {sample_pred}')
# Check your answer and submit using the following code
from qc_grader import grade_ex3c
grade_ex3c(pred_test, sample_train,
standard_scaler, pca, min_max_scaler,
pauli_kernel_0, pauli_kernel_2, pauli_kernel_3,
pauli_svc_0, pauli_svc_2, pauli_svc_3)
|
https://github.com/ashishpatel26/IBM-Quantum-Challenge-Fall-2021
|
ashishpatel26
|
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit import Aer
from qiskit.utils import algorithm_globals, QuantumInstance
from qiskit.algorithms import QAOA, NumPyMinimumEigensolver
import numpy as np
val = [5,6,7,8,9]
wt = [4,5,6,7,8]
W = 18
def dp(W, wt, val, n):
    """Solve the 0/1 knapsack problem by dynamic programming.

    W   -- knapsack capacity
    wt  -- list of item weights (length n)
    val -- list of item values (length n)
    n   -- number of items

    Returns (best_value, picks) where picks[i] is 1 iff item i is taken.

    Fix: the original backtracking loop ran `for i in range(n, -1, -1)`, so
    its last iteration compared `k[0][volume] > k[-1][volume]` (a wrapped
    negative index) and could write `picks[-1]`; the walk now stops at i=1.
    """
    # k[i][w] = best value using the first i items with capacity w
    k = [[0] * (W + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(W + 1):
            if wt[i - 1] <= w:
                k[i][w] = max(val[i - 1] + k[i - 1][w - wt[i - 1]], k[i - 1][w])
            else:
                k[i][w] = k[i - 1][w]
    # Walk back through the table to recover which items were chosen.
    picks = [0] * n
    volume = W
    for i in range(n, 0, -1):
        # item i-1 was taken iff dropping it lowers the achievable value
        if k[i][volume] > k[i - 1][volume]:
            picks[i - 1] = 1
            volume -= wt[i - 1]
    return k[n][W], picks
# Solve the instance once and reuse the result; the original re-ran the full
# DP inside the loop (n+2 complete DP solves for one answer).
n = len(val)
optimal_value, picks = dp(W, wt, val, n)
print("optimal value:", optimal_value)
print('\n index of the chosen items:')
for i in range(n):
    if picks[i]:
        print(i,end=' ')
# import packages necessary for application classes.
from qiskit_optimization.applications import Knapsack
def knapsack_quadratic_program():
    """Build the challenge knapsack instance and its QuadraticProgram.

    Reads the module-level `val`, `wt` and `W` and feeds them to
    qiskit_optimization's `Knapsack(values, weights, max_weight)`.
    Returns (knapsack_application, quadratic_program) so callers can both
    solve the program and interpret results via the application object.
    """
    # Put values, weights and max_weight parameter for the Knapsack()
    ##############################
    # Provide your code here
    prob = Knapsack(val, wt, W)
    #
    ##############################
    # to_quadratic_program generates a corresponding QuadraticProgram of the instance of the knapsack problem.
    kqp = prob.to_quadratic_program()
    return prob, kqp
prob,quadratic_program=knapsack_quadratic_program()
quadratic_program
# Numpy Eigensolver
meo = MinimumEigenOptimizer(min_eigen_solver=NumPyMinimumEigensolver())
result = meo.solve(quadratic_program)
print('result:\n', result)
print('\n index of the chosen items:', prob.interpret(result))
# QAOA
seed = 123
algorithm_globals.random_seed = seed
qins = QuantumInstance(backend=Aer.get_backend('qasm_simulator'), shots=1000, seed_simulator=seed, seed_transpiler=seed)
meo = MinimumEigenOptimizer(min_eigen_solver=QAOA(reps=1, quantum_instance=qins))
result = meo.solve(quadratic_program)
print('result:\n', result)
print('\n index of the chosen items:', prob.interpret(result))
# Check your answer and submit using the following code
from qc_grader import grade_ex4a
grade_ex4a(quadratic_program)
L1 = [5,3,3,6,9,7,1]
L2 = [8,4,5,12,10,11,2]
C1 = [1,1,2,1,1,1,2]
C2 = [3,2,3,2,4,3,3]
C_max = 16
def knapsack_argument(L1, L2, C1, C2, C_max):
    """Translate the challenge lists into Knapsack arguments.

    values      -- element-wise L2 - L1 (gain of upgrading each item)
    weights     -- element-wise C2 - C1 (extra cost of upgrading)
    max_weight  -- the largest element-wise sum L1[i] + L2[i]
    `C_max` is accepted for interface compatibility but not consulted here.
    """
    ##############################
    # Provide your code here
    values = [high - low for low, high in zip(L1, L2)]
    weights = [high - low for low, high in zip(C1, C2)]
    max_weight = max(a + b for a, b in zip(L1, L2))
    #
    ##############################
    return values, weights, max_weight
values, weights, max_weight = knapsack_argument(L1, L2, C1, C2, C_max)
print(values, weights, max_weight)
prob = Knapsack(values = values, weights = weights, max_weight = max_weight)
qp = prob.to_quadratic_program()
qp
# Check your answer and submit using the following code
from qc_grader import grade_ex4b
grade_ex4b(knapsack_argument)
# QAOA
seed = 123
algorithm_globals.random_seed = seed
qins = QuantumInstance(backend=Aer.get_backend('qasm_simulator'), shots=1000, seed_simulator=seed, seed_transpiler=seed)
meo = MinimumEigenOptimizer(min_eigen_solver=QAOA(reps=1, quantum_instance=qins))
result = meo.solve(qp)
print('result:', result.x)
item = np.array(result.x)
revenue=0
for i in range(len(item)):
if item[i]==0:
revenue+=L1[i]
else:
revenue+=L2[i]
print('total revenue:', revenue)
instance_examples = [
{
'L1': [3, 7, 3, 4, 2, 6, 2, 2, 4, 6, 6],
'L2': [7, 8, 7, 6, 6, 9, 6, 7, 6, 7, 7],
'C1': [2, 2, 2, 3, 2, 4, 2, 2, 2, 2, 2],
'C2': [4, 3, 3, 4, 4, 5, 3, 4, 4, 3, 4],
'C_max': 33
},
{
'L1': [4, 2, 2, 3, 5, 3, 6, 3, 8, 3, 2],
'L2': [6, 5, 8, 5, 6, 6, 9, 7, 9, 5, 8],
'C1': [3, 3, 2, 3, 4, 2, 2, 3, 4, 2, 2],
'C2': [4, 4, 3, 5, 5, 3, 4, 5, 5, 3, 5],
'C_max': 38
},
{
'L1': [5, 4, 3, 3, 3, 7, 6, 4, 3, 5, 3],
'L2': [9, 7, 5, 5, 7, 8, 8, 7, 5, 7, 9],
'C1': [2, 2, 4, 2, 3, 4, 2, 2, 2, 2, 2],
'C2': [3, 4, 5, 4, 4, 5, 3, 3, 5, 3, 5],
'C_max': 35
}
]
from typing import List, Union
import math
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, assemble
from qiskit.compiler import transpile
from qiskit.circuit import Gate
from qiskit.circuit.library.standard_gates import *
from qiskit.circuit.library import QFT
def phase_return(index_qubits: int, gamma: float, L1: list, L2: list, to_gate=True) -> Union[Gate, QuantumCircuit]:
    """Build the QAOA 'phase return' operator over the index register.

    Challenge stub: the region between the markers is intentionally left for
    the participant, so as scraped the returned gate acts as the identity on
    `index_qubits` qubits.

    gamma   -- QAOA phase parameter for this round.
    L1, L2  -- per-item value lists (lambda1/lambda2 in the challenge text).
    to_gate -- return a Gate when True, the raw QuantumCircuit otherwise.
    """
    qr_index = QuantumRegister(index_qubits, "index")
    qc = QuantumCircuit(qr_index)
    ##############################
    ### U_1(gamma * (lambda2 - lambda1)) for each qubit ###
    # Provide your code here
    ##############################
    return qc.to_gate(label=" phase return ") if to_gate else qc
def subroutine_add_const(data_qubits: int, const: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
    """Phase-rotation subroutine that adds `const` in the QFT basis.

    Challenge stub: the rotation body is left for the participant, so as
    scraped this returns an identity on `data_qubits` qubits.
    """
    qc = QuantumCircuit(data_qubits)
    ##############################
    ### Phase Rotation ###
    # Provide your code here
    ##############################
    return qc.to_gate(label=" [+"+str(const)+"] ") if to_gate else qc
def const_adder(data_qubits: int, const: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
    """Add the classical constant `const` to the data register.

    Intended structure (per the markers): QFT, then the phase rotations from
    `subroutine_add_const`, then the inverse QFT.  Challenge stub: all three
    sections are left for the participant.
    """
    qr_data = QuantumRegister(data_qubits, "data")
    qc = QuantumCircuit(qr_data)
    ##############################
    ### QFT ###
    # Provide your code here
    ##############################
    ##############################
    ### Phase Rotation ###
    # Use `subroutine_add_const`
    ##############################
    ##############################
    ### IQFT ###
    # Provide your code here
    ##############################
    return qc.to_gate(label=" [ +" + str(const) + "] ") if to_gate else qc
def cost_calculation(index_qubits: int, data_qubits: int, list1: list, list2: list, to_gate = True) -> Union[Gate, QuantumCircuit]:
    """Accumulate the per-item cost into the data register.

    For each item i the intended construction adds list2[i] when index qubit
    i is 1, and (after the X flip below) list1[i] when it is 0; the actual
    controlled adders are challenge stubs left for the participant.
    """
    qr_index = QuantumRegister(index_qubits, "index")
    qr_data = QuantumRegister(data_qubits, "data")
    qc = QuantumCircuit(qr_index, qr_data)
    for i, (val1, val2) in enumerate(zip(list1, list2)):
        ##############################
        ### Add val2 using const_adder controlled by i-th index register (set to 1) ###
        # Provide your code here
        ##############################
        # flip so the next control fires when the index qubit was originally 0
        qc.x(qr_index[i])
        ##############################
        ### Add val1 using const_adder controlled by i-th index register (set to 0) ###
        # Provide your code here
        ##############################
        # flip back to restore the index register
        qc.x(qr_index[i])
    return qc.to_gate(label=" Cost Calculation ") if to_gate else qc
def constraint_testing(data_qubits: int, C_max: int, to_gate = True) -> Union[Gate, QuantumCircuit]:
    """Flag data-register states whose accumulated cost exceeds C_max.

    Challenge stub: the flag-setting logic between the markers is left for
    the participant.
    """
    qr_data = QuantumRegister(data_qubits, "data")
    qr_f = QuantumRegister(1, "flag")
    qc = QuantumCircuit(qr_data, qr_f)
    ##############################
    ### Set the flag register for indices with costs larger than C_max ###
    # Provide your code here
    ##############################
    return qc.to_gate(label=" Constraint Testing ") if to_gate else qc
def penalty_dephasing(data_qubits: int, alpha: float, gamma: float, to_gate = True) -> Union[Gate, QuantumCircuit]:
    """Apply the penalty phase (scaled by alpha*gamma) to flagged states.

    Challenge stub: the phase-rotation body is left for the participant.
    """
    qr_data = QuantumRegister(data_qubits, "data")
    qr_f = QuantumRegister(1, "flag")
    qc = QuantumCircuit(qr_data, qr_f)
    ##############################
    ### Phase Rotation ###
    # Provide your code here
    ##############################
    return qc.to_gate(label=" Penalty Dephasing ") if to_gate else qc
def reinitialization(index_qubits: int, data_qubits: int, C1: list, C2: list, C_max: int, to_gate = True) -> Union[Gate, QuantumCircuit]:
    """Uncompute the data and flag registers back to |0..0>.

    Intended to invert the cost-calculation/constraint-testing steps so the
    ancillas can be reused next QAOA round; the body is a challenge stub.
    """
    qr_index = QuantumRegister(index_qubits, "index")
    qr_data = QuantumRegister(data_qubits, "data")
    qr_f = QuantumRegister(1, "flag")
    qc = QuantumCircuit(qr_index, qr_data, qr_f)
    ##############################
    ### Reinitialization Circuit ###
    # Provide your code here
    ##############################
    return qc.to_gate(label=" Reinitialization ") if to_gate else qc
def mixing_operator(index_qubits: int, beta: float, to_gate = True) -> Union[Gate, QuantumCircuit]:
    """QAOA mixing operator (parameter beta) on the index register.

    Challenge stub: the mixing body is left for the participant.
    """
    qr_index = QuantumRegister(index_qubits, "index")
    qc = QuantumCircuit(qr_index)
    ##############################
    ### Mixing Operator ###
    # Provide your code here
    ##############################
    return qc.to_gate(label=" Mixing Operator ") if to_gate else qc
def solver_function(L1: list, L2: list, C1: list, C2: list, C_max: int) -> QuantumCircuit:
    """Assemble the full p=5 QAOA circuit for the challenge knapsack instance.

    NOTE(review): the nested defs below are paste-here challenge stubs whose
    bodies are comment-only in this scrape, so the function is not runnable
    as-is; the participant is expected to paste the module-level
    implementations (which take arguments matching the calls in the loop)
    before use.
    """
    # the number of qubits representing answers
    index_qubits = len(L1)
    # the maximum possible total cost
    max_c = sum([max(l0, l1) for l0, l1 in zip(C1, C2)])
    # the number of qubits representing data values can be defined using the maximum possible total cost as follows:
    data_qubits = math.ceil(math.log(max_c, 2)) + 1 if not max_c & (max_c - 1) == 0 else math.ceil(math.log(max_c, 2)) + 2
    ### Phase Operator ###
    # return part
    def phase_return():
        ##############################
        ### TODO ###
        ### Paste your code from above cells here ###
        ##############################
    # penalty part
    def subroutine_add_const():
        ##############################
        ### TODO ###
        ### Paste your code from above cells here ###
        ##############################
    # penalty part
    def const_adder():
        ##############################
        ### TODO ###
        ### Paste your code from above cells here ###
        ##############################
    # penalty part
    def cost_calculation():
        ##############################
        ### TODO ###
        ### Paste your code from above cells here ###
        ##############################
    # penalty part
    def constraint_testing():
        ##############################
        ### TODO ###
        ### Paste your code from above cells here ###
        ##############################
    # penalty part
    def penalty_dephasing():
        ##############################
        ### TODO ###
        ### Paste your code from above cells here ###
        ##############################
    # penalty part
    def reinitialization():
        ##############################
        ### TODO ###
        ### Paste your code from above cells here ###
        ##############################
    ### Mixing Operator ###
    def mixing_operator():
        ##############################
        ### TODO ###
        ### Paste your code from above cells here ###
        ##############################
    qr_index = QuantumRegister(index_qubits, "index") # index register
    qr_data = QuantumRegister(data_qubits, "data") # data register
    qr_f = QuantumRegister(1, "flag") # flag register
    cr_index = ClassicalRegister(index_qubits, "c_index") # classical register storing the measurement result of index register
    qc = QuantumCircuit(qr_index, qr_data, qr_f, cr_index)
    ### initialize the index register with uniform superposition state ###
    qc.h(qr_index)
    ### DO NOT CHANGE THE CODE BELOW
    p = 5
    alpha = 1
    for i in range(p):
        ### set fixed parameters for each round ###
        beta = 1 - (i + 1) / p
        gamma = (i + 1) / p
        ### return part ###
        qc.append(phase_return(index_qubits, gamma, L1, L2), qr_index)
        ### step 1: cost calculation ###
        qc.append(cost_calculation(index_qubits, data_qubits, C1, C2), qr_index[:] + qr_data[:])
        ### step 2: Constraint testing ###
        qc.append(constraint_testing(data_qubits, C_max), qr_data[:] + qr_f[:])
        ### step 3: penalty dephasing ###
        qc.append(penalty_dephasing(data_qubits, alpha, gamma), qr_data[:] + qr_f[:])
        ### step 4: reinitialization ###
        qc.append(reinitialization(index_qubits, data_qubits, C1, C2, C_max), qr_index[:] + qr_data[:] + qr_f[:])
        ### mixing operator ###
        qc.append(mixing_operator(index_qubits, beta), qr_index)
    ### measure the index ###
    ### since the default measurement outcome is shown in big endian, it is necessary to reverse the classical bits in order to unify the endian ###
    qc.measure(qr_index, cr_index[::-1])
    return qc
# Execute your circuit with following prepare_ex4c() function.
# The prepare_ex4c() function works like the execute() function with only QuantumCircuit as an argument.
from qc_grader import prepare_ex4c
job = prepare_ex4c(solver_function)
result = job.result()
# Check your answer and submit using the following code
from qc_grader import grade_ex4c
grade_ex4c(job)
|
https://github.com/kad99kev/MacHacks-QuantumVRP
|
kad99kev
|
import sys
import numpy as np
from matplotlib import pyplot as plt
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute, Aer, visualization
from random import randint
def to_binary(N, n_bit):
    """Return the low n_bit bits of the non-negative integer N as a numpy
    bool array, most-significant bit first."""
    bits = np.zeros(n_bit, dtype=bool)
    for pos in range(n_bit):
        # bit `pos` of N lands at the mirrored position (MSB-first layout)
        bits[n_bit - 1 - pos] = bool((N >> pos) & 1)
    return bits
def modular_multiplication(qc,a,N):
    """
    applies the unitary operator that implements
    modular multiplication function x -> a*x(modN)
    Only works for the particular case x -> 7*x(mod15)!
    (The `a` and `N` arguments are not read here; the gate sequence below is
    hard-coded for that case.)
    """
    # flip qubits 0-2
    for i in range(0,3):
        qc.x(i)
    # three back-to-back CNOTs = swap of qubits 1 and 2
    qc.cx(2,1)
    qc.cx(1,2)
    qc.cx(2,1)
    # three back-to-back CNOTs = swap of qubits 0 and 1
    qc.cx(1,0)
    qc.cx(0,1)
    qc.cx(1,0)
    # remaining CNOTs couple qubit 3 into the low qubits
    qc.cx(3,0)
    qc.cx(0,1)
    qc.cx(1,0)
def quantum_period(a, N, n_bit):
    """Count how many modular multiplications return a random start value to
    itself, i.e. estimate the period of x -> a*x (mod N).

    Relies on `modular_multiplication`, which is hard-wired for a=7, N=15.
    Side effects: prints progress and opens a matplotlib window at the end.
    Returns the measured period r.
    """
    # Quantum part
    print(" Searching the period for N =", N, "and a =", a)
    qr = QuantumRegister(n_bit)
    cr = ClassicalRegister(n_bit)
    qc = QuantumCircuit(qr,cr)
    simulator = Aer.get_backend('qasm_simulator')
    s0 = randint(1, N-1) # Chooses random int
    sbin = to_binary(s0,n_bit) # Turns to binary
    print("\n Starting at \n s =", s0, "=", "{0:b}".format(s0), "(bin)")
    # Quantum register is initialized with s (in binary)
    for i in range(0,n_bit):
        if sbin[n_bit-i-1]:
            qc.x(i)
    s = s0
    r=-1 # makes while loop run at least 2 times
    # Applies modular multiplication transformation until we come back to initial number s
    while s != s0 or r <= 0:
        r+=1
        # sets up circuit structure: the circuit keeps growing by one
        # measure + multiply stage per loop iteration
        qc.measure(qr, cr)
        modular_multiplication(qc,a,N)
        qc.draw('mpl')  # NOTE(review): drawing result is discarded each pass
        # runs circuit and processes data
        job = execute(qc,simulator, shots=10)
        result_counts = job.result().get_counts(qc)
        result_histogram_key = list(result_counts)[0] # https://qiskit.org/documentation/stubs/qiskit.result.Result.get_counts.html#qiskit.result.Result.get_counts
        s = int(result_histogram_key, 2)
        print(" ", result_counts)
    plt.show()
    print("\n Found period r =", r)
    return r
if __name__ == '__main__':
    # Demo: recover the period of x -> 7*x (mod 15) on a 5-qubit register.
    a = 7
    N = 15
    n_bit=5
    r = quantum_period(a, N, n_bit)
|
https://github.com/BoltzmannEntropy/QMLDocker
|
BoltzmannEntropy
|
%reset -f
import matplotlib.font_manager
import numpy as np
from matplotlib import pyplot as plt
font = {
'family':'SimHei',
'weight':'bold',
'size':'16'
}
matplotlib.rc("font",**font)
from IPython.display import display_pretty
import warnings
warnings.filterwarnings("ignore")
from qutip import *
q = (1/np.sqrt(7))*Qobj(np.array([[1+ 1j], [1-2j]]).T)
q.conj().full()
p=(1/np.sqrt(2))*Qobj(np.array([[-1j], [1]]))
p.full()
q.conj()*p
from sympy import Matrix, symbols, sqrt, init_printing
from sympy.physics.quantum import TensorProduct
from IPython.display import display_pretty
init_printing(use_latex=True)
a1 = symbols('a_1')
b1 = symbols('b_1')
a2 = symbols('a_2')
b2 = symbols('b_2')
# psi1 = Matrix(2,1,[a1,b1])
# psi2 = Matrix(2,1,[a2,b2])
U_I = Matrix([[1,0],
[0,1]])
U_H = 1/sqrt(2)*Matrix([[1, 1],
[1,-1]])
U_Z=Matrix(2,2,[1,0,0,-1])
Cnot=Matrix([[1,0,0,0],
[0,1,0,0],
[0,0,0,1],
[0,0,1,0]])
psi1 = Matrix(2,1,[1,0])
psi2 = Matrix(2,1,[1,0])
psi12 = TensorProduct(psi1,psi2)
U_FINAL= Cnot*(TensorProduct(U_I,U_Z)*TensorProduct(U_H,U_H))*psi12
U_FINAL
from sympy import Matrix, symbols, sqrt, init_printing
from sympy.physics.quantum import TensorProduct
from IPython.display import display_pretty
init_printing(use_latex=True)
a1 = symbols('a_1')
b1 = symbols('b_1')
a2 = symbols('a_2')
b2 = symbols('b_2')
U_I = Matrix([[1,0],
[0,1]])
U_H = 1/sqrt(2)*Matrix([[1, 1],
[1,-1]])
U_Z=Matrix(2,2,[1,0,0,-1])
U_X=Matrix(2,2,[0,1,1,0])
Cnot=Matrix([[1,0,0,0],
[0,1,0,0],
[0,0,0,1],
[0,0,1,0]])
psi0 = Matrix(2,1,[1,0])
psi1 = Matrix(2,1,[0,1])
psi3 = a1*psi0 + b1*psi1
psi3
# psi1 = Matrix(2,1,[a1,b1])
# psi2 = Matrix(2,1,[a2,b2])
# psi12 = TensorProduct(psi1,psi2)
# # psi12 = psi1+ psi2
U_FINAL= TensorProduct(Cnot,U_I)*(TensorProduct(U_X,U_I,U_Z)*TensorProduct(Cnot,U_I))* TensorProduct(U_H,U_Z,U_I)
U_FINAL * TensorProduct(psi0, psi1, psi0)
from sympy import sqrt, symbols, Rational
from sympy import expand, Eq, Symbol, simplify, exp, sin, srepr
from sympy.physics.quantum import *
from sympy.physics.quantum.qubit import *
from sympy.physics.quantum.gate import *
from sympy.physics.quantum.grover import *
from sympy.physics.quantum.qft import QFT, IQFT, Fourier
from sympy.physics.quantum.circuitplot import circuit_plot
for gate in [X,Y,Z]:
for state in [Qubit('0'),Qubit('1')]:
lhs = gate(0)*state
rhs = qapply(lhs)
display(Eq(lhs,rhs))
%matplotlib inline
import numpy as np
from warnings import filterwarnings
filterwarnings(action='ignore', category=DeprecationWarning)
phi_plus = np.array([1, 0, 0, 1])/np.sqrt(2) # | Phi^+ >
phi_minus = np.array([1, 0, 0, -1])/np.sqrt(2) # | Phi^- >
psi_plus = np.array([0, 1, 1, 0])/np.sqrt(2) # | Psi^+ >
psi_minus = np.array([0, 1, -1, 0])/np.sqrt(2) # | Psi^- >
import torch
initial_state = [1,0] # Define initial_state as |0>
# initial_state = [0,1] # Define initial_state as |0>
# initial_state = [1/np.sqrt(2), 1/np.sqrt(2)] # Define state |q_0>
initial_state_torch = torch.tensor([initial_state])
x_gate_matrix = torch.tensor([[0,1],[1,0]])
out_state_torch = torch.mm(x_gate_matrix,initial_state_torch.t())
print("after X matrix, the state is ",out_state_torch.t()) # Display the output state vector
import numpy
from numpy import pi as PI
import paddle
import paddle_quantum
from paddle import matmul
from paddle_quantum.ansatz import Circuit
from paddle_quantum.qinfo import random_pauli_str_generator, pauli_str_to_matrix
from paddle_quantum.linalg import dagger
N = 1 # Number of qubits
SEED = np.random.randint(9999) # Fixed random seed
# Generate random Hamiltonian represented by Pauli string
numpy.random.seed(SEED)
hamiltonian = random_pauli_str_generator(N, terms=1)
print("Random Hamiltonian in Pauli string format = \n", hamiltonian)
# Generate matrix representation of Hamiltonian
complex_dtype = paddle_quantum.get_dtype()
H = pauli_str_to_matrix(hamiltonian, N).astype(complex_dtype)
H
|
https://github.com/quantumgenetics/quantumgenetics
|
quantumgenetics
|
from multiprocessing import Pool
from qiskit import Aer
import numpy as np
from collections import deque
from copy import deepcopy
import math
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
import random
from functools import reduce
class Genetic_Executor:
    '''
    Bundles a multiprocessing pool together with a population so that
    genetic-algorithm operators can be applied to both as a unit.

    Fix: the original never released the worker pool; `close()` and context
    manager support are added so callers can reclaim the processes.  Existing
    call sites are unaffected.
    '''
    def __init__(
            self,
            number_of_processors,
            population_initializer,
            population_size,
            parallel_population_initialization_flag=True):
        '''
        number_of_processors -> positive integer
            the number of processes that will be spawned
        population_initializer -> function
            receives (population_size, process_pool) and returns an iterable
            representing the population
        population_size -> positive integer
        parallel_population_initialization_flag -> kept for backward
            compatibility; not consulted anywhere in this class
        '''
        self.processs_pool = Pool(number_of_processors)
        self.population = population_initializer(population_size, self.processs_pool)
        self.population_size = population_size
    def apply_operation(self, operation_on_population):
        '''
        operation_on_population -> function
            It receives the process pool and the population. It is called
            here and its result is returned.
        '''
        return operation_on_population(self.processs_pool, self.population)
    def close(self):
        '''Shut down the worker processes (the original leaked the Pool).'''
        self.processs_pool.close()
        self.processs_pool.join()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False
def get_number_gate_seq_relation(depth, available_gates):
    '''
    Enumerates every gate sequence of length `depth` drawn from the gates in
    `available_gates` (plus the identity 'i') by a depth-first traversal, and
    returns them as a list of lists.
    '''
    gates = tuple()
    for options in available_gates.keys():
        gates += available_gates[options]
    gates += ('i',)
    sequences = []
    prefix = []
    # explicit DFS stack of (gate, level) pairs; popping from the right end
    stack = [(g, 0) for g in gates]
    while stack:
        gate, level = stack.pop()
        # rewind the shared prefix to this level, then place the new gate
        prefix = prefix[:level] + [gate]
        if level == depth - 1:
            sequences.append(list(prefix))
        elif level < depth - 1:
            stack.extend((g, level + 1) for g in gates)
    return sequences
def get_binary(decimal):
    '''
    Gets a decimal as input. Outputs a list with the decimal's binary
    representation, MSB first, zero-padded to the module-level
    `number_of_qubits_in_possible_circuit` width.
    e.g. if the number of bits in the list is 3 and decimal is 3, then the
    function outputs [0,1,1].
    Assumes `decimal` fits in that many bits; otherwise the write index `k`
    goes negative and wraps around to the end of the list.
    '''
    key = [0 for _ in range(number_of_qubits_in_possible_circuit)]
    s_i = format(decimal, "b")
    k = number_of_qubits_in_possible_circuit - 1
    j = len(s_i)-1
    # copy bits from the least-significant end of the binary string backwards
    while j >= 0:
        if s_i[j] == '1': key[k] = 1
        k-=1
        j-=1
    return key
def get_binary_from_str(s):
    '''
    Converts a bit string such as "0110" into a list of ints [0, 1, 1, 0].
    Any character other than '0' maps to 1 - the same behaviour as the
    original append loop - which is what measurement strings of '0'/'1'
    require.  Rewritten as a comprehension (idiomatic, single pass).
    '''
    return [0 if ch == '0' else 1 for ch in s]
def get_decimal(binary):
    '''
    Gets a binary list (MSB first) and returns the decimal representation.

    Fixes the original accumulation `a += b**i`, which raised each bit to the
    power of its position instead of weighting it by 2**i: [1,0,1] came out
    as 2 instead of 5, and a trailing 0 bit even contributed 1 via 0**0 == 1.
    '''
    value = 0
    for bit in binary:
        # shift previous bits up one place and append the new one
        value = value * 2 + bit
    return value
def get_random_goal_function(number_of_qubits_in_possible_circuit):
    '''
    Generates a random binary function of "number_of_qubits_in_possible_circuit"
    bits: maps every input bit-tuple to a random 0/1 vector of the same width.
    NOTE(review): the parameter shadows the module-level global of the same
    name, but `get_binary` (called below) still reads the global - presumably
    the caller keeps the two equal; verify before changing either.
    '''
    goal_function_dict = dict()
    for i in range(2**number_of_qubits_in_possible_circuit):
        goal_function_dict[tuple(get_binary(i))] = np.random.choice(2,
            number_of_qubits_in_possible_circuit)
    return goal_function_dict
def chromosome_initialzer(a):
    """Build one random chromosome: (theta, cos(theta), sin(theta)) arrays.

    The argument *a* is ignored; it exists only so the function can be fed
    to Pool.map over a dummy iterable.  Uses the module-level global
    number_of_qubits_in_individual for the array length.
    """
    theta = 2.0 * math.pi * np.random.random(number_of_qubits_in_individual)
    return (theta, np.cos(theta), np.sin(theta))
def initializer1(pop_size, p_pool):
    """Create an initial population of *pop_size* random chromosomes via the pool."""
    return p_pool.map(chromosome_initialzer, [None] * pop_size)
def get_pop_fitness(p_pool, population_iterable):
    '''
    Evaluate each chromosome of the population against the global goal_function.

    p_pool: process pool; accepted for interface compatibility but unused here.
    population_iterable: iterable of (theta, cos(theta), sin(theta)) tuples.

    Returns a list of (error_count, collapsed_bit_list) pairs (lower error
    is fitter).  Reads globals: number_of_qubits_in_individual,
    number_of_qubits_in_possible_circuit, backend, goal_function,
    config_list, global_shots, binary_mutation_flag, binary_mutation_prob.

    NOTE(review): qc.u3 and qc.iden are gate methods from old Qiskit Terra
    releases (removed in current Qiskit) — this module targets that era.
    '''
    fitness_list = []
    for theta_arr, _, _ in population_iterable:
        # Step 1: collapse the chromosome — one u3(theta) rotation per qubit,
        # then a single-shot measurement yields a classical bit string.
        qr = QuantumRegister(number_of_qubits_in_individual)
        cr = ClassicalRegister(number_of_qubits_in_individual)
        qc = QuantumCircuit(qr, cr,)
        for i in range(number_of_qubits_in_individual):
            qc.u3(theta_arr[i], 0, 0, qr[i])
        qc.measure(qr,cr)
        job = execute(qc, backend=backend, shots=1,)
        results = job.result()
        answer = results.get_counts()
        binary_list = get_binary_from_str(tuple(answer.keys())[0])
        # Step 2: optional independent bit-flip mutation on the collapsed bits.
        if binary_mutation_flag:
            for i in range(len(binary_list)):
                if random.random() < binary_mutation_prob:
                    if binary_list[i] == 0:
                        binary_list[i] = 1
                    else:
                        binary_list[i] = 0
        binary_bu_list = deepcopy(binary_list)
        # Each consecutive group of qubits_per_line bits indexes one gate
        # sequence in config_list for the corresponding circuit line.
        qubits_per_line = len(binary_list)//number_of_qubits_in_possible_circuit
        v = 0
        # Step 3: for every input of the goal function, build the encoded
        # circuit and count shots that miss the desired output.
        for key in goal_function.keys():
            qr = QuantumRegister(number_of_qubits_in_possible_circuit)
            cr = ClassicalRegister(number_of_qubits_in_possible_circuit)
            qc = QuantumCircuit(qr, cr,)
            # Prepare the input basis state |key>.
            for i in range(number_of_qubits_in_possible_circuit):
                if key[i] == 1: qc.x(qr[i])
            a = 0
            for i in range(number_of_qubits_in_possible_circuit):
                config = config_list[get_decimal(binary_bu_list[a:a+qubits_per_line])]
                for gate in config:
                    if gate == 'i':
                        qc.iden(qr[i])
                    elif gate == 's':
                        qc.u3(0,0,math.pi/4,qr[i])
                    elif gate == 'h':
                        qc.h(qr[i])
                    else:
                        # Any other tag (e.g. 'cnot'): CNOT onto the next line.
                        qc.cx(qr[i],qr[(i+1)%number_of_qubits_in_possible_circuit])
                a += qubits_per_line
            qc.measure(qr,cr)
            job = execute(qc, backend=backend, shots=global_shots,\
                backend_options={\
                "max_parallel_threads":0,
                'max_parallel_shots':0})
            results = job.result()
            answer = results.get_counts()
            # Desired output bit string for this input, e.g. (1, 0) -> '10'.
            goal_value = reduce(
                lambda acc, x: acc + str(x),
                goal_function[key],
                ''
            )
            # Every shot not matching the goal output counts as one error.
            if goal_value not in answer:
                v += global_shots
            else:
                v += global_shots - answer[goal_value]
        fitness_list.append((v,binary_bu_list))
    return fitness_list
def main0():
    """Drive the genetic search for a circuit encoding a random goal function.

    Sets up module-level configuration globals, builds the gate-sequence
    lookup table and a random goal function, then runs 10 generations of
    truncation selection plus uniform crossover, printing the best (lowest)
    fitness value each generation.
    """
    global number_of_qubits_in_individual,backend,number_of_qubits_in_possible_circuit,config_list
    # Chromosome length (qubits per individual) and evolved-circuit width.
    number_of_qubits_in_individual = 18
    number_of_qubits_in_possible_circuit = 3
    # Gate alphabet keyed by arity; the identity 'i' is appended internally
    # by get_number_gate_seq_relation.
    available_gates = {
        1 : ('s', 'h'),
        2 : ('cnot',),
    }
    kill_percentage = 0.5
    population_size = 15
    depth = 3  # gates per circuit line
    global binary_mutation_flag, binary_mutation_prob
    binary_mutation_flag = True
    binary_mutation_prob = 0.1
    surviving_population_size = int(population_size * (1 - kill_percentage))
    killed_population_size = population_size - surviving_population_size
    global global_shots,goal_function
    global_shots = 2048
    backend = Aer.get_backend('qasm_simulator',)
    config_list = get_number_gate_seq_relation(depth, available_gates)
    goal_function = get_random_goal_function(number_of_qubits_in_possible_circuit)
    # 7 worker processes; initializer1 builds the random initial population.
    ge = Genetic_Executor(7, initializer1, population_size,)
    for it_no in range(10):
        # Fitness per individual (lower is better), keeping indices for selection.
        fitness_values = list( enumerate( map( lambda e: e[0], ge.apply_operation(get_pop_fitness) ) ) )
        fitness_values.sort(key=lambda p: p[1])
        print(f'{it_no}: {fitness_values[0][1]}')
        # Truncation selection: keep only the best individuals.
        new_pop = []
        for i in range(surviving_population_size):
            new_pop.append(ge.population[fitness_values[i][0]])
        ge.population = new_pop
        # Uniform crossover: refill each killed slot by mixing two distinct
        # survivors gene-by-gene.
        for _ in range(killed_population_size):
            theta_arr = np.empty(number_of_qubits_in_individual)
            alpha_arr = np.empty(number_of_qubits_in_individual)
            beta_arr = np.empty(number_of_qubits_in_individual)
            first_index = random.choice(range(surviving_population_size))
            second_index = random.choice(tuple(filter(lambda e: e!=first_index, range(surviving_population_size))))
            for i in range(number_of_qubits_in_individual):
                if random.choice((True,False,)):
                    theta_arr[i] = ge.population[first_index][0][i]
                    alpha_arr[i] = ge.population[first_index][1][i]
                    beta_arr[i] = ge.population[first_index][2][i]
                else:
                    theta_arr[i] = ge.population[second_index][0][i]
                    alpha_arr[i] = ge.population[second_index][1][i]
                    beta_arr[i] = ge.population[second_index][2][i]
            ge.population.append((theta_arr,alpha_arr,beta_arr))
if __name__ == '__main__':
    main0()
|
https://github.com/quantumgenetics/quantumgenetics
|
quantumgenetics
|
#!/usr/bin/env python3
import logging
from datetime import datetime
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
logger = logging.getLogger(__name__)
def map_circuit(circuit_qcount, gate_map, measurements):
    """Build a circuit by applying gate_map[m] for each measurement value m.

    circuit_qcount: size of both the quantum and classical registers.
    gate_map: mapping from measurement value to a callable that appends
        gates to the circuit in place.
    measurements: iterable of measurement values, applied in order.
    """
    logger.info('Producing circuit...')
    started = datetime.now()
    quantum_register = QuantumRegister(circuit_qcount)
    classical_register = ClassicalRegister(circuit_qcount)
    circuit = QuantumCircuit(quantum_register, classical_register)
    for measurement in measurements:
        gate_map[measurement](circuit)
    elapsed = datetime.now() - started
    logger.info('Circuit produced ({} s)'.format(elapsed.total_seconds()))
    return circuit
|
https://github.com/quantumgenetics/quantumgenetics
|
quantumgenetics
|
#!/usr/bin/env python3
import logging
from datetime import datetime
from qiskit import QuantumCircuit
logger = logging.getLogger(__name__)
def initialized_empty_circuit(qubit_count, state):
    """Create a fresh circuit of *qubit_count* qubits/clbits initialized to *state*."""
    logger.info('Produce circuit')
    empty = QuantumCircuit(qubit_count, qubit_count)
    return initialized_circuit(empty, state)
def initialized_circuit(circuit, state):
    """Prepend state initialization and append measurement to *circuit*.

    circuit: a QuantumCircuit with one quantum and one classical register.
    state: amplitude vector of length 2**qubit_count, fed to initialize().

    Returns a new circuit: initialize(state), then the original gates, then
    a barrier and a full measurement.  The input circuit is not modified
    (a copy is taken).
    """
    qubit_count = len(circuit.qubits)
    # NOTE: assert is stripped under `python -O`; this validates only in debug runs.
    assert len(state) == 2**qubit_count
    pre_circuit = QuantumCircuit(circuit.qregs[0], circuit.cregs[0])
    logger.info('Initializing circuit...')
    before = datetime.now()
    pre_circuit.initialize(state, range(qubit_count))
    delta = datetime.now() - before
    logger.info('Circuit initialized ({} s)'.format(delta.total_seconds()))
    post_circuit = circuit.copy()
    post_circuit.barrier(post_circuit.qregs[0])
    post_circuit.measure(range(qubit_count), range(qubit_count))
    # `+` concatenation of circuits is the old Qiskit Terra API (current
    # releases use `compose` instead) — kept as found for this codebase's era.
    return pre_circuit + post_circuit
|
https://github.com/quantumgenetics/quantumgenetics
|
quantumgenetics
|
#!/usr/bin/env python3
import logging
from datetime import datetime
from qiskit import Aer, IBMQ
from qiskit import execute as qiskit_execute
from qiskit.providers.aer import noise
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors import pauli_error
# List of gate times for ibmq_14_melbourne device
# NOTE(review): the comment above says 14-qubit but _REMOTE_BACKEND_NAME below
# is ibmq_16_melbourne — kept as found; confirm which device is intended.
_EXEC_SHOTS = 1  # default shot count for execute()
# (gate name, qubits or None for all, duration) triples fed to the noise model.
_GATE_TIMES = [
    ('u1', None, 0), ('u2', None, 100), ('u3', None, 200),
    ('cx', [1, 0], 678), ('cx', [1, 2], 547), ('cx', [2, 3], 721),
    ('cx', [4, 3], 733), ('cx', [4, 10], 721), ('cx', [5, 4], 800),
    ('cx', [5, 6], 800), ('cx', [5, 9], 895), ('cx', [6, 8], 895),
    ('cx', [7, 8], 640), ('cx', [9, 8], 895), ('cx', [9, 10], 800),
    ('cx', [11, 10], 721), ('cx', [11, 3], 634), ('cx', [12, 2], 773),
    ('cx', [13, 1], 2286), ('cx', [13, 12], 1504), ('cx', [], 800)
]
_LOCAL_BACKEND_NAME = 'qasm_simulator'  # Aer simulator used for execution
# Pauli-X error probabilities for the custom noise model — unusually large;
# presumably chosen for demonstration rather than realism (TODO confirm).
_P_RESET = 0.8
_P_MEAS = 0.6
_P_GATE1 = 0.7
_REMOTE_BACKEND_NAME = 'ibmq_16_melbourne'  # device supplying topology/noise data
logger = logging.getLogger(__name__)
def ibm_noise_configuration(remote_backend=_REMOTE_BACKEND_NAME, gate_times=_GATE_TIMES):
    """Derive a device-based noise model and coupling map from a real backend.

    remote_backend: IBMQ backend name whose calibration data is used.
    gate_times: per-gate duration list for the basic device noise model.

    Returns (noise_model, coupling_map).  Requires a saved IBMQ account.
    NOTE(review): noise.device.basic_device_noise_model is an old qiskit-aer
    API — presumably removed in current releases; this module targets that era.
    """
    logger.info('Produce IBM noise configuration')
    logger.info('Loading IBM account...')
    before = datetime.now()
    provider = IBMQ.load_account()
    delta = datetime.now() - before
    logger.info('IBM account loaded ({} s)'.format(delta.total_seconds()))
    device = provider.get_backend(remote_backend)
    noise_model = noise.device.basic_device_noise_model(device.properties(),
                                                        gate_times=gate_times)
    coupling_map = device.configuration().coupling_map
    return (noise_model, coupling_map)
def custom_noise_configuration(remote_backend=_REMOTE_BACKEND_NAME):
    """Build a hand-tuned bit-flip noise model plus the device coupling map.

    Only the coupling map comes from the remote backend; the noise itself is
    synthetic: single-qubit Pauli-X errors with probabilities _P_RESET,
    _P_MEAS and _P_GATE1, and a two-qubit tensor of the gate error for cx.

    Returns (noise_model, coupling_map).  Requires a saved IBMQ account.
    """
    logger.info('Produce custom noise configuration')
    logger.info('Loading IBM account...')
    before = datetime.now()
    provider = IBMQ.load_account()
    delta = datetime.now() - before
    logger.info('IBM account loaded ({} s)'.format(delta.total_seconds()))
    device = provider.get_backend(remote_backend)
    coupling_map = device.configuration().coupling_map
    # QuantumError objects: X with probability p, identity otherwise.
    error_reset = pauli_error([('X', _P_RESET), ('I', 1 - _P_RESET)])
    error_meas = pauli_error([('X', _P_MEAS), ('I', 1 - _P_MEAS)])
    error_gate1 = pauli_error([('X', _P_GATE1), ('I', 1 - _P_GATE1)])
    error_gate2 = error_gate1.tensor(error_gate1)
    # Add errors to noise model
    noise_bit_flip = NoiseModel()
    noise_bit_flip.add_all_qubit_quantum_error(error_reset, "reset")
    noise_bit_flip.add_all_qubit_quantum_error(error_meas, "measure")
    noise_bit_flip.add_all_qubit_quantum_error(error_gate1, ["u1", "u2", "u3"])
    noise_bit_flip.add_all_qubit_quantum_error(error_gate2, ["cx"])
    return (noise_bit_flip, coupling_map)
def execute(circuit,
            *,
            configuration=None,
            local_simulator=_LOCAL_BACKEND_NAME,
            shots=_EXEC_SHOTS):
    """Run *circuit* on a local Aer simulator and return its counts dict.

    configuration: optional (noise_model, coupling_map) pair as produced by
        ibm_noise_configuration / custom_noise_configuration; when given,
        the simulation applies the noise model, coupling map and the noise
        model's basis gates.
    local_simulator: Aer backend name to run on.
    shots: number of shots.

    NOTE: this function intentionally shadows qiskit's own execute, which is
    imported above under the alias qiskit_execute.
    """
    logger.info('Execute circuit')
    simulator = Aer.get_backend(local_simulator)
    logger.info('Executing...')
    before = datetime.now()
    if configuration:
        noise_model = configuration[0]
        coupling_map = configuration[1]
        # max_parallel_threads/shots = 0 lets Aer pick the parallelism itself.
        result = qiskit_execute(circuit,
                                simulator,
                                noise_model=noise_model,
                                coupling_map=coupling_map,
                                basis_gates=noise_model.basis_gates,
                                shots=shots,
                                backend_options={
                                    "max_parallel_threads":0,
                                    'max_parallel_shots':0}
                                ).result()
    else:
        result = qiskit_execute(circuit, simulator, shots=shots).result()
    delta = datetime.now() - before
    logger.info('Execution completed ({} s)'.format(delta.total_seconds()))
    return result.get_counts(circuit)
|
https://github.com/VGGatGitHub/2020-QISKit-Summer-Jam
|
VGGatGitHub
|
import warnings
#warnings.filterwarnings('ignore', 'DeprecationWarning')
warnings.filterwarnings('once')
#%pip uninstall qiskit
#%pip install qiskit #==0.16
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
# useful additional packages
import matplotlib.pyplot as plt
import matplotlib.axes as axes
%matplotlib inline
import numpy as np
import networkx as nx
from qiskit import Aer
from qiskit.tools.visualization import plot_histogram
#VGG todo 1: the equivalent run_algorithm and EnergyInput versions updates
#from qiskit.aqua.translators.ising import max_cut, tsp
#from qiskit.aqua import run_algorithm
#from qiskit.aqua.input import EnergyInput
#old v0.16# from qiskit.optimization.ising import max_cut, tsp
#old v0.16# from qiskit.optimization.ising.common import sample_most_likely
#older# from qiskit.optimization.ising import docplex
from qiskit.optimization.applications.ising import max_cut, tsp
from qiskit.optimization.applications.ising.common import sample_most_likely
from qiskit.optimization.applications.ising import docplex
from qiskit.aqua.algorithms import VQE
from qiskit.aqua.algorithms import NumPyEigensolver as ExactEigensolver
from qiskit.aqua.components.optimizers import SPSA
#from qiskit.aqua.components.variational_forms import RY #RealAmplitudes
from qiskit.circuit.library import RealAmplitudes as RY
from qiskit.aqua import QuantumInstance
# setup aqua logging
import logging
from qiskit.aqua import set_qiskit_aqua_logging
# set_qiskit_aqua_logging(logging.DEBUG) # choose INFO, DEBUG to see the log
from qiskit import IBMQ
provider = IBMQ.load_account()
#VGG select the backend for coupling_map
try:
backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex')
#backend = provider.get_backend('ibmq_london')#'ibmq_16_melbourne')#'ibmq_essex')
#backend = provider.get_backend('ibmq_5_yorktown')#'ibmq_london')#'ibmq_16_melbourne')#'ibmq_essex')
except:
backend = Aer.get_backend('qasm_simulator') #VGG it was 'BasicAer.get_backend' ibmq_16_melbourne
coupling_map = backend.configuration().coupling_map
print(coupling_map)
#VGG we will generate a diferent coupling_map for the graph
#coupling_map=None
from typing import List, Tuple
seed = 19120623
np.random.seed(seed)
#VGG: function adopted from the Rigetti's MaxCutQAOA.ipynb
def generate_ising_graph(edges: List[Tuple[int, int]]) -> nx.Graph:
    """Build a graph from *edges* with random edge weights normalized to unit norm."""
    graph = nx.from_edgelist(edges)
    # Random weights in [0, 1] (adapted from Rigetti's MaxCutQAOA notebook,
    # which used [-1, 1]); normalized so the weight vector has norm 1.
    raw_weights: np.ndarray = np.random.rand(graph.number_of_edges())
    raw_weights /= np.linalg.norm(raw_weights)
    weight_attrs = {edge: {'weight': value} for edge, value in zip(graph.edges, raw_weights)}
    nx.set_edge_attributes(graph, weight_attrs)
    return graph
if coupling_map != None:
G=generate_ising_graph(coupling_map)
n=G.number_of_nodes()
print(n)
# Generating a graph if there were no coupling_map
if coupling_map== None:
#define the edges / coupling_map
#'ibmq_16_melbourne'
elist=[[0, 1], [0, 14], [1, 0], [1, 2], [1, 13], [2, 1], [2, 3], [2, 12],
[3, 2], [3, 4], [3, 11], [4, 3], [4, 5], [4, 10], [5, 4], [5, 6],
[5, 9], [6, 5], [6, 8], [7, 8], [8, 6], [8, 7], [8, 9], [9, 5], [9, 8],
[9, 10], [10, 4], [10, 9], [10, 11], [11, 3], [11, 10], [11, 12], [12, 2],
[12, 11], [12, 13], [13, 1], [13, 12], [13, 14], [14, 0], [14, 13]]
#elist=[[0,1],[0,2],[0,3],[1,2],[2,3],[0,4],[0,2], [4, 3],[1,5],[3,5]]
elist=[[0,1],[0,2],[0,3],[1,2],[2,3],[0,4],[0,2], [4, 3]]
G=generate_ising_graph(elist)
n=G.number_of_nodes()
#other ways to define the graph
#n=5 # Number of nodes in graph
#G=nx.Graph()
#G.add_nodes_from(np.arange(0,n,1))
#ewlist=[(0,1,1.),(0,2,.5),(0,3,0),(1,2,1.0),(0,3,1.0)]
#G1 = nx.from_edgelist(elist)
#G1.add_weighted_edges_from(ewlist)
coupling_map = backend.configuration().coupling_map
#Visulaize
print(G.number_of_nodes(),G.number_of_edges())
colors = ['r' for node in G.nodes()]
pos = nx.spring_layout(G)
default_axes = plt.axes(frameon=True)
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=.8, ax=default_axes, pos=pos)
nx.drawing.nx_pylab.draw(G)
elist=G.edges()
print("elist=",elist)
ewlist=[(i,j,G.get_edge_data(i,j,default=0)['weight']) for i,j in G.edges()]
print('ewlist=',ewlist)
def issymmetric(Matrix):
    """Return True when *Matrix* is a square, exactly symmetric 2-D array.

    Prints a diagnostic and returns False at the first failure found.
    Comparison is exact (==); use a tolerance upstream if needed.
    """
    dim = Matrix.shape[0]
    if Matrix.shape[1] != dim:
        print("Shape Error!")
        return False
    for i in range(dim):
        for j in range(i, dim):
            if Matrix[i, j] != Matrix[j, i]:
                # Bug fix: this branch previously printed "Shape Error:" even
                # though the shape is fine — it is a symmetry failure at (i, j).
                print("Symmetry Error:", (i, j), Matrix[i, j], Matrix[j, i],
                      "difference:", Matrix[i, j] - Matrix[j, i])
                return False
    return True
# Computing the weight matrix from the random graph
w = np.zeros([n,n])
w = np.eye(n)
for i in range(n):
for j in range(n):
temp = G.get_edge_data(i,j,default=0)
if temp != 0:
w[i,j] = temp['weight']
w/=np.linalg.det(w)**(1/n)
print("Symmetric:",issymmetric(w),"Norm:",np.linalg.norm(w))
print("Eignvlues:",np.linalg.eigvals(w),"det:",np.linalg.det(w))
print(w)
np.sum(w)/4 #the offset value
def Max_Cut_BF(W, *x0, n_nodes=None):
    """Brute-force search for the best weighted max-cut assignment.

    W: (n, n) weight matrix.
    *x0: optional constraint vectors h; each subtracts dot(h, x)/n from the
        cost, discouraging nodes with h == 1 (no repeated free samples).
    n_nodes: problem size; defaults to the module-level global n for
        backward compatibility (generalized from the hidden global).

    Returns (best_cost, best_x) where best_x is a 0/1 list.  Ties on cost
    (np.isclose) are broken in favor of the assignment with fewer ones.
    """
    size = n if n_nodes is None else n_nodes
    best_cost_brute = 0
    xbest_brute = np.array([1] * size)
    for b in range(2 ** size):
        # Enumerate bit pattern b as a little-endian 0/1 assignment vector.
        x = [int(t) for t in reversed(list(bin(b)[2:].zfill(size)))]
        cost = 0
        for h in x0:
            cost -= np.dot(h, x) / size  # penalize already-sampled nodes
        for i in range(size):
            # Added once per i, so the total bonus is (2 - |x|)/2, favoring
            # assignments with fewer ones.
            cost += (2 - np.dot(x, x)) / size / 2
            for j in range(size):
                cost += W[i, j] * x[i] * (1 - x[j])
        if np.isclose(cost, best_cost_brute):
            if sum(x) < sum(xbest_brute):
                best_cost_brute = cost
                xbest_brute = x
        else:
            if best_cost_brute < cost:
                best_cost_brute = cost
                xbest_brute = x
    return best_cost_brute, xbest_brute
%%time
if n < 10:
best_cost_brute, xbest_brute = Max_Cut_BF(w)
colors = ['r' if xbest_brute[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=.8, pos=pos)
print('\nBest solution = ' + str(xbest_brute) + ' cost = ' + str(best_cost_brute))
np.set_printoptions(precision=3)
print(w)
def market_simulations(m,*opt):
    """Run m days of a free-sample market simulation over the global graph.

    m: number of simulated days.
    *opt: 'q' -> solve daily max-cut with Max_Cut_IBMQ (VQE);
          'dcplx' -> Max_Cut_Dcplx; otherwise brute-force Max_Cut_BF
          (skipped entirely when n > 10).

    Uses module-level globals n, w, backend.  Prints a per-day report and
    returns None.

    Bug fix: the all-zero test on x_free_samples referenced the bound method
    (`.all`, which is always truthy) instead of calling it, so the
    else-branch of that test could never execute.
    """
    free_samples=0
    boughten=0
    Mw2 = np.zeros([n,n])+w          # evolving copy of the weight matrix
    relations=np.zeros([n,n])        # weekly accumulator of buyer/sample relations
    x_free_samples=np.int_(np.zeros(n))
    np.set_printoptions(precision=2)
    if 'q' in opt:
        print("Using Max_Cut option:",'q')
        print("submiting for results using:",backend)
    elif 'dcplx' in opt:
        print("Using Max_Cut option:",'Docplex')
    else:
        print("Using Max_Cut_BF")
        if n > 10 :
            print("It may take too long to do Brute Force Calulations - skiping!")
            return
    print("day"," [free samples]"," [buyers distribution]"," [to be used as constrain]"," the two totals"," w-det")
    for i in range(m):
        if sum(x_free_samples)>n*2/3:
            #VGG In the future one can consider giving out to these who have not recived yet!
            x_free_samples = np.array([1]*n)-np.int_(x_free_samples!=0)
            if (x_free_samples==0).all():  # fixed: was `.all` (method object, always truthy)
                x=np.array([0]*n)
                xbest_brute=np.array([0]*n)
                tmp1=Mw2/np.linalg.norm(Mw2) #select only those rows that recived free samples
                tmp=sum(tmp1[:,]) #sum probaility contributions
            else:
                x=x_free_samples
                free_samples+=sum(x)
                tmp1=Mw2[x==1]/np.linalg.norm(Mw2) #select only those rows that recived free samples
                tmp=sum(tmp1[:,],(np.array([1]*n)-x)) #sum probaility contributions
                tmp-=np.array([1]*n) #push to negative those with free samples
        else:
            # Pick today's free-sample recipients by solving a constrained max-cut.
            if 'q' in opt:
                best_cost_brute, xbest_brute = Max_Cut_IBMQ(Mw2,x_free_samples)
            elif 'dcplx' in opt:
                best_cost_brute, xbest_brute = Max_Cut_Dcplx(Mw2,x_free_samples)
            else:
                best_cost_brute, xbest_brute = Max_Cut_BF(Mw2,x_free_samples)
            x=np.array(xbest_brute)
            x_free_samples+=x
            free_samples+=sum(x)
            tmp1=Mw2[x==1]/np.linalg.norm(Mw2) #select only those rows that recived free samples
            tmp=sum(tmp1[:,],(np.array([1]*n)-x)) #sum probaility contributions
            tmp-=np.array([1]*n) #push to negative those with free samples
        #print(tmp)
        # Stochastic purchases: node j buys when its influence score beats a
        # random threshold scaled by the total positive influence.
        ab=sum(tmp[tmp > 0])
        for j in range(n):
            test=np.random.uniform()*ab/2
            if tmp[j] > test: #buy the product
                x[j]+=1
                boughten+=1
        x0=np.array(xbest_brute)
        x-=x0
        relation_today=x.reshape(n,1) @ x0.reshape(1,n)
        relation_today+=relation_today.T
        relations+=relation_today
        #print(x0,x,"\n",relation_today)
        #print(x0,x,x_free_samples,tmp)
        print(i,x0,x,x_free_samples,free_samples, boughten,'{:6.4f}'.format(np.linalg.det(Mw2)))
        if i%4==0 : #weekely updates of the w matrix
            Mw2+=(np.eye(n)+relations)/n/100 #update the w matrix
            relations=np.zeros([n,n])
            # Renormalize: by det^(1/n) when well conditioned, else Frobenius norm.
            if issymmetric(Mw2) and np.linalg.det(Mw2)>0.:
                Mw2/=np.linalg.det(Mw2)**(1/n)
            else:
                Mw2/=np.linalg.norm(Mw2,ord='fro')
    print("\nlast day configuration record:\n")
    print(x0,tmp)
    print()
    print(x,free_samples, boughten, '{:6.4f}'.format(np.linalg.norm(Mw2)),'{:6.4f}'.format(np.linalg.det(Mw2)))
    print()
    print(Mw2)
    return
%time market_simulations(10)
#VGG qubitOp, offset = max_cut.get_max_cut_qubitops(w)
qubitOp, offset = max_cut.get_operator(w)
#algo_input = EnergyInput(qubitOp)
offset
#Making the Hamiltonian in its full form and getting the lowest eigenvalue and eigenvector
ee = ExactEigensolver(qubitOp, k=3)
result = ee.run()
print("energys:",result['eigenvalues'].real)
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstates'][0])
print('energy:', result['eigenvalues'][0],', offset:',offset)
print('max-cut objective:', result['eigenvalues'][0] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
#VGG note that the other runs had implemeneted soft constrains!
from docplex.mp.model import Model
#VGG from qiskit.aqua.translators.ising import docplex
#older# from qiskit.optimization.ising import docplex
# Create an instance of a model and variables.
mdl = Model(name='max_cut')
y = {i: mdl.binary_var(name='x_{0}'.format(i)) for i in range(n)}
# Object function
#VGG added y[i]/100 term to split the degenerate 1<->0 states in favor of less free samples
max_cut_func = mdl.sum(y[i]/50+w[i,j]* y[i] * ( 1 - y[j] ) for i in range(n) for j in range(n))
mdl.maximize(max_cut_func)
# No constraints for Max-Cut problems.
qubitOp_docplex, offset_docplex = docplex.get_operator(mdl)
offset_docplex
#VGG define the above as a function
def set_up_Dcplx_model(W, *c_x0):
    """Build the docplex max-cut model for weight matrix W.

    Each optional vector h in *c_x0 adds a penalty discouraging variables
    where h[i] == 1 (nodes that already received free samples).  Uses the
    module-level global n for the problem size.

    Returns (qubit_operator, offset) from docplex.get_operator.
    """
    model = Model(name='max_cut')
    y = {i: model.binary_var(name='y_{0}'.format(i)) for i in range(n)}
    # Favor assignments with fewer ones, mirroring the brute-force cost term.
    objective = model.sum((-1)*(2-y[i])*0.5+(-0)*y[i]/100 for i in range(n))
    # Penalize handing a free sample to nodes flagged in the constraint vectors.
    objective += model.sum(h[i]*y[i]*0.55 for i in range(n) for h in c_x0)
    # The max-cut objective proper.
    objective += model.sum(W[i,j]* y[i] * ( 1 - y[j] ) for i in range(n) for j in range(n))
    model.maximize(objective)
    qubitOp, offset = docplex.get_operator(model)
    return qubitOp, offset
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w)
print(offset_docplex,x)
#Making the Hamiltonian in its full form and getting the lowest eigenvalue and eigenvector
ee = ExactEigensolver(qubitOp_docplex, k=3)
result = ee.run()
print("energys:",result['eigenvalues'].real)
x = sample_most_likely(result['eigenstates'][0])
print('energy:', result['eigenvalues'][0].real)
print('max-cut objective:', result['eigenvalues'][0].real + offset_docplex)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
x=max_cut.get_graph_solution(x).tolist()
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w,x)
print(x,offset_docplex)
#VGG note if you keep executing this cell you can see diferent configurations
#VGG define the above as a function
def Max_Cut_Dcplx(W, *c_x0):
    """Solve max-cut for W classically via docplex + the exact eigensolver.

    Builds the unconstrained model first, then rebuilds once per constraint
    vector in *c_x0 (so only the last one takes effect, as in the original).

    Returns (lowest_eigenvalue, solution_bit_list).
    """
    operator, offset = set_up_Dcplx_model(W)
    for constraint in c_x0:
        operator, offset = set_up_Dcplx_model(W, constraint)
    solver = ExactEigensolver(operator, k=3)
    outcome = solver.run()
    sample = sample_most_likely(outcome['eigenstates'][0])
    x_dcplx = max_cut.get_graph_solution(sample).tolist()
    cost_dcplx = outcome['eigenvalues'][0].real
    #cost_dcplx=max_cut.max_cut_value(sample, W)
    return cost_dcplx, x_dcplx
%time market_simulations(10,'dcplx')
warnings.filterwarnings('once')
model=qubitOp_docplex
#model=qubitOp
backend1 = Aer.get_backend('statevector_simulator')
backend2 = Aer.get_backend('qasm_simulator') #VGG it was 'BasicAer.get_backend' ibmq_16_melbourne
seed = 10598
spsa = SPSA(max_trials=10)
ry = RY(model.num_qubits, entanglement='linear') #depth=5,
vqe = VQE(model, ry, spsa)
#VGG backend1 = Aer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend2, seed_simulator=seed, seed_transpiler=seed)
print(backend2)
result = vqe.run(quantum_instance)
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstate'])
print('energy:', result['eigenvalue'])
print('time:', result['optimizer_time'])
print('max-cut objective:', result['eigenvalue'] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
x=max_cut.get_graph_solution(x).tolist()
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w,x)
print(x,offset_docplex)
#warnings.filterwarnings('ignore', 'DeprecationWarning')
warnings.filterwarnings('once')
# run quantum algorithm with shots
seed = 10598
spsa = SPSA(max_trials=30)
ry = RY(model.num_qubits, entanglement='linear') #depth=5,
vqe = VQE(model, ry, spsa)
backend2 = Aer.get_backend('qasm_simulator') #VGG it was 'BasicAer.get_backend' ibmq_16_melbourne
quantum_instance = QuantumInstance(backend2, shots=1024, seed_simulator=seed, seed_transpiler=seed)
print(backend2)
result = vqe.run(quantum_instance)
"""declarative approach, update the param from the previous cell.
params['backend']['provider'] = 'qiskit.BasicAer'
params['backend']['name'] = 'qasm_simulator'
params['backend']['shots'] = 1024
result = run_algorithm(params, algo_input)
"""
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstate'])
print('energy:', result['eigenvalue'])
print('time:', result['optimizer_time'])
print('max-cut objective:', result['eigenvalue'] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
plot_histogram(result['eigenstate'])
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
x=max_cut.get_graph_solution(x).tolist()
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w,x)
print(x,offset_docplex)
plot_histogram(result['eigenstate'])
backend_old=backend
#backend=backend2
#warnings.filterwarnings('ignore', 'DeprecationWarning')
# run quantum algorithm with shots
seed = 10598
spsa = SPSA(max_trials=10) #VGG 300
ry = RY(model.num_qubits, entanglement='linear') #depth=5,
vqe = VQE(model, ry, spsa)
#backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex' # ibmq_london
#backend = provider.get_backend('ibmq_qasm_simulator')
quantum_instance = QuantumInstance(backend, shots=1024, seed_simulator=seed, seed_transpiler=seed)
print("submiting for results using:",backend)
result = vqe.run(quantum_instance)
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstate'])
print('energy:', result['eigenvalue'])
print('time:', result['optimizer_time'])
print('max-cut objective:', result['eigenvalue'] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
plot_histogram(result['eigenstate'])
#VGG define the above as a function
def Max_Cut_IBMQ(W, *c_x0):
    """Solve max-cut for W with VQE on the module-level backend.

    Same model construction as Max_Cut_Dcplx: only the last constraint
    vector in *c_x0 takes effect.  Uses globals backend and seed.

    Returns (cut_value, solution_bit_list).
    """
    operator, _offset = set_up_Dcplx_model(W)
    for constraint in c_x0:
        operator, _offset = set_up_Dcplx_model(W, constraint)
    optimizer = SPSA(max_trials=10) #VGG 300
    ansatz = RY(operator.num_qubits, entanglement='linear') #depth=5,
    vqe = VQE(operator, ansatz, optimizer)
    quantum_instance = QuantumInstance(backend, shots=1024, seed_simulator=seed, seed_transpiler=seed)
    result = vqe.run(quantum_instance)
    sample = sample_most_likely(result['eigenstate'])
    cost_vqe = max_cut.max_cut_value(sample, W)
    x_vqe = np.int_(max_cut.get_graph_solution(sample)).tolist()
    return cost_vqe, x_vqe
%time market_simulations(10)
%time market_simulations(10,'dcplx')
print(backend)
backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex' # ibmq_london
#backend = provider.get_backend('ibmq_qasm_simulator')
#backend = Aer.get_backend('qasm_simulator')
#backend = Aer.get_backend('statevector_simulator')
print(backend)
backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex' # ibmq_london
#backend = provider.get_backend('ibmq_qasm_simulator')
#backend = Aer.get_backend('qasm_simulator')
#backend = Aer.get_backend('statevector_simulator')
%time market_simulations(10,'q')
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/VGGatGitHub/2020-QISKit-Summer-Jam
|
VGGatGitHub
|
import warnings
#warnings.filterwarnings('ignore', 'DeprecationWarning')
warnings.filterwarnings('once')
#%pip uninstall qiskit
#%pip install qiskit #==0.16
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
# useful additional packages
import matplotlib.pyplot as plt
import matplotlib.axes as axes
%matplotlib inline
import numpy as np
import networkx as nx
from qiskit import Aer
from qiskit.tools.visualization import plot_histogram
#VGG todo 1: the equivalent run_algorithm and EnergyInput versions updates
#from qiskit.aqua.translators.ising import max_cut, tsp
#from qiskit.aqua import run_algorithm
#from qiskit.aqua.input import EnergyInput
#old v0.16# from qiskit.optimization.ising import max_cut, tsp
#old v0.16# from qiskit.optimization.ising.common import sample_most_likely
#older# from qiskit.optimization.ising import docplex
from qiskit.optimization.applications.ising import max_cut, tsp
from qiskit.optimization.applications.ising.common import sample_most_likely
from qiskit.optimization.applications.ising import docplex
from qiskit.aqua.algorithms import VQE
from qiskit.aqua.algorithms import NumPyEigensolver as ExactEigensolver
from qiskit.aqua.components.optimizers import SPSA
#from qiskit.aqua.components.variational_forms import RY #RealAmplitudes
from qiskit.circuit.library import RealAmplitudes as RY
from qiskit.aqua import QuantumInstance
# setup aqua logging
import logging
from qiskit.aqua import set_qiskit_aqua_logging
# set_qiskit_aqua_logging(logging.DEBUG) # choose INFO, DEBUG to see the log
from qiskit import IBMQ
provider = IBMQ.load_account()
#VGG select the backend for coupling_map
try:
backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex')
#backend = provider.get_backend('ibmq_london')#'ibmq_16_melbourne')#'ibmq_essex')
#backend = provider.get_backend('ibmq_5_yorktown')#'ibmq_london')#'ibmq_16_melbourne')#'ibmq_essex')
except:
backend = Aer.get_backend('qasm_simulator') #VGG it was 'BasicAer.get_backend' ibmq_16_melbourne
coupling_map = backend.configuration().coupling_map
print(coupling_map)
#VGG we will generate a diferent coupling_map for the graph
#coupling_map=None
from typing import List, Tuple
seed = 19120623
np.random.seed(seed)
#VGG: function adopted from the Rigetti's MaxCutQAOA.ipynb
def generate_ising_graph(edges: List[Tuple[int, int]]) -> nx.Graph:
    """Build a graph from *edges* with random edge weights normalized to unit norm."""
    graph = nx.from_edgelist(edges)
    # Random weights in [0, 1] (adapted from Rigetti's MaxCutQAOA notebook,
    # which used [-1, 1]); normalized so the weight vector has norm 1.
    raw_weights: np.ndarray = np.random.rand(graph.number_of_edges())
    raw_weights /= np.linalg.norm(raw_weights)
    weight_attrs = {edge: {'weight': value} for edge, value in zip(graph.edges, raw_weights)}
    nx.set_edge_attributes(graph, weight_attrs)
    return graph
if coupling_map != None:
G=generate_ising_graph(coupling_map)
n=G.number_of_nodes()
print(n)
# Generating a graph if there were no coupling_map
if coupling_map== None:
#define the edges / coupling_map
#'ibmq_16_melbourne'
elist=[[0, 1], [0, 14], [1, 0], [1, 2], [1, 13], [2, 1], [2, 3], [2, 12],
[3, 2], [3, 4], [3, 11], [4, 3], [4, 5], [4, 10], [5, 4], [5, 6],
[5, 9], [6, 5], [6, 8], [7, 8], [8, 6], [8, 7], [8, 9], [9, 5], [9, 8],
[9, 10], [10, 4], [10, 9], [10, 11], [11, 3], [11, 10], [11, 12], [12, 2],
[12, 11], [12, 13], [13, 1], [13, 12], [13, 14], [14, 0], [14, 13]]
#elist=[[0,1],[0,2],[0,3],[1,2],[2,3],[0,4],[0,2], [4, 3],[1,5],[3,5]]
elist=[[0,1],[0,2],[0,3],[1,2],[2,3],[0,4],[0,2], [4, 3]]
G=generate_ising_graph(elist)
n=G.number_of_nodes()
#other ways to define the graph
#n=5 # Number of nodes in graph
#G=nx.Graph()
#G.add_nodes_from(np.arange(0,n,1))
#ewlist=[(0,1,1.),(0,2,.5),(0,3,0),(1,2,1.0),(0,3,1.0)]
#G1 = nx.from_edgelist(elist)
#G1.add_weighted_edges_from(ewlist)
coupling_map = backend.configuration().coupling_map
#Visulaize
print(G.number_of_nodes(),G.number_of_edges())
colors = ['r' for node in G.nodes()]
pos = nx.spring_layout(G)
default_axes = plt.axes(frameon=True)
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=.8, ax=default_axes, pos=pos)
nx.drawing.nx_pylab.draw(G)
elist=G.edges()
print("elist=",elist)
ewlist=[(i,j,G.get_edge_data(i,j,default=0)['weight']) for i,j in G.edges()]
print('ewlist=',ewlist)
def issymmetric(Matrix):
    """Return True when *Matrix* is a square, exactly symmetric 2-D array.

    Prints a diagnostic and returns False at the first failure found.
    Comparison is exact (==); use a tolerance upstream if needed.
    """
    dim = Matrix.shape[0]
    if Matrix.shape[1] != dim:
        print("Shape Error!")
        return False
    for i in range(dim):
        for j in range(i, dim):
            if Matrix[i, j] != Matrix[j, i]:
                # Bug fix: this branch previously printed "Shape Error:" even
                # though the shape is fine — it is a symmetry failure at (i, j).
                print("Symmetry Error:", (i, j), Matrix[i, j], Matrix[j, i],
                      "difference:", Matrix[i, j] - Matrix[j, i])
                return False
    return True
# Computing the weight matrix from the random graph
w = np.zeros([n,n])
w = np.eye(n)
for i in range(n):
for j in range(n):
temp = G.get_edge_data(i,j,default=0)
if temp != 0:
w[i,j] = temp['weight']
w/=np.linalg.det(w)**(1/n)
print("Symmetric:",issymmetric(w),"Norm:",np.linalg.norm(w))
print("Eignvlues:",np.linalg.eigvals(w),"det:",np.linalg.det(w))
print(w)
np.sum(w)/4 #the offset value
def Max_Cut_BF(W, *x0, n_nodes=None):
    """Brute-force search for the best weighted max-cut assignment.

    W: (n, n) weight matrix.
    *x0: optional constraint vectors h; each subtracts dot(h, x)/n from the
        cost, discouraging nodes with h == 1 (no repeated free samples).
    n_nodes: problem size; defaults to the module-level global n for
        backward compatibility (generalized from the hidden global).

    Returns (best_cost, best_x) where best_x is a 0/1 list.  Ties on cost
    (np.isclose) are broken in favor of the assignment with fewer ones.
    """
    size = n if n_nodes is None else n_nodes
    best_cost_brute = 0
    xbest_brute = np.array([1] * size)
    for b in range(2 ** size):
        # Enumerate bit pattern b as a little-endian 0/1 assignment vector.
        x = [int(t) for t in reversed(list(bin(b)[2:].zfill(size)))]
        cost = 0
        for h in x0:
            cost -= np.dot(h, x) / size  # penalize already-sampled nodes
        for i in range(size):
            # Added once per i, so the total bonus is (2 - |x|)/2, favoring
            # assignments with fewer ones.
            cost += (2 - np.dot(x, x)) / size / 2
            for j in range(size):
                cost += W[i, j] * x[i] * (1 - x[j])
        if np.isclose(cost, best_cost_brute):
            if sum(x) < sum(xbest_brute):
                best_cost_brute = cost
                xbest_brute = x
        else:
            if best_cost_brute < cost:
                best_cost_brute = cost
                xbest_brute = x
    return best_cost_brute, xbest_brute
%%time
if n < 10:
best_cost_brute, xbest_brute = Max_Cut_BF(w)
colors = ['r' if xbest_brute[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=.8, pos=pos)
print('\nBest solution = ' + str(xbest_brute) + ' cost = ' + str(best_cost_brute))
np.set_printoptions(precision=3)
print(w)
def market_simulations(m,*opt):
    """Simulate ``m`` days of free-sample marketing on the influence graph.

    Each day a max-cut solver picks who receives free samples, buyers are
    drawn stochastically from the influence rows of those recipients, and
    the working weight matrix ``Mw2`` is periodically updated from the
    observed giver/buyer relations.

    Parameters
    ----------
    m : int
        Number of simulated days.
    *opt : str
        ``'q'`` to solve with VQE on the global ``backend`` (Max_Cut_IBMQ),
        ``'dcplx'`` for the exact Docplex route (Max_Cut_Dcplx),
        otherwise brute force (Max_Cut_BF, refused for n > 10).

    Returns
    -------
    None — results are printed.

    NOTE(review): depends on module-level ``n``, ``w``, ``backend``,
    ``issymmetric`` and the Max_Cut_* solvers.
    """
    free_samples=0
    boughten=0
    Mw2 = np.zeros([n,n])+w          # working copy of the global weight matrix
    relations=np.zeros([n,n])
    x_free_samples=np.int_(np.zeros(n))   # per-node count of free samples received
    np.set_printoptions(precision=2)
    if 'q' in opt:
        print("Using Max_Cut option:",'q')
        print("submiting for results using:",backend)
    elif 'dcplx' in opt:
        print("Using Max_Cut option:",'Docplex')
    else:
        print("Using Max_Cut_BF")
        if n > 10 :
            print("It may take too long to do Brute Force Calulations - skiping!")
            return
    print("day"," [free samples]"," [buyers distribution]"," [to be used as constrain]"," the two totals"," w-det")
    for i in range(m):
        if sum(x_free_samples)>n*2/3:
            #VGG In the future one can consider giving out to these who have not recived yet!
            x_free_samples = np.array([1]*n)-np.int_(x_free_samples!=0)
            # BUGFIX: the original tested ``(x_free_samples==0).all`` — a bound
            # method object, always truthy — so the else-branch below was
            # unreachable.  Calling ``.all()`` restores the intended split.
            if (x_free_samples==0).all():
                x=np.array([0]*n)
                xbest_brute=np.array([0]*n)
                tmp1=Mw2/np.linalg.norm(Mw2) #select only those rows that recived free samples
                tmp=sum(tmp1[:,]) #sum probaility contributions
            else:
                x=x_free_samples
                free_samples+=sum(x)
                tmp1=Mw2[x==1]/np.linalg.norm(Mw2) #select only those rows that recived free samples
                tmp=sum(tmp1[:,],(np.array([1]*n)-x)) #sum probaility contributions
                tmp-=np.array([1]*n) #push to negative those with free samples
        else:
            # Pick today's free-sample recipients with the chosen solver,
            # constrained by who has already received samples.
            if 'q' in opt:
                best_cost_brute, xbest_brute = Max_Cut_IBMQ(Mw2,x_free_samples)
            elif 'dcplx' in opt:
                best_cost_brute, xbest_brute = Max_Cut_Dcplx(Mw2,x_free_samples)
            else:
                best_cost_brute, xbest_brute = Max_Cut_BF(Mw2,x_free_samples)
            x=np.array(xbest_brute)
            x_free_samples+=x
            free_samples+=sum(x)
            tmp1=Mw2[x==1]/np.linalg.norm(Mw2) #select only those rows that recived free samples
            tmp=sum(tmp1[:,],(np.array([1]*n)-x)) #sum probaility contributions
            tmp-=np.array([1]*n) #push to negative those with free samples
        #print(tmp)
        # Stochastic buying: node j buys when its influence score beats a
        # uniform draw scaled by half the total positive influence.
        ab=sum(tmp[tmp > 0])
        for j in range(n):
            test=np.random.uniform()*ab/2
            if tmp[j] > test: #buy the product
                x[j]+=1
                boughten+=1
        x0=np.array(xbest_brute)
        x-=x0          # x now holds buyers only (recipients removed)
        relation_today=x.reshape(n,1) @ x0.reshape(1,n)
        relation_today+=relation_today.T   # symmetrize giver<->buyer relation
        relations+=relation_today
        #print(x0,x,"\n",relation_today)
        #print(x0,x,x_free_samples,tmp)
        print(i,x0,x,x_free_samples,free_samples, boughten,'{:6.4f}'.format(np.linalg.det(Mw2)))
        if i%4==0 : #weekely updates of the w matrix
            Mw2+=(np.eye(n)+relations)/n/100 #update the w matrix
            relations=np.zeros([n,n])
            # Renormalize: by det^(1/n) when well-conditioned, else Frobenius.
            if issymmetric(Mw2) and np.linalg.det(Mw2)>0.:
                Mw2/=np.linalg.det(Mw2)**(1/n)
            else:
                Mw2/=np.linalg.norm(Mw2,ord='fro')
    print("\nlast day configuration record:\n")
    print(x0,tmp)
    print()
    print(x,free_samples, boughten, '{:6.4f}'.format(np.linalg.norm(Mw2)),'{:6.4f}'.format(np.linalg.det(Mw2)))
    print()
    print(Mw2)
    return
%time market_simulations(10)
#VGG qubitOp, offset = max_cut.get_max_cut_qubitops(w)
qubitOp, offset = max_cut.get_operator(w)
#algo_input = EnergyInput(qubitOp)
offset
#Making the Hamiltonian in its full form and getting the lowest eigenvalue and eigenvector
ee = ExactEigensolver(qubitOp, k=3)
result = ee.run()
print("energys:",result['eigenvalues'].real)
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstates'][0])
print('energy:', result['eigenvalues'][0],', offset:',offset)
print('max-cut objective:', result['eigenvalues'][0] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
#VGG note that the other runs had implemented soft constraints!
from docplex.mp.model import Model
#VGG from qiskit.aqua.translators.ising import docplex
#older# from qiskit.optimization.ising import docplex
# Create an instance of a model and variables.
mdl = Model(name='max_cut')
y = {i: mdl.binary_var(name='x_{0}'.format(i)) for i in range(n)}
# Object function
#VGG added y[i]/100 term to split the degenerate 1<->0 states in favor of less free samples
max_cut_func = mdl.sum(y[i]/50+w[i,j]* y[i] * ( 1 - y[j] ) for i in range(n) for j in range(n))
mdl.maximize(max_cut_func)
# No constraints for Max-Cut problems.
qubitOp_docplex, offset_docplex = docplex.get_operator(mdl)
offset_docplex
#VGG define the above as a function
def set_up_Dcplx_model(W,*c_x0):
    """Build a Docplex max-cut model over weight matrix ``W`` and convert it
    to a qubit operator.

    Parameters
    ----------
    W : 2-D weight matrix, indexed by the module-level node count ``n``.
    *c_x0 : optional 0/1 vectors; each adds a reward term ``0.55 * h[i] * y[i]``
        steering the solution relative to nodes flagged in ``h``.

    Returns
    -------
    (qubitOp, offset) as produced by ``docplex.get_operator``.
    """
    mdl = Model(name='max_cut')
    y = {i: mdl.binary_var(name='y_{0}'.format(i)) for i in range(n)}
    #VGG try to favor fewer free samples using (2-np.dot(x,x))/n/2
    #VGG split the degenerate 1<->0 states in favor of less free samples using x[i]/n**2
    max_cut_func=mdl.sum((-1)*(2-y[i])*0.5+(-0)*y[i]/100 for i in range(n))
    #VGG don't give free samples to those with h==1
    max_cut_func+=mdl.sum(h[i]*y[i]*0.55 for i in range(n) for h in c_x0)
    # Standard weighted max-cut objective: sum W[i,j] * y_i * (1 - y_j).
    max_cut_func+=mdl.sum(W[i,j]* y[i] * ( 1 - y[j] ) for i in range(n) for j in range(n))
    mdl.maximize(max_cut_func)
    qubitOp, offset = docplex.get_operator(mdl)
    return qubitOp, offset
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w)
print(offset_docplex,x)
#Making the Hamiltonian in its full form and getting the lowest eigenvalue and eigenvector
ee = ExactEigensolver(qubitOp_docplex, k=3)
result = ee.run()
print("energys:",result['eigenvalues'].real)
x = sample_most_likely(result['eigenstates'][0])
print('energy:', result['eigenvalues'][0].real)
print('max-cut objective:', result['eigenvalues'][0].real + offset_docplex)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
x=max_cut.get_graph_solution(x).tolist()
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w,x)
print(x,offset_docplex)
#VGG note if you keep executing this cell you can see diferent configurations
#VGG define the above as a function
def Max_Cut_Dcplx(W,*c_x0):
    """Solve max-cut on ``W`` exactly via the Docplex-derived qubit operator.

    Returns (cost, assignment) where cost is the lowest eigenvalue (real part)
    and assignment is the corresponding 0/1 solution list.
    """
    qubitOp_docplex, offset_docplex = set_up_Dcplx_model(W)
    # NOTE(review): when c_x0 is non-empty this loop rebuilds the model, so
    # the unconstrained build above is discarded and only the LAST h in c_x0
    # takes effect — confirm that is intended.
    for h in c_x0:
        qubitOp_docplex, offset_docplex = set_up_Dcplx_model(W,h)
    ee = ExactEigensolver(qubitOp_docplex,k=3)
    result = ee.run()
    x=sample_most_likely(result['eigenstates'][0])
    x_dcplx=max_cut.get_graph_solution(x).tolist()
    cost_dcplx=result['eigenvalues'][0].real
    #cost_dcplx=max_cut.max_cut_value(x, W)
    return cost_dcplx, x_dcplx
%time market_simulations(10,'dcplx')
warnings.filterwarnings('once')
model=qubitOp_docplex
#model=qubitOp
backend1 = Aer.get_backend('statevector_simulator')
backend2 = Aer.get_backend('qasm_simulator') #VGG it was 'BasicAer.get_backend' ibmq_16_melbourne
seed = 10598
spsa = SPSA(max_trials=10)
ry = RY(model.num_qubits, entanglement='linear') #depth=5,
vqe = VQE(model, ry, spsa)
#VGG backend1 = Aer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend2, seed_simulator=seed, seed_transpiler=seed)
print(backend2)
result = vqe.run(quantum_instance)
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstate'])
print('energy:', result['eigenvalue'])
print('time:', result['optimizer_time'])
print('max-cut objective:', result['eigenvalue'] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
x=max_cut.get_graph_solution(x).tolist()
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w,x)
print(x,offset_docplex)
#warnings.filterwarnings('ignore', 'DeprecationWarning')
warnings.filterwarnings('once')
# run quantum algorithm with shots
seed = 10598
spsa = SPSA(max_trials=30)
ry = RY(model.num_qubits, entanglement='linear') #depth=5,
vqe = VQE(model, ry, spsa)
backend2 = Aer.get_backend('qasm_simulator') #VGG it was 'BasicAer.get_backend' ibmq_16_melbourne
quantum_instance = QuantumInstance(backend2, shots=1024, seed_simulator=seed, seed_transpiler=seed)
print(backend2)
result = vqe.run(quantum_instance)
"""declarative approach, update the param from the previous cell.
params['backend']['provider'] = 'qiskit.BasicAer'
params['backend']['name'] = 'qasm_simulator'
params['backend']['shots'] = 1024
result = run_algorithm(params, algo_input)
"""
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstate'])
print('energy:', result['eigenvalue'])
print('time:', result['optimizer_time'])
print('max-cut objective:', result['eigenvalue'] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
plot_histogram(result['eigenstate'])
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
x=max_cut.get_graph_solution(x).tolist()
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w,x)
print(x,offset_docplex)
plot_histogram(result['eigenstate'])
backend_old=backend
#backend=backend2
#warnings.filterwarnings('ignore', 'DeprecationWarning')
# run quantum algorithm with shots
seed = 10598
spsa = SPSA(max_trials=10) #VGG 300
ry = RY(model.num_qubits, entanglement='linear') #depth=5,
vqe = VQE(model, ry, spsa)
#backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex' # ibmq_london
#backend = provider.get_backend('ibmq_qasm_simulator')
quantum_instance = QuantumInstance(backend, shots=1024, seed_simulator=seed, seed_transpiler=seed)
print("submiting for results using:",backend)
result = vqe.run(quantum_instance)
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstate'])
print('energy:', result['eigenvalue'])
print('time:', result['optimizer_time'])
print('max-cut objective:', result['eigenvalue'] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
plot_histogram(result['eigenstate'])
#VGG define the above as a function
def Max_Cut_IBMQ(W,*c_x0):
    """Solve max-cut on ``W`` with VQE (RY ansatz + SPSA) on the global backend.

    Returns (cost, assignment): cost is recomputed classically from the most
    likely sampled bitstring via ``max_cut.max_cut_value``; assignment is a
    0/1 int list.

    NOTE(review): relies on module-level ``backend`` and ``seed``.
    """
    qubitOp_docplex, offset_docplex = set_up_Dcplx_model(W)
    # Same pattern as Max_Cut_Dcplx: each h rebuilds the model, so only the
    # last constraint vector in c_x0 is actually used.
    for h in c_x0:
        qubitOp_docplex, offset_docplex = set_up_Dcplx_model(W,h)
    model=qubitOp_docplex
    spsa = SPSA(max_trials=10) #VGG 300
    ry = RY(model.num_qubits, entanglement='linear') #depth=5,
    vqe = VQE(model, ry, spsa)
    quantum_instance = QuantumInstance(backend, shots=1024, seed_simulator=seed, seed_transpiler=seed)
    result = vqe.run(quantum_instance)
    x = sample_most_likely(result['eigenstate'])
    cost_vqe=max_cut.max_cut_value(x, W)
    x_vqe =np.int_(max_cut.get_graph_solution(x)).tolist()
    return cost_vqe, x_vqe
%time market_simulations(10)
%time market_simulations(10,'dcplx')
print(backend)
backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex' # ibmq_london
#backend = provider.get_backend('ibmq_qasm_simulator')
#backend = Aer.get_backend('qasm_simulator')
#backend = Aer.get_backend('statevector_simulator')
print(backend)
backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex' # ibmq_london
#backend = provider.get_backend('ibmq_qasm_simulator')
#backend = Aer.get_backend('qasm_simulator')
#backend = Aer.get_backend('statevector_simulator')
%time market_simulations(10,'q')
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/VGGatGitHub/2020-QISKit-Summer-Jam
|
VGGatGitHub
|
#%pip uninstall qiskit
%pip install qiskit #==0.16
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
# useful additional packages
import matplotlib.pyplot as plt
import matplotlib.axes as axes
%matplotlib inline
import numpy as np
import networkx as nx
from qiskit import Aer
from qiskit.tools.visualization import plot_histogram
#VGG todo 1: the equivalent run_algorithm and EnergyInput versions updates
#from qiskit.aqua.translators.ising import max_cut, tsp
#from qiskit.aqua import run_algorithm
#from qiskit.aqua.input import EnergyInput
#old v0.16# from qiskit.optimization.ising import max_cut, tsp
#old v0.16# from qiskit.optimization.ising.common import sample_most_likely
#older# from qiskit.optimization.ising import docplex
from qiskit.optimization.applications.ising import max_cut, tsp
from qiskit.optimization.applications.ising.common import sample_most_likely
from qiskit.optimization.applications.ising import docplex
from qiskit.aqua.algorithms import VQE
from qiskit.aqua.algorithms import NumPyEigensolver as ExactEigensolver
from qiskit.aqua.components.optimizers import SPSA
#from qiskit.aqua.components.variational_forms import RY #RealAmplitudes
from qiskit.circuit.library import RealAmplitudes as RY
from qiskit.aqua import QuantumInstance
# setup aqua logging
import logging
from qiskit.aqua import set_qiskit_aqua_logging
# set_qiskit_aqua_logging(logging.DEBUG) # choose INFO, DEBUG to see the log
#from qiskit import IBMQ
#provider = IBMQ.load_account()
#VGG select the backend for coupling_map
try:
backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex')
#backend = provider.get_backend('ibmq_london')#'ibmq_16_melbourne')#'ibmq_essex')
except:
backend = Aer.get_backend('qasm_simulator') #VGG it was 'BasicAer.get_backend' ibmq_16_melbourne
coupling_map = backend.configuration().coupling_map
print(coupling_map)
from typing import List, Tuple
seed = 19120623
np.random.seed(seed)
#VGG: function adopted from the Rigetti's MaxCutQAOA.ipynb
def generate_ising_graph(edges: List[Tuple[int, int]]) -> nx.Graph:
    """Build an undirected graph from *edges* and attach random edge weights.

    Weights are drawn uniformly from [0, 1) and scaled so the weight vector
    has unit Euclidean norm; each is stored under the 'weight' attribute.
    """
    graph = nx.from_edgelist(edges)
    raw: np.ndarray = np.random.rand(graph.number_of_edges())
    normalized = raw / np.linalg.norm(raw)
    attrs = {edge: {'weight': wt} for edge, wt in zip(graph.edges, normalized)}
    nx.set_edge_attributes(graph, attrs)
    return graph
if coupling_map != None:
G=generate_ising_graph(coupling_map)
n=G.number_of_nodes()
print(n)
# Generating a graph if there were no coupling_map
if coupling_map== None:
#define the edges / coupling_map
#'ibmq_16_melbourne'
elist=[[0, 1], [0, 14], [1, 0], [1, 2], [1, 13], [2, 1], [2, 3], [2, 12],
[3, 2], [3, 4], [3, 11], [4, 3], [4, 5], [4, 10], [5, 4], [5, 6],
[5, 9], [6, 5], [6, 8], [7, 8], [8, 6], [8, 7], [8, 9], [9, 5], [9, 8],
[9, 10], [10, 4], [10, 9], [10, 11], [11, 3], [11, 10], [11, 12], [12, 2],
[12, 11], [12, 13], [13, 1], [13, 12], [13, 14], [14, 0], [14, 13]]
#elist=[[0,1],[0,2],[0,3],[1,2],[2,3],[0,4],[0,2], [4, 3],[1,5],[3,5]]
elist=[[0,1],[0,2],[0,3],[1,2],[2,3],[0,4],[0,2], [4, 3]]
G=generate_ising_graph(elist)
n=G.number_of_nodes()
#other ways to define the graph
#n=5 # Number of nodes in graph
#G=nx.Graph()
#G.add_nodes_from(np.arange(0,n,1))
#ewlist=[(0,1,1.),(0,2,.5),(0,3,0),(1,2,1.0),(0,3,1.0)]
#G1 = nx.from_edgelist(elist)
#G1.add_weighted_edges_from(ewlist)
#Visualize
print(G.number_of_nodes(),G.number_of_edges())
colors = ['r' for node in G.nodes()]
pos = nx.spring_layout(G)
default_axes = plt.axes(frameon=True)
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=.8, ax=default_axes, pos=pos)
nx.drawing.nx_pylab.draw(G)
elist=G.edges()
print("elist=",elist)
ewlist=[(i,j,G.get_edge_data(i,j,default=0)['weight']) for i,j in G.edges()]
print('ewlist=',ewlist)
def issymmetric(Matrix):
    """Return True iff *Matrix* is a square, symmetric 2-D array.

    Prints a diagnostic and returns False on a non-square shape or on the
    first asymmetric pair found.
    """
    dim = Matrix.shape[0]
    if Matrix.shape[1] != dim:
        print("Shape Error!")
        return False
    # Compare each upper-triangle entry with its transpose partner.
    for a in range(dim):
        for b in range(a, dim):
            if Matrix[a, b] != Matrix[b, a]:
                print("Shape Error:", (a, b), Matrix[a, b], Matrix[b, a],
                      "difference:", Matrix[a, b] - Matrix[b, a])
                return False
    return True
# Computing the weight matrix from the random graph
w = np.zeros([n,n])
w = np.eye(n)
for i in range(n):
for j in range(n):
temp = G.get_edge_data(i,j,default=0)
if temp != 0:
w[i,j] = temp['weight']
w/=np.linalg.det(w)**(1/n)
print("Symmetric:",issymmetric(w),"Norm:",np.linalg.norm(w))
print("Eignvlues:",np.linalg.eigvals(w),"det:",np.linalg.det(w))
print(w)
np.sum(w)/4 #the offset value
def Max_Cut_BF(W,*x0):
    """Brute-force (weighted) max-cut over all 2**n bit assignments.

    Parameters
    ----------
    W : 2-D array of edge weights, indexed by the module-level node count ``n``.
    *x0 : optional 0/1 vectors; each subtracts ``dot(h, x)/n`` from the cost,
        penalizing selecting nodes already flagged in ``h``.

    Returns
    -------
    (best_cost, best_assignment) where best_assignment is a 0/1 list of length n.

    NOTE(review): relies on the global ``n`` — assumed to equal W.shape[0];
    confirm against the caller.
    """
    best_cost_brute = 0
    xbest_brute=np.array([1]*n)
    for b in range(2**n):
        # Bits of b, least-significant first, zero-padded to length n.
        x = [int(t) for t in reversed(list(bin(b)[2:].zfill(n)))]
        cost = 0
        for h in x0:
            cost -= np.dot(h,x)/n #VGG don't give free samples to those with h==1
        for i in range(n):
            cost +=(2-np.dot(x,x))/n/2 #VGG try to favor fewer free samples
            for j in range(n):
                cost += W[i,j]*x[i]*(1-x[j])
        # Near-equal costs tie-break toward the assignment with fewer 1s.
        if np.isclose(cost,best_cost_brute):
            if sum(x)<sum(xbest_brute):
                best_cost_brute = cost
                xbest_brute = x
        else:
            if best_cost_brute < cost:
                best_cost_brute = cost
                xbest_brute = x
        if 1==2:  # debug trace, intentionally disabled
            print('case = ' + str(x)+ ' cost = ' + str(cost))
    return best_cost_brute, xbest_brute
%%time
if n < 10:
best_cost_brute, xbest_brute = Max_Cut_BF(w)
colors = ['r' if xbest_brute[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=.8, pos=pos)
print('\nBest solution = ' + str(xbest_brute) + ' cost = ' + str(best_cost_brute))
np.set_printoptions(precision=3)
print(w)
def market_simulations(m,*opt):
    """Simulate ``m`` days of free-sample marketing on the influence graph.

    Each day a max-cut solver picks who receives free samples, buyers are
    drawn stochastically from the influence rows of those recipients, and the
    working weight matrix ``Mw2`` is updated every 7th day from the observed
    giver/buyer relations.

    Parameters
    ----------
    m : int
        Number of simulated days.
    *opt : str
        ``'q'`` to solve with VQE on the global ``backend`` (Max_Cut_IBMQ),
        ``'dcplx'`` for the exact Docplex route (Max_Cut_Dcplx),
        otherwise brute force (Max_Cut_BF, refused for n > 10).

    Returns
    -------
    None — results are printed.

    NOTE(review): depends on module-level ``n``, ``w``, ``backend``,
    ``issymmetric`` and the Max_Cut_* solvers.
    """
    free_samples=0
    boughten=0
    Mw2 = np.zeros([n,n])+w          # working copy of the global weight matrix
    relations=np.zeros([n,n])
    x_free_samples=np.zeros(n)       # per-node count of free samples received
    np.set_printoptions(precision=2)
    if 'q' in opt:
        print("Using Max_Cut option:",'q')
        print("submiting for results using:",backend)
    elif 'dcplx' in opt:
        print("Using Max_Cut option:",'Docplex')
    else:
        print("Using Max_Cut_BF")
        if n > 10 :
            print("It may take too long to do Brute Force Calulations - skiping!")
            return
    print("day"," [free samples]"," [buyers distribution]"," [to be used as constrain]"," the two totals"," w-det")
    for i in range(m):
        if sum(x_free_samples)>n/3:
            # Saturation: more than a third of nodes got samples — reset the
            # record and hand out nothing today.
            x_free_samples=np.zeros(n)
            x=np.array([0]*n)
            xbest_brute=np.array([0]*n)
            tmp1=Mw2/np.linalg.norm(Mw2) #select only those rows that recived free samples
            tmp=sum(tmp1[:,]) #sum probaility contributions
            tmp2=tmp
        else:
            # Pick today's recipients with the chosen solver, constrained by
            # who already received samples.
            if 'q' in opt:
                best_cost_brute, xbest_brute = Max_Cut_IBMQ(Mw2,x_free_samples)
            elif 'dcplx' in opt:
                best_cost_brute, xbest_brute = Max_Cut_Dcplx(Mw2,x_free_samples)
            else:
                best_cost_brute, xbest_brute = Max_Cut_BF(Mw2,x_free_samples)
            x=np.array(xbest_brute)
            x_free_samples+=x
            free_samples+=sum(x)
            tmp1=Mw2[x==1]/np.linalg.norm(Mw2) #select only those rows that recived free samples
            tmp=sum(tmp1[:,],(np.array([1]*n)-x)) #sum probaility contributions
            tmp-=np.array([1]*n) #push to negative those with free samples
        #print(tmp)
        # Stochastic buying: node j buys when its influence score beats a
        # uniform draw scaled by half the total positive influence.
        ab=sum(tmp[tmp > 0])
        for j in range(n):
            test=np.random.uniform()*ab/2
            if tmp[j] > test: #buy the product
                x[j]+=1
                boughten+=1
        x0=np.array(xbest_brute)
        x-=x0          # x now holds buyers only (recipients removed)
        relation_today=x.reshape(n,1) @ x0.reshape(1,n)
        relation_today+=relation_today.T   # symmetrize giver<->buyer relation
        relations+=relation_today
        #print(x0,x,"\n",relation_today)
        #print(x0,x,x_free_samples,tmp)
        print(i,x0,x,x_free_samples,free_samples, boughten,np.linalg.det(Mw2))
        if i%7==0 : #weekely updates of the w matrix
            Mw2+=(np.eye(n)+relations)/n/100 #update the w matrix
            relations=np.zeros([n,n])
            # Renormalize: by det^(1/n) when well-conditioned, else Frobenius.
            if issymmetric(Mw2) and np.linalg.det(Mw2)>0.:
                Mw2/=np.linalg.det(Mw2)**(1/n)
            else:
                Mw2/=np.linalg.norm(Mw2,ord='fro')
    print("\nlast day configuration record:\n")
    print(x0,tmp)
    print()
    print(x,free_samples, boughten, np.linalg.norm(Mw2),np.linalg.det(Mw2))
    print()
    print(Mw2)
    return
%time market_simulations(10)
#VGG qubitOp, offset = max_cut.get_max_cut_qubitops(w)
qubitOp, offset = max_cut.get_operator(w)
#algo_input = EnergyInput(qubitOp)
offset
#Making the Hamiltonian in its full form and getting the lowest eigenvalue and eigenvector
ee = ExactEigensolver(qubitOp, k=3)
result = ee.run()
print("energys:",result['eigenvalues'].real)
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstates'][0])
print('energy:', result['eigenvalues'][0],', offset:',offset)
print('max-cut objective:', result['eigenvalues'][0] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
#VGG note that the other runs had implemented soft constraints!
from docplex.mp.model import Model
#VGG from qiskit.aqua.translators.ising import docplex
#older# from qiskit.optimization.ising import docplex
# Create an instance of a model and variables.
mdl = Model(name='max_cut')
y = {i: mdl.binary_var(name='x_{0}'.format(i)) for i in range(n)}
# Object function
#VGG added y[i]/100 term to split the degenerate 1<->0 states in favor of less free samples
max_cut_func = mdl.sum(y[i]/50+w[i,j]* y[i] * ( 1 - y[j] ) for i in range(n) for j in range(n))
mdl.maximize(max_cut_func)
# No constraints for Max-Cut problems.
qubitOp_docplex, offset_docplex = docplex.get_operator(mdl)
offset_docplex
#VGG define the above as a function
def set_up_Dcplx_model(W,*c_x0):
    """Build a Docplex max-cut model over weight matrix ``W`` and convert it
    to a qubit operator.

    Parameters
    ----------
    W : 2-D weight matrix, indexed by the module-level node count ``n``.
    *c_x0 : optional 0/1 vectors; each adds a reward term ``0.5 * h[i] * y[i]``
        steering the solution relative to nodes flagged in ``h``.

    Returns
    -------
    (qubitOp, offset) as produced by ``docplex.get_operator``.
    """
    mdl = Model(name='max_cut')
    y = {i: mdl.binary_var(name='y_{0}'.format(i)) for i in range(n)}
    #VGG try to favor fewer free samples using (2-np.dot(x,x))/n/2
    #VGG split the degenerate 1<->0 states in favor of less free samples using x[i]/n**2
    max_cut_func=mdl.sum((-1)*(2-y[i])*0.5+(-0)*y[i]/100 for i in range(n))
    #VGG don't give free samples to those with h==1
    max_cut_func+=mdl.sum(h[i]*y[i]*0.5 for i in range(n) for h in c_x0)
    # Standard weighted max-cut objective: sum W[i,j] * y_i * (1 - y_j).
    max_cut_func+=mdl.sum(W[i,j]* y[i] * ( 1 - y[j] ) for i in range(n) for j in range(n))
    mdl.maximize(max_cut_func)
    qubitOp, offset = docplex.get_operator(mdl)
    return qubitOp, offset
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w)
print(offset_docplex,x)
#Making the Hamiltonian in its full form and getting the lowest eigenvalue and eigenvector
ee = ExactEigensolver(qubitOp_docplex, k=3)
result = ee.run()
print("energys:",result['eigenvalues'].real)
x = sample_most_likely(result['eigenstates'][0])
print('energy:', result['eigenvalues'][0].real)
print('max-cut objective:', result['eigenvalues'][0].real + offset_docplex)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
x=max_cut.get_graph_solution(x).tolist()
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w,x)
print(x,offset_docplex)
#VGG note if you keep executing this cell you can see diferent configurations
#VGG define the above as a function
def Max_Cut_Dcplx(W,*c_x0):
    """Solve max-cut on ``W`` exactly via the Docplex-derived qubit operator.

    Returns (cost, assignment) where cost is the lowest eigenvalue (real part)
    and assignment is the corresponding 0/1 solution list.
    """
    qubitOp_docplex, offset_docplex = set_up_Dcplx_model(W)
    # NOTE(review): when c_x0 is non-empty this loop rebuilds the model, so
    # the unconstrained build above is discarded and only the LAST h in c_x0
    # takes effect — confirm that is intended.
    for h in c_x0:
        qubitOp_docplex, offset_docplex = set_up_Dcplx_model(W,h)
    ee = ExactEigensolver(qubitOp_docplex,k=3)
    result = ee.run()
    x=sample_most_likely(result['eigenstates'][0])
    x_dcplx=max_cut.get_graph_solution(x).tolist()
    cost_dcplx=result['eigenvalues'][0].real
    #cost_dcplx=max_cut.max_cut_value(x, W)
    return cost_dcplx, x_dcplx
%time market_simulations(10,'dcplx')
seed = 10598
model=qubitOp_docplex
#model=qubitOp
spsa = SPSA(max_trials=30)
ry = RY(model.num_qubits, entanglement='linear') #depth=5,
vqe = VQE(model, ry, spsa)
backend1 = Aer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend1, seed_simulator=seed, seed_transpiler=seed)
print(backend1)
result = vqe.run(quantum_instance)
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstate'])
print('energy:', result['eigenvalue'])
print('time:', result['optimizer_time'])
print('max-cut objective:', result['eigenvalue'] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w,x)
print(x,offset_docplex)
# run quantum algorithm with shots
seed = 10598
spsa = SPSA(max_trials=300)
ry = RY(model.num_qubits, entanglement='linear') #depth=5,
vqe = VQE(model, ry, spsa)
backend2 = Aer.get_backend('qasm_simulator') #VGG it was 'BasicAer.get_backend' ibmq_16_melbourne
quantum_instance = QuantumInstance(backend2, shots=1024, seed_simulator=seed, seed_transpiler=seed)
print(backend2)
result = vqe.run(quantum_instance)
"""declarative approach, update the param from the previous cell.
params['backend']['provider'] = 'qiskit.BasicAer'
params['backend']['name'] = 'qasm_simulator'
params['backend']['shots'] = 1024
result = run_algorithm(params, algo_input)
"""
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstate'])
print('energy:', result['eigenvalue'])
print('time:', result['optimizer_time'])
print('max-cut objective:', result['eigenvalue'] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
plot_histogram(result['eigenstate'])
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
qubitOp_docplex, offset_docplex = set_up_Dcplx_model(w,x)
print(x,offset_docplex)
backend
# run quantum algorithm with shots
seed = 10598
spsa = SPSA(max_trials=30) #VGG 300
ry = RY(model.num_qubits, entanglement='linear') #depth=5,
vqe = VQE(model, ry, spsa)
#backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex' # ibmq_london
#backend = provider.get_backend('ibmq_qasm_simulator')
quantum_instance = QuantumInstance(backend, shots=1024, seed_simulator=seed, seed_transpiler=seed)
print("submiting for results using:",backend)
result = vqe.run(quantum_instance)
#VGG# x = max_cut.sample_most_likely(result['eigvecs'][0])
x = sample_most_likely(result['eigenstate'])
print('energy:', result['eigenvalue'])
print('time:', result['optimizer_time'])
print('max-cut objective:', result['eigenvalue'] + offset)
print('solution:', max_cut.get_graph_solution(x))
print('solution objective:', max_cut.max_cut_value(x, w))
plot_histogram(result['eigenstate'])
colors = ['r' if max_cut.get_graph_solution(x)[i] == 0 else 'b' for i in range(n)]
nx.draw_networkx(G, node_color=colors, node_size=600, alpha = .8, pos=pos)
#VGG define the above as a function
def Max_Cut_IBMQ(W,*c_x0):
    """Solve max-cut on ``W`` with VQE (RY ansatz + SPSA) on the global backend.

    Returns (cost, assignment): cost is recomputed classically from the most
    likely sampled bitstring via ``max_cut.max_cut_value``; assignment is a
    0/1 int list.

    NOTE(review): relies on module-level ``backend`` and ``seed``.
    """
    qubitOp_docplex, offset_docplex = set_up_Dcplx_model(W)
    # Same pattern as Max_Cut_Dcplx: each h rebuilds the model, so only the
    # last constraint vector in c_x0 is actually used.
    for h in c_x0:
        qubitOp_docplex, offset_docplex = set_up_Dcplx_model(W,h)
    model=qubitOp_docplex
    spsa = SPSA(max_trials=30) #VGG 300
    ry = RY(model.num_qubits, entanglement='linear') #depth=5,
    vqe = VQE(model, ry, spsa)
    quantum_instance = QuantumInstance(backend, shots=1024, seed_simulator=seed, seed_transpiler=seed)
    result = vqe.run(quantum_instance)
    x = sample_most_likely(result['eigenstate'])
    cost_vqe=max_cut.max_cut_value(x, W)
    x_vqe =np.int_(max_cut.get_graph_solution(x)).tolist()
    return cost_vqe, x_vqe
%time market_simulations(10)
%time market_simulations(10,'dcplx')
#backend = provider.get_backend('ibmq_16_melbourne')#'ibmq_16_melbourne')#'ibmq_essex' # ibmq_london
#backend = provider.get_backend('ibmq_qasm_simulator')
#backend = Aer.get_backend('qasm_simulator')
#backend = Aer.get_backend('statevector_simulator')
%time market_simulations(10,'q')
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/VGGatGitHub/2020-QISKit-Summer-Jam
|
VGGatGitHub
|
import pandas as pd
data = pd.read_csv("data/breast-cancer.csv")
diagnosis = data["diagnosis"]
labels = diagnosis.map({"M": 1, "B": -1}).values
features = data.drop(["id", "diagnosis"], axis=1)
#VGG comment next three lines to run with the original data above
data = pd.read_csv("data/formatted_titanic.csv")
labels = data["survived"]+2*data["sex"]
features = data.drop(["survived","sex"], axis=1)
data.shape
# Data visualization
import matplotlib.pyplot as plt
import umap
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_features = scaler.fit_transform(features)
reducer = umap.UMAP(n_components=3)
embedding = reducer.fit_transform(scaled_features)
embedding.shape
plt.scatter(
embedding[:, 0],embedding[:, 1], c=labels)
plt.scatter(
embedding[:, 0],embedding[:, 2], c=labels)
plt.scatter(
embedding[:, 1],embedding[:, 2], c=labels)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(embedding[:, 0],embedding[:, 1],embedding[:, 2], c=labels)
plt.show()
from sklearn.decomposition import PCA
pca = PCA(n_components=0.75)
features_pca = pca.fit_transform(features)
pca.n_components_
features_pca.shape
plt.scatter(features_pca[:, 0],features_pca[:, 1], c=labels)
plt.scatter(features_pca[:, 0],features_pca[:, 2], c=labels)
plt.scatter(features_pca[:, 1],features_pca[:, 2], c=labels)
fig3d = plt.figure()
ax3d = fig3d.add_subplot(projection='3d')
ax3d.scatter(features_pca[:, 0],features_pca[:, 1],features_pca[:, 2], c=labels)
plt.show()
from qcfl.preprocessing import scale_for_angle_encoding
features_scaled = scale_for_angle_encoding(features_pca)
from utils import split_data, write_shards
dataset = pd.DataFrame(data=features_scaled)
dataset['target'] = labels
dataset = dataset.sample(frac=1)
shards = split_data(dataset, [0.3, 0.1, 0.2, 0.2, 0.2])
write_shards(shards, 'cancer')
from pennylane import AdamOptimizer
from qcfl.penny.classifier import QuantumClassifier
from qcfl.penny.models import SimplQMLModel
from qcfl.penny.federated_qml import create_clients, FederatedServer, to_numpy_dataset
num_qubits = features_pca.shape[1]
num_layers = 3
batch_size = 5
clients = create_clients(shards[0:4], lambda: QuantumClassifier(num_qubits, num_layers, batch_size,
AdamOptimizer(),
SimplQMLModel(num_qubits)))
classifier = QuantumClassifier(num_qubits, num_layers, batch_size,
AdamOptimizer(),
SimplQMLModel(num_qubits))
server = FederatedServer(classifier, clients, client_epochs=10)
# Train
from pennylane import numpy as np
weights = 0.01 * np.random.randn(num_layers, num_qubits, 3, requires_grad=True)
bias = np.array(0.0, requires_grad=True)
trained_weights, trained_bias = server.train(weights, bias, iterations=15)
from sklearn.metrics import classification_report
test_features, test_labels = to_numpy_dataset(shards[-1])
test_predictions = [classifier.classify(trained_weights, trained_bias, f) for f in test_features]
target_names = ['Malignant', 'Benign']
target_names = [str(i) for i in set(labels.values).union()]#VGG comment this line for cancer
cr = classification_report(test_labels, test_predictions, target_names=target_names)
print(cr)
set(labels.values).union()
from typing import List, Tuple
from flwr.common import Metrics
def fit_round(server_round: int):
    """Send round number to client."""
    config = {"server_round": server_round}
    return config
def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:
    """Aggregate client accuracies into an example-weighted average.

    Each entry of *metrics* is ``(num_examples, {"accuracy": value})``; the
    result weights every client's accuracy by its example count.
    """
    weighted_sum = 0
    total_examples = 0
    for num_examples, client_metrics in metrics:
        weighted_sum += num_examples * client_metrics["accuracy"]
        total_examples += num_examples
    # Weighted average across all participating clients.
    return {"accuracy": weighted_sum / total_examples}
def get_evaluate_fn(classifier, X_test, y_test):
    """Return an evaluation function for server-side evaluation.

    Parameters
    ----------
    classifier : object with ``set_parameters(weights, bias)`` and
        ``evaluate(round, X, y) -> (loss, accuracy)``.
    X_test, y_test : held-out test set captured by the closure.
    """
    # The `evaluate` function will be called after every round
    def evaluate(server_round, parameters: fl.common.NDArrays, config):
        # Update model with the latest parameters
        weights, bias = parameters[0], parameters[1]
        classifier.set_parameters(weights, bias)
        loss, accuracy = classifier.evaluate(server_round, X_test, y_test)
        return loss, {"accuracy": accuracy}
    return evaluate
from qcfl.flower.flower_qml_client import PennylaneClient
num_qubits = features_pca.shape[1]#VGG
NUM_CLIENTS = 4
client_resources = None
def client_fn(cid: str) -> PennylaneClient:
    """Create a Flower client representing a single organization.

    Loads the training shard matching *cid* and wraps a fresh
    QuantumClassifier in a PennylaneClient.
    """
    print(cid)
    # Shard files are 1-based while Flower client ids start at 0.
    index = int(cid)+1
    # Load a shard corresponding to this client
    X_train, y_train = load_shard('cancer', index)
    # requires_grad=False: training data is constant w.r.t. autodiff.
    X_train = np.array(X_train, requires_grad=False)
    y_train = np.array(y_train, requires_grad=False)
    classifier = QuantumClassifier(num_qubits, 3, 5,
                                   AdamOptimizer(),
                                   SimplQMLModel(num_qubits))
    # Define Flower client
    return PennylaneClient(f'cancer{index}',
                           classifier,
                           X_train, y_train)
from utils import load_shard
# Load test data here to avoid the overhead of doing it in `evaluate` itself
X_test, y_test = load_shard('cancer', 5)
# Create a classifier to hold the final weights
classifier = QuantumClassifier(num_qubits, 3, 10,
AdamOptimizer(),
SimplQMLModel(num_qubits))
import flwr as fl
# Create FedAvg strategy
strategy = fl.server.strategy.FedAvg(
fraction_fit=1.0,
fraction_evaluate=0.5,
min_available_clients=NUM_CLIENTS,
on_fit_config_fn=fit_round,
evaluate_fn=get_evaluate_fn(classifier, X_test, y_test),
evaluate_metrics_aggregation_fn=weighted_average
)
# Run flower in simulation mode
fl.simulation.start_simulation(
client_fn=client_fn,
num_clients=NUM_CLIENTS,
config=fl.server.ServerConfig(num_rounds=15),
strategy=strategy,
client_resources=client_resources,
)
#TODO: Plot loss, accuracy
# Compute metrics on final model
test_features, test_labels = to_numpy_dataset(shards[-1])
test_predictions = [classifier.classify(trained_weights, trained_bias, f) for f in test_features]
target_names = ['Malignant', 'Benign']
cr = classification_report(test_labels, test_predictions, target_names=target_names)
print(cr)
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import copy
from qiskit_experiments.library import StandardRB, InterleavedRB
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library.randomized_benchmarking import RBUtils
import qiskit.circuit.library as circuits
# for retrieving gate calibration
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
import unif_bayesian_fitter as bf
simulation = True # make your choice here
if simulation:
from qiskit.providers.aer import AerSimulator
from qiskit.test.mock import FakeParis
backend = AerSimulator.from_backend(FakeParis())
else:
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_lima') # type here hardware backend
# for WIP
import importlib
importlib.reload(bf)
lengths = np.arange(1, 2500, 250)
num_samples = 10
seed = 1010
qubits = [0]
# Run an RB experiment on qubit 0
exp1 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
expdata1 = exp1.run(backend).block_for_results()
results1 = expdata1.analysis_results()
# View result data
display(expdata1.figure(0))
for result in results1:
print(result)
popt = expdata1.analysis_results()[0].value.value
pcov = expdata1.analysis_results()[0].extra['covariance_mat']
epc_est_fm = expdata1.analysis_results()[2].value.value
epc_est_fm_err = expdata1.analysis_results()[2].value.stderr
EPG_dic = {}
for i in range(3,6):
EPG_key = expdata1.analysis_results()[i].name
EPG_dic[EPG_key] = expdata1.analysis_results()[i].value.value
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate =''
# get count data
Y = bf.get_GSP_counts(expdata1._data, len(lengths),range(num_samples))
expdata1._data[1]
experiment_type = expdata1._data[0]['metadata']['experiment_type']
physical_qubits = expdata1._data[0]['metadata']['physical_qubits']
shots = expdata1._data[0]['shots']
#build model
pooled_model = bf.build_bayesian_model(model_type="pooled",Y=Y,
shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov)
pm.model_to_graphviz(pooled_model)
trace_p = bf.get_trace(pooled_model, target_accept = 0.95)
# backend's recorded EPG
print(RBUtils.get_error_dict_from_backend(backend, qubits))
bf.RB_bayesian_results(pooled_model, trace_p, lengths,
epc_est_fm,
epc_est_fm_err,
experiment_type,
scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic,
routine = 'build_bayesian_model')
#build model
hierarchical_model = bf.build_bayesian_model(model_type="h_sigma",Y=Y,
shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov,
sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015)
pm.model_to_graphviz(hierarchical_model)
trace_h = bf.get_trace(hierarchical_model, target_accept = 0.99)
# backend's recorded EPG
print(RBUtils.get_error_dict_from_backend(backend, qubits))
bf.RB_bayesian_results(hierarchical_model, trace_h, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic, routine = 'build_bayesian_model')
# describe RB experiment
interleaved_gate =''
physical_qubits = qubits = (1,4)
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ # defined for the 2-qubit run
lengths = np.arange(1, 200, 30)
lengths_1_qubit = np.arange(1, 2500, 250)
num_samples = 10
seed = 1010
# Run a 1-qubit RB experiment on each qubit to determine the error-per-gate of 1-qubit gates
expdata_1q = {}
epg_1q = []
for qubit in qubits:
exp = StandardRB([qubit], lengths_1_qubit, num_samples=num_samples, seed=seed)
expdata = exp.run(backend).block_for_results()
expdata_1q[qubit] = expdata
epg_1q += expdata.analysis_results()
# Run an RB experiment on qubits 1, 4
exp2 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
# Use the EPG data of the 1-qubit runs to ensure correct 2-qubit EPG computation
exp2.set_analysis_options(epg_1_qubit=epg_1q)
# Run the 2-qubit experiment
expdata2 = exp2.run(backend).block_for_results()
# View result data
results2 = expdata2.analysis_results()
# View result data
display(expdata2.figure(0))
for result in results2:
print(result)
# Compare the computed EPG of the cx gate with the backend's recorded cx gate error:
expected_epg = RBUtils.get_error_dict_from_backend(backend, qubits)[(qubits, 'cx')]
exp2_epg = expdata2.analysis_results("EPG_cx").value
print("Backend's reported EPG of the cx gate:", expected_epg)
print("Experiment computed EPG of the cx gate:", exp2_epg)
popt = expdata2.analysis_results()[0].value.value
pcov = expdata2.analysis_results()[0].extra['covariance_mat']
epc_est_fm = expdata2.analysis_results()[2].value.value
epc_est_fm_err = expdata2.analysis_results()[2].value.stderr
EPG_dic = {}
EPG_key = 'cx' #expdata2.analysis_results()[3].name
EPG_dic[EPG_key] = expdata2.analysis_results()[3].value.value
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
desired_gate ='cx'
t = None # enter t in datetime format if necessary
e_list = dv.gate_error_values(backend.properties()) # use properties(datetime=t) if t is defined
epc_calib = np.nan
for tuple_e in e_list:
if tuple_e[0] == 'cx' and tuple_e[1] == physical_qubits:
epc_calib = tuple_e[2]
print('EPC calibration: {0:.6f}'.format(epc_calib))
# get count data
Y = bf.get_GSP_counts(expdata2._data, len(lengths),range(num_samples))
experiment_type = expdata2._data[0]['metadata']['experiment_type']
physical_qubits = expdata2._data[0]['metadata']['physical_qubits']
shots = expdata2._data[0]['shots']
#build model
S2QBp_model = bf.build_bayesian_model(model_type="pooled",Y=Y,
shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov)
pm.model_to_graphviz(S2QBp_model)
trace_p2 = bf.get_trace(S2QBp_model, target_accept = 0.95)
bf.RB_bayesian_results(S2QBp_model, trace_p2, lengths,
epc_est_fm,
epc_est_fm_err,
experiment_type,
scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic,
routine = 'build_bayesian_model')
#build model
S2QBh_model = bf.build_bayesian_model(model_type="h_sigma",Y=Y,shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov,
sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015)
pm.model_to_graphviz(S2QBh_model)
trace_h2 = bf.get_trace(S2QBh_model)
bf.RB_bayesian_results(S2QBh_model, trace_h2, lengths,
epc_est_fm,
epc_est_fm_err,
experiment_type,
scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic,
routine = 'build_bayesian_model')
# describe RB experiment
interleaved_gate = "x"
qubits = [0]
interleaved_circuit = circuits.XGate()
lengths = np.arange(1, 2500, 250)
num_samples = 10
seed = 1010
# Run an interleaved RB experiment
int_exp1 = InterleavedRB(interleaved_circuit, qubits,
lengths, num_samples=num_samples, seed=seed)
# Run
int_expdata1 = int_exp1.run(backend).block_for_results()
int_results1 = int_expdata1.analysis_results()
# View result data
display(int_expdata1.figure(0))
for result in int_results1:
print(result)
popt = int_expdata1.analysis_results()[0].value.value
pcov = int_expdata1.analysis_results()[0].extra['covariance_mat']
popt[2] = popt[1]/popt[2] # replace alpha_C by p_tilde
# WIP rigorously the covariance matrix could be modified too if used
epc_est_fm = int_expdata1.analysis_results()[3].value.value
epc_est_fm_err = int_expdata1.analysis_results()[3].value.stderr
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate ='x'
# get count data
Y1 = bf.get_GSP_counts(int_expdata1._data, len(lengths),
range(0,2*num_samples-1,2))
Y2 = bf.get_GSP_counts(int_expdata1._data, len(lengths),
range(1,2*num_samples,2))
int_expdata1._data[1]
experiment_type = int_expdata1._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata1._data[0]['metadata']['physical_qubits']
shots = int_expdata1._data[0]['shots']
Y=np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde1 = bf.build_bayesian_model("tilde",Y=Y,shots=shots,
m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI=RvsI,IvsR=IvsR)
pm.model_to_graphviz(tilde1)
trace_t = bf.get_trace(tilde1)
t = None # enter t in datetime format if necessary
e_list = dv.gate_error_values(backend.properties()) # use properties(datetime=t) if t is defined
# Look up the backend-calibrated error rate for the interleaved gate.
epc_calib = np.nan
for tuple_e in e_list:
    if tuple_e[0] == interleaved_gate and tuple_e[1] == qubits:
        # BUG FIX: the original chained assignment
        # `epc_calib = np.nan = tuple_e[2]` also rebound the module
        # attribute `np.nan`, silently corrupting NaN for the whole
        # session. Assign only to the local variable.
        epc_calib = tuple_e[2]
print('EPC calibration: {0:.6f}'.format(epc_calib))
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
# epc_calib = 2.307E-4 + (23.6-7)*(2.193E-4 - 2.307E-4)/24
bf.RB_bayesian_results(tilde1, trace_t, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
import importlib
importlib.reload(bf)
Y=np.hstack((Y1,Y2))
RvsI_h = np.ravel(np.vstack((np.ones_like(lengths),np.zeros_like(lengths))))
IvsR_h = np.ravel(np.vstack((np.zeros_like(lengths),np.ones_like(lengths))))
tilde2 = bf.build_bayesian_model("h_tilde",Y=Y,shots=shots, m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI = RvsI_h, IvsR = IvsR_h,
sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015)
pm.model_to_graphviz(tilde2)
trace_t3 = bf.get_trace(tilde2, target_accept = .95)
bf.RB_bayesian_results(tilde2, trace_t3, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
# describe RB experiment
interleaved_gate = "cx"
physical_qubits = qubits = [1,4]
interleaved_circuit = circuits.CXGate()
lengths = np.arange(1, 200, 30)
num_samples = 10
seed = 1010
t = None # enter t in datetime format if necessary
e_list = dv.gate_error_values(backend.properties()) # use properties(datetime=t) if t is defined
# Look up the backend-calibrated error rate for the interleaved gate.
epc_calib = np.nan
for tuple_e in e_list:
    if tuple_e[0] == interleaved_gate and tuple_e[1] == physical_qubits:
        # BUG FIX: dropped the extra `np.nan` assignment target from the
        # original `epc_calib = np.nan = tuple_e[2]`, which overwrote
        # numpy's `nan` module attribute as a side effect.
        epc_calib = tuple_e[2]
print('EPC calibration: {0:.6f}'.format(epc_calib))
# Run an interleaved RB experiment
int_exp2 = InterleavedRB(interleaved_circuit, qubits,
lengths, num_samples=num_samples, seed=seed)
# Run
int_expdata2 = int_exp2.run(backend).block_for_results()
int_results2 = int_expdata2.analysis_results()
# View result data
display(int_expdata2.figure(0))
for result in int_results2:
print(result)
popt = int_expdata2.analysis_results()[0].value.value
pcov = int_expdata2.analysis_results()[0].extra['covariance_mat']
popt[2] = popt[1]/popt[2] # replace alpha_C by p_tilde
# WIP rigorously the covariance matrix could be modified too if used
epc_est_fm = int_expdata2.analysis_results()[3].value.value
epc_est_fm_err = int_expdata2.analysis_results()[3].value.stderr
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate ='cx'
# get count data
Y1 = bf.get_GSP_counts(int_expdata2._data, len(lengths),
range(0,2*num_samples-1,2))
Y2 = bf.get_GSP_counts(int_expdata2._data, len(lengths),
range(1,2*num_samples,2))
int_expdata2._data[1]
experiment_type = int_expdata2._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata2._data[0]['metadata']['physical_qubits']
shots = int_expdata2._data[0]['shots']
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
# epc_calib = 2.307E-4 + (23.6-7)*(2.193E-4 - 2.307E-4)/24
Y = np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde3 = bf.build_bayesian_model("tilde",Y=Y,shots=shots, m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI=RvsI,IvsR=IvsR)
pm.model_to_graphviz(tilde3)
trace_t3 = bf.get_trace(tilde3)
bf.RB_bayesian_results(tilde3, trace_t3, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
import importlib
importlib.reload(bf)
# use 2m length array
RvsI_h = np.ravel(np.vstack((np.ones_like(lengths),np.zeros_like(lengths))))
IvsR_h = np.ravel(np.vstack((np.zeros_like(lengths),np.ones_like(lengths))))
tilde4 = bf.build_bayesian_model("h_tilde",Y=np.hstack((Y1,Y2)),
shots=shots, m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI = RvsI_h, IvsR = IvsR_h,
sigma_theta=0.005,sigma_theta_l=0.001,sigma_theta_u=0.05)
pm.model_to_graphviz(tilde4)
trace_t4 = bf.get_trace(tilde4, target_accept = .99)
bf.RB_bayesian_results(tilde4, trace_t4, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import copy
from qiskit_experiments.library import StandardRB, InterleavedRB
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library.randomized_benchmarking import RBUtils
import qiskit.circuit.library as circuits
# for retrieving gate calibration
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
import unif_bayesian_fitter as bf
simulation = False # make your choice here
if simulation:
from qiskit.providers.aer import AerSimulator
from qiskit.test.mock import FakeParis
backend = AerSimulator.from_backend(FakeParis())
else:
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_bogota') # type here hardware backend
# for WIP
import importlib
importlib.reload(bf)
lengths = np.arange(1, 2225, 225)
num_samples = 10
seed = 1010
qubits = [4]
# last backend's recorded EPG before experiments
print(RBUtils.get_error_dict_from_backend(backend, qubits))
# backend's recorded EPG after experiments
print(RBUtils.get_error_dict_from_backend(backend, qubits))
# Correct reference for shift
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
epc_calib = 0.0001600 + (18+25/60-7)*(0.0001772 - 0.0001600)/24
print('EPC calibration: {0:1.4e}'.format(epc_calib))
# Run an RB experiment on qubit 0
exp1 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
expdata1 = exp1.run(backend).block_for_results()
results1 = expdata1.analysis_results()
# View result data
display(expdata1.figure(0))
for result in results1:
print(result)
popt = expdata1.analysis_results()[0].value.value
pcov = expdata1.analysis_results()[0].extra['covariance_mat']
epc_est_fm = expdata1.analysis_results()[2].value.value
epc_est_fm_err = expdata1.analysis_results()[2].value.stderr
EPG_dic = {}
for i in range(3,6):
EPG_key = expdata1.analysis_results()[i].name
EPG_dic[EPG_key] = expdata1.analysis_results()[i].value.value
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate =''
# get count data
Y = bf.get_GSP_counts(expdata1._data, len(lengths),range(num_samples))
expdata1._data[1]
experiment_type = expdata1._data[0]['metadata']['experiment_type']
physical_qubits = expdata1._data[0]['metadata']['physical_qubits']
shots = expdata1._data[0]['shots']
#build model
pooled_model = bf.build_bayesian_model(model_type="pooled",Y=Y,
shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov)
pm.model_to_graphviz(pooled_model)
trace_p = bf.get_trace(pooled_model, target_accept = 0.95)
bf.RB_bayesian_results(pooled_model, trace_p, lengths,
epc_est_fm,
epc_est_fm_err,
experiment_type,
scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic,
routine = 'build_bayesian_model')
#build model
hierarchical_model = bf.build_bayesian_model(model_type="h_sigma",Y=Y,
shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov,
sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015)
pm.model_to_graphviz(hierarchical_model)
trace_h = bf.get_trace(hierarchical_model, target_accept = 0.99)
bf.RB_bayesian_results(hierarchical_model, trace_h, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic, routine = 'build_bayesian_model')
# describe RB experiment
interleaved_gate = "x"
qubits = [4]
interleaved_circuit = circuits.XGate()
lengths = np.arange(1, 2250, 225)
num_samples = 10
seed = 1010
# Run an interleaved RB experiment
int_exp1 = InterleavedRB(interleaved_circuit, qubits,
lengths, num_samples=num_samples, seed=seed)
# Run
int_expdata1 = int_exp1.run(backend).block_for_results()
int_results1 = int_expdata1.analysis_results()
# View result data
display(int_expdata1.figure(0))
for result in int_results1:
print(result)
popt = int_expdata1.analysis_results()[0].value.value
pcov = int_expdata1.analysis_results()[0].extra['covariance_mat']
popt[2] = popt[1]/popt[2] # replace alpha_C by p_tilde
# WIP rigorously the covariance matrix could be modified too if used
epc_est_fm = int_expdata1.analysis_results()[3].value.value
epc_est_fm_err = int_expdata1.analysis_results()[3].value.stderr
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate ='x'
# get count data
Y1 = bf.get_GSP_counts(int_expdata1._data, len(lengths),
range(0,2*num_samples-1,2))
Y2 = bf.get_GSP_counts(int_expdata1._data, len(lengths),
range(1,2*num_samples,2))
int_expdata1._data[1]
experiment_type = int_expdata1._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata1._data[0]['metadata']['physical_qubits']
shots = int_expdata1._data[0]['shots']
Y=np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde1 = bf.build_bayesian_model("tilde",Y=Y,shots=shots,
m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI=RvsI,IvsR=IvsR)
pm.model_to_graphviz(tilde1)
trace_t = bf.get_trace(tilde1)
bf.RB_bayesian_results(tilde1, trace_t, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
# Correct reference for shift
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
epc_calib = 0.0001600 + (17+36/60-7)*(0.0001772 - 0.0001600)/24
print('EPC calibration: {0:.6f}'.format(epc_calib))
import importlib
importlib.reload(bf)
Y=np.hstack((Y1,Y2))
RvsI_h = np.ravel(np.vstack((np.ones_like(lengths),np.zeros_like(lengths))))
IvsR_h = np.ravel(np.vstack((np.zeros_like(lengths),np.ones_like(lengths))))
tilde2 = bf.build_bayesian_model("h_tilde",Y=Y,shots=shots, m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI = RvsI_h, IvsR = IvsR_h,
sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015)
pm.model_to_graphviz(tilde2)
trace_t3 = bf.get_trace(tilde2, target_accept = .95)
bf.RB_bayesian_results(tilde2, trace_t3, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
# Correct reference for shift
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
epc_calib = 0.0001600 + (17+36/60-7)*(0.0001772 - 0.0001600)/24
print('EPC calibration: {0:.6f}'.format(epc_calib))
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import copy
from qiskit_experiments.library import StandardRB, InterleavedRB
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library.randomized_benchmarking import RBUtils
import qiskit.circuit.library as circuits
# for retrieving gate calibration
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
import unif_bayesian_fitter as bf
simulation = False # make your choice here
if simulation:
from qiskit.providers.aer import AerSimulator
from qiskit.test.mock import FakeParis
backend = AerSimulator.from_backend(FakeParis())
else:
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_bogota') # type here hardware backend
# for WIP
import importlib
importlib.reload(bf)
# describe RB experiment
interleaved_gate =''
physical_qubits = qubits = (0,1)
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ # defined for the 2-qubit run
lengths = np.arange(1, 200, 30)
lengths_1_qubit = np.arange(1, 2225, 225)
num_samples = 10
seed = 1010
# Run a 1-qubit RB experiment on each qubit to determine the error-per-gate of 1-qubit gates
expdata_1q = {}
epg_1q = []
for qubit in qubits:
exp = StandardRB([qubit], lengths_1_qubit, num_samples=num_samples, seed=seed)
expdata = exp.run(backend).block_for_results()
expdata_1q[qubit] = expdata
epg_1q += expdata.analysis_results()
# Run an RB experiment on 2 qubits
exp2 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
# Use the EPG data of the 1-qubit runs to ensure correct 2-qubit EPG computation
exp2.set_analysis_options(epg_1_qubit=epg_1q)
# Run the 2-qubit experiment
expdata2 = exp2.run(backend).block_for_results()
# View result data
results2 = expdata2.analysis_results()
# View result data
display(expdata2.figure(0))
for result in results2:
print(result)
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
epc_calib = 0.010745 + (22+40/60-7)*(0.008261 - 0.010745)/24
print('EPC calibration: {0:0.4e}'.format(epc_calib))
popt = expdata2.analysis_results()[0].value.value
pcov = expdata2.analysis_results()[0].extra['covariance_mat']
epc_est_fm = expdata2.analysis_results()[2].value.value
epc_est_fm_err = expdata2.analysis_results()[2].value.stderr
EPG_dic = {}
EPG_key = 'cx' #expdata2.analysis_results()[3].name
EPG_dic[EPG_key] = expdata2.analysis_results()[3].value.value
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
desired_gate ='cx'
# get count data
Y = bf.get_GSP_counts(expdata2._data, len(lengths),range(num_samples))
experiment_type = expdata2._data[0]['metadata']['experiment_type']
physical_qubits = expdata2._data[0]['metadata']['physical_qubits']
shots = expdata2._data[0]['shots']
#build model
S2QBp_model = bf.build_bayesian_model(model_type="pooled",Y=Y,
shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov)
pm.model_to_graphviz(S2QBp_model)
trace_p2 = bf.get_trace(S2QBp_model, target_accept = 0.95)
bf.RB_bayesian_results(S2QBp_model, trace_p2, lengths,
epc_est_fm,
epc_est_fm_err,
experiment_type,
scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic,
routine = 'build_bayesian_model')
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
epc_calib = 0.010745 + (22+40/60-7)*(0.008261 - 0.010745)/24
print('EPC calibration: {0:0.4e}'.format(epc_calib))
#build model
S2QBh_model = bf.build_bayesian_model(model_type="h_sigma",Y=Y,shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov,
sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015)
pm.model_to_graphviz(S2QBh_model)
trace_h2 = bf.get_trace(S2QBh_model)
bf.RB_bayesian_results(S2QBh_model, trace_h2, lengths,
epc_est_fm,
epc_est_fm_err,
experiment_type,
scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic,
routine = 'build_bayesian_model')
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
epc_calib = 0.010745 + (22+40/60-7)*(0.008261 - 0.010745)/24
print('EPC calibration: {0:0.4e}'.format(epc_calib))
# describe RB experiment
interleaved_gate = "cx"
physical_qubits = qubits = [0,1]
interleaved_circuit = circuits.CXGate()
lengths = np.arange(1, 200, 30)
num_samples = 10
seed = 1010
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
epc_calib = 0.010745 + (22+19/60-7)*(0.008261 - 0.010745)/24
print('EPC calibration: {0:0.4e}'.format(epc_calib))
# Run an interleaved RB experiment
int_exp2 = InterleavedRB(interleaved_circuit, qubits,
lengths, num_samples=num_samples, seed=seed)
# Run
int_expdata2 = int_exp2.run(backend).block_for_results()
int_results2 = int_expdata2.analysis_results()
# View result data
display(int_expdata2.figure(0))
for result in int_results2:
print(result)
popt = int_expdata2.analysis_results()[0].value.value
pcov = int_expdata2.analysis_results()[0].extra['covariance_mat']
popt[2] = popt[1]/popt[2] # replace alpha_C by p_tilde
epc_est_fm = int_expdata2.analysis_results()[3].value.value
epc_est_fm_err = int_expdata2.analysis_results()[3].value.stderr
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate ='cx'
# get count data
Y1 = bf.get_GSP_counts(int_expdata2._data, len(lengths),
range(0,2*num_samples-1,2))
Y2 = bf.get_GSP_counts(int_expdata2._data, len(lengths),
range(1,2*num_samples,2))
int_expdata2._data[1]
experiment_type = int_expdata2._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata2._data[0]['metadata']['physical_qubits']
shots = int_expdata2._data[0]['shots']
Y = np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde3 = bf.build_bayesian_model("tilde",Y=Y,shots=shots, m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI=RvsI,IvsR=IvsR)
pm.model_to_graphviz(tilde3)
trace_t3 = bf.get_trace(tilde3)
bf.RB_bayesian_results(tilde3, trace_t3, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
# use 2m length array
RvsI_h = np.ravel(np.vstack((np.ones_like(lengths),np.zeros_like(lengths))))
IvsR_h = np.ravel(np.vstack((np.zeros_like(lengths),np.ones_like(lengths))))
tilde4 = bf.build_bayesian_model("h_tilde",Y=np.hstack((Y1,Y2)),
shots=shots, m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI = RvsI_h, IvsR = IvsR_h,
sigma_theta=0.005,sigma_theta_l=0.001,sigma_theta_u=0.05)
pm.model_to_graphviz(tilde4)
trace_t4 = bf.get_trace(tilde4, target_accept = .99)
bf.RB_bayesian_results(tilde4, trace_t4, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import time
from qiskit_experiments.library import StandardRB, InterleavedRB
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library.randomized_benchmarking import RBUtils
import qiskit.circuit.library as circuits
# for retrieving gate calibration
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
simulation = True # make your choice here
if simulation:
from qiskit.providers.aer import AerSimulator
from qiskit.test.mock import FakeParis
backend = AerSimulator.from_backend(FakeParis())
else:
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_bogota') # type here hardware backend
# describe RB experiment
interleaved_gate = "cx"
qubits = [1,4]
interleaved_circuit = circuits.CXGate()
lengths = np.arange(1, 200, 15)
num_samples = 10
seed = 1010
# get the backend's calibration value
t = None # enter t in datetime format if necessary
# use properties(datetime=t) if t is defined
e_list = dv.gate_error_values(backend.properties())
epc_calib = np.nan
for tuple_e in e_list:
if tuple_e[0] == 'cx' and tuple_e[1] == qubits:
epc_calib = tuple_e[2]
print('EPC calibration: {0:1.4e}'.format(epc_calib))
#prepare circuits
int_exp2 = InterleavedRB(interleaved_circuit, qubits,
lengths, num_samples=num_samples, seed=seed)
#run
print("start experiments",time.strftime('%d/%m/%Y %H:%M:%S'))
int_expdata2 = int_exp2.run(backend).block_for_results()
print(" end experiments",time.strftime('%d/%m/%Y %H:%M:%S'))
#analyse
print(" start analysis",time.strftime('%d/%m/%Y %H:%M:%S'))
int_results2 = int_expdata2.analysis_results()
print(" end analysis",time.strftime('%d/%m/%Y %H:%M:%S'))
# look at result data
for result in int_results2:
print(result)
def get_GSP_counts(data, x_length, data_range):
    """Extract ground-state-population counts for the Bayesian model.

    Parameters
    ----------
    data : list of dict
        Per-circuit result records, each carrying a 'counts' mapping
        from measured bitstring to number of shots.
    x_length : int
        Number of sequence lengths per sample (circuits per RB sample).
    data_range : iterable of int
        Sample indices to extract (lets the caller skip interleaved runs).

    Returns
    -------
    numpy.ndarray
        Array of shape (len(data_range), x_length) with summed counts.
    """
    # Bitstrings pooled as "ground state" across the 1Q, 2Q and 3Q
    # interleave processes ('100' accommodates the 3-qubit data).
    valid_bitstrings = frozenset(('0', '00', '000', '100'))
    Y_list = []
    for i_samples in data_range:
        row_list = []
        for c_index in range(x_length):
            i_data = i_samples * x_length + c_index
            counts = data[i_data]['counts']
            # Count only shots that landed on a valid ground-state string.
            row_list.append(sum(val for key, val in counts.items()
                                if key in valid_bitstrings))
        Y_list.append(row_list)
    return np.array(Y_list)
# get count data and other values from int_expdata2
Y1 = get_GSP_counts(int_expdata2._data, len(lengths),
range(0,2*num_samples-1,2))
Y2 = get_GSP_counts(int_expdata2._data, len(lengths),
range(1,2*num_samples,2))
experiment_type = int_expdata2._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata2._data[0]['metadata']['physical_qubits']
shots = int_expdata2._data[0]['shots']
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
# --- Bayesian hierarchical fit of the interleaved RB counts (notebook cell) ---
# to compare ultimately: EPC and sigma(EPC) by LSF
epc_est_fm = int_expdata2.analysis_results()[3].value.value
epc_est_fm_err = int_expdata2.analysis_results()[3].value.stderr
# use 2m length array
Y = np.hstack((Y1,Y2))
# indicator rows: RvsI_h flags the first (standard) half, IvsR_h the second
# (interleaved) half of the concatenated length axis
RvsI_h = np.ravel(np.vstack((np.ones_like(lengths),np.zeros_like(lengths))))
IvsR_h = np.ravel(np.vstack((np.zeros_like(lengths),np.ones_like(lengths))))
X0 = np.tile(lengths,2)
# design matrix X: row 0 = Clifford lengths, rows 1-2 = standard/interleaved indicators
X = np.vstack((X0,RvsI_h,IvsR_h))
y_mean = np.mean(Y, axis = 0)/shots
sigma_y = np.std(Y, axis = 0)/shots
model = "hierarchical_model"
# priors for unknown model parameters
T_priors = int_expdata2.analysis_results()[0].value.value
print(T_priors)
# building the model
h_model = pm.Model()
with h_model:
    # Tying parameters
    # uniform priors clipped to +/-0.1 around the frequentist estimates and
    # kept strictly inside (0, 1)
    BoundedUniform = pm.Bound(pm.Uniform,
            lower=np.fmax(T_priors-0.1, np.full(T_priors.shape,1.e-9)),
            upper=np.fmin(T_priors+0.1, np.full(T_priors.shape,1.-1e-9)))
    pi = BoundedUniform("Tying_Parameters",testval = T_priors, shape = T_priors.shape)
    # error per Clifford derived from the interleaved depolarizing parameter pi[2]
    EPC = pm.Deterministic('EPC', scale*(1-pi[2]))
    # sigma of Beta functions
    sigma_t = pm.Uniform("σ_Beta", testval = 0.005,
                         upper = 0.05, lower = 0.0005)
    # Tying function
    GSP = pi[0] * ( X[1]*pi[1]**X[0] +\
                   X[2]*(pi[1]*pi[2])**X[0] ) + pi[3]
    theta = pm.Beta('θ', mu=GSP, sigma = sigma_t,
                    shape = ((2*len(lengths, ))) )
    # Likelihood (sampling distribution) of observations
    p = pm.Binomial("Counts", p = theta, observed = Y, n = shots)
# model graph
pm.model_to_graphviz(h_model)
# sample
with h_model:
    trace_h = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
                        return_inferencedata=True)
with h_model:
    az.plot_trace(trace_h);
with h_model:
    az.plot_posterior(trace_h, var_names = ["Tying_Parameters","σ_Beta","EPC"], round_to = 4, figsize = [16, 8]);
# look at the posterior values of the hyperparameters:
with h_model:
    # (hdi_prob=.94 is default)
    azt_summary = az.summary(trace_h, round_to=12,
                    var_names = ["Tying_Parameters", "σ_Beta","EPC"],
                    kind="stats")
azt_summary
# mean and sigma of EPC
epc_est_a = azt_summary['mean']['EPC']
epc_est_a_err = azt_summary['sd']['EPC']
# plot
import matplotlib.pyplot as plt # if not yet imported
with h_model:
    az.plot_posterior(trace_h, var_names = ["EPC"],
                      round_to = 4, figsize = [10,6],
                      textsize = 12)
Bayes_legend = "EPC SMC: {0:1.3e} ± {1:1.3e}"\
    .format(epc_est_a, epc_est_a_err)
LSF_legend = "EPC LSF: {0:1.3e} ± {1:1.3e}".format(epc_est_fm, epc_est_fm_err)
Cal_legend = "EPC Calibration: {0:1.3e}".format(epc_calib)
plt.axvline(x=epc_est_fm,color='cyan',ls="-")
# Draw the calibration reference line only when a calibration value exists.
# BUG FIX: the original test `epc_calib != np.nan` is always True, because
# NaN compares unequal to everything (including itself); np.isnan is required
# to actually skip the line when no calibration value was found.
if not np.isnan(epc_calib):
    plt.axvline(x=epc_calib,color='r',ls=":")
# overlay the Bayesian EPC estimate on the posterior plot
plt.axvline(x=epc_est_a,color='blue',ls=":")
# NaN epc_calib evaluates False in this comparison, so the calibration
# entry is dropped from the legend when no calibration value was found
if epc_calib > 0.0:
    plt.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
                LSF_legend,
                Cal_legend), fontsize=12 )
else:
    plt.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
                LSF_legend))
plt.title(experiment_type +', ' + interleaved_gate + " qubit(s):" + str(physical_qubits)\
          +', backend: '+backend.name() + "\n Bayesian "+model,
          fontsize=16);
# compare LSF and SMC: frequentist vs Bayesian vs backend calibration
print("Model:  Frequentist  Bayesian  Calibration")
print("__________________________________________________________")
print("EPC  {0:1.3e}  {1:1.3e}  {2:1.3e}"
      .format(epc_est_fm,epc_est_a,epc_calib ))
print("± sigma  ± {0:1.3e}  ± {1:1.3e} "
      .format(epc_est_fm_err, epc_est_a_err))
def calc_chisquare(ydata, sigma, ycalc):
    """Return the chi-square statistic of *ycalc* against *ydata*.

    Each residual (ydata - ycalc) is scaled by its uncertainty *sigma*
    before being squared and summed.
    """
    scaled_residuals = (ydata - ycalc) / sigma
    return np.sum(scaled_residuals ** 2)
# prepare box for GSP plot
# perform reduced χ² value calculation for Bayes hierarchical
mean_h = trace_h.posterior.mean(dim=['chain', 'draw'])
theta_stacked = mean_h.θ.values
NDF_h = len(lengths)*2 - 4 - 1 # (-1 is for σ_Beta)
chisq_h = calc_chisquare(y_mean, sigma_y, theta_stacked)/NDF_h
# text box lines: alpha, alpha_c, EPC (posterior mean ± sd) and reduced χ²
texto_0 = " alpha  = {0:7.4f} ± {1:1.4e}"\
    .format(azt_summary['mean']['Tying_Parameters[1]'],
            azt_summary['sd']['Tying_Parameters[1]'])
texto_1 =" alpha_c  = {0:7.4f} ± {1:1.4e}"\
    .format(azt_summary['mean']['Tying_Parameters[2]'],
            azt_summary['sd']['Tying_Parameters[2]'])
texto_2 = " EPC  = {0:7.4f} ± {1:1.4e}"\
    .format(azt_summary['mean']['EPC'],
            azt_summary['sd']['EPC'])
texto_3 = " Fit χ²  = {0:7.4f} "\
    .format(chisq_h)
texto = texto_0 + "\n" + texto_1 + "\n" + texto_2 + "\n" + texto_3
# prepare data for GSP plot
# get the calculated GSP values
with h_model:
    hdi_prob = .94
    # (hdi_prob=.94 is default, roughly corresponding to 2σ)
    theta_summary = az.summary(trace_h, round_to=12, hdi_prob = hdi_prob,
                    var_names = ["θ"], kind="stats")
# first half of θ = standard curve, second half = interleaved curve
y1 = theta_summary.values[:,0][0:len(lengths)]
y2 = theta_summary.values[:,0][len(lengths):len(lengths)*2]
HDI = False # make your choice here
if HDI:
    # HDI values as bounds
    bounds_rmk = "(shown bounds are "+ str(int(100*hdi_prob)) + "% HDI)"
    y1_min = theta_summary.values[:,2][0:len(lengths)]
    y2_min = theta_summary.values[:,2][len(lengths):len(lengths)*2]
    y1_max = theta_summary.values[:,3][0:len(lengths)]
    y2_max = theta_summary.values[:,3][len(lengths):len(lengths)*2]
else:
    # two sigma bounds for plot
    bounds_rmk = "(shown bounds are ± two σ)"
    sy = theta_summary.values[:,1]
    y1_min = y1 - sy[0:len(lengths)]*2
    y1_max = y1 + sy[0:len(lengths)]*2
    y2_min = y2 - sy[len(lengths):len(lengths)*2]*2
    y2_max = y2 + sy[len(lengths):len(lengths)*2]*2
# GSP plot: posterior curves with uncertainty bands plus raw per-seed points
import matplotlib.pyplot as plt # if not yet imported
font = {'family' : 'DejaVu Sans',
        'weight' : 'normal',
        'size'   : 14}
plt.rc('font', **font)
# NOTE(review): `plt` is rebound to the Axes object here, so the calls
# below use the Axes API (set_ylabel, set_title, ...) rather than pyplot.
fig, plt = plt.subplots(1, 1, figsize = [8,5])
plt.set_ylabel("P(0)")
plt.set_xlabel("Clifford Length")
plt.legend(("Standard" , "Interleaved" ),
           loc = 'center right', fontsize=10)
plt.plot(lengths,y1,color="purple", marker="o", lw = 0.75)
#plt.errorbar(lengths,y1,2*sy[0:len(lengths)],
             #color="purple", marker='o')
plt.fill_between(lengths, y1_min, y1_max,
                 alpha=.2, edgecolor='purple', facecolor= 'r')
plt.plot(lengths,y2,color="cyan", marker='^', lw = 0.75)
#plt.errorbar(lengths,y2,2*sy[len(lengths):2*len(lengths)],
             #color="cyan", marker='^')
plt.fill_between(lengths, y2_min, y2_max,
                 alpha=.2, edgecolor='cyan', facecolor= 'cyan')
# scatter the raw per-seed ground-state probabilities
for i_seed in range(num_samples):
    plt.scatter(lengths, Y1[i_seed,:]/shots,
                label = "data", marker="x",color="grey")
    plt.scatter(lengths, Y2[i_seed,:]/shots,
                label = "data", marker="+",color="grey")
plt.legend(("Standard" , "Interleaved" ),
           loc = 'center right', fontsize=10)
plt.text(lengths[-1]*0.3,0.75, texto, bbox=dict(facecolor='white'))
plt.grid()
plt.set_title(experiment_type +', ' + interleaved_gate\
          + str(physical_qubits)\
          +', backend: '+backend.name()+\
          "\n Bayesian "+model+" "+ bounds_rmk,
          fontsize=14);
# View result data for frequentist model
display(int_expdata2.figure(0))
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import time
from qiskit_experiments.library import StandardRB, InterleavedRB
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library.randomized_benchmarking import RBUtils
import qiskit.circuit.library as circuits
# for retrieving gate calibration
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
# --- Experiment setup: interleaved RB on a simulator or real backend ---
simulation = False # make your choice here
if simulation:
    from qiskit.providers.aer import AerSimulator
    from qiskit.test.mock import FakeParis
    backend = AerSimulator.from_backend(FakeParis())
else:
    from qiskit import IBMQ
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    backend = provider.get_backend('ibmq_bogota') # type here hardware backend
# describe RB experiment
interleaved_gate = "cx"
qubits = [0,1]
interleaved_circuit = circuits.CXGate()
lengths = np.arange(1, 200, 30)
num_samples = 10
seed = None
# get the backend's calibration value
t = None # enter t in datetime format if necessary
# use properties(datetime=t) if t is defined
e_list = dv.gate_error_values(backend.properties())
# epc_calib stays NaN if the gate/qubits pair is not found in the calibration list
epc_calib = np.nan
for tuple_e in e_list:
    if tuple_e[0] == 'cx' and tuple_e[1] == qubits:
        epc_calib = tuple_e[2]
print('EPC calibration: {0:1.4e}'.format(epc_calib))
#prepare circuits
int_exp2 = InterleavedRB(interleaved_circuit, qubits,
                  lengths, num_samples=num_samples, seed=seed)
#run
print("start experiments",time.strftime('%d/%m/%Y %H:%M:%S'))
int_expdata2 = int_exp2.run(backend).block_for_results()
print("  end experiments",time.strftime('%d/%m/%Y %H:%M:%S'))
#analyse
print(" start analysis",time.strftime('%d/%m/%Y %H:%M:%S'))
int_results2 = int_expdata2.analysis_results()
print("   end analysis",time.strftime('%d/%m/%Y %H:%M:%S'))
# look at result data
for result in int_results2:
    print(result)
def get_GSP_counts(data, x_length, data_range):
    """Collect the observed ground-state counts used in the Bayesian model.

    For every sample index in *data_range* and every length index in
    range(x_length), the counts of the accepted bitstrings are summed;
    the accepted keys cover the pooled 1Q, 2Q and 3Q interleave processes.

    Returns a 2-D numpy array of shape (len(data_range), x_length).
    """
    accepted = frozenset(('0', '00', '000', '100'))  # all valid bitstrings
    rows = []
    for sample_idx in data_range:
        base = sample_idx * x_length
        row = [
            sum(count
                for bitstring, count in data[base + length_idx]['counts'].items()
                if bitstring in accepted)
            for length_idx in range(x_length)
        ]
        rows.append(row)
    return np.array(rows)
# get count data and other values from int_expdata2
# even data indices -> Y1, odd indices -> Y2 (presumably standard vs
# interleaved circuits — TODO confirm against the experiment ordering)
Y1 = get_GSP_counts(int_expdata2._data, len(lengths),
                    range(0,2*num_samples-1,2))
Y2 = get_GSP_counts(int_expdata2._data, len(lengths),
                    range(1,2*num_samples,2))
experiment_type = int_expdata2._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata2._data[0]['metadata']['physical_qubits']
shots = int_expdata2._data[0]['shots']
nQ = len(qubits)
# depolarizing scale factor (2^n - 1) / 2^n used in the EPC definition
scale = (2 ** nQ - 1) / 2 ** nQ
# --- Bayesian hierarchical fit of the interleaved RB counts (notebook cell) ---
# to compare ultimately: EPC and sigma(EPC) by LSF
epc_est_fm = int_expdata2.analysis_results()[3].value.value
epc_est_fm_err = int_expdata2.analysis_results()[3].value.stderr
# use 2m length array
Y = np.hstack((Y1,Y2))
# indicator rows: RvsI_h flags the first (standard) half, IvsR_h the second
# (interleaved) half of the concatenated length axis
RvsI_h = np.ravel(np.vstack((np.ones_like(lengths),np.zeros_like(lengths))))
IvsR_h = np.ravel(np.vstack((np.zeros_like(lengths),np.ones_like(lengths))))
X0 = np.tile(lengths,2)
# design matrix X: row 0 = Clifford lengths, rows 1-2 = standard/interleaved indicators
X = np.vstack((X0,RvsI_h,IvsR_h))
y_mean = np.mean(Y, axis = 0)/shots
sigma_y = np.std(Y, axis = 0)/shots
model = "hierarchical_model"
# priors for unknown model parameters
T_priors = int_expdata2.analysis_results()[0].value.value
print(T_priors)
# building the model
h_model = pm.Model()
with h_model:
    # Tying parameters
    # uniform priors clipped to +/-0.1 around the frequentist estimates and
    # kept strictly inside (0, 1)
    BoundedUniform = pm.Bound(pm.Uniform,
            lower=np.fmax(T_priors-0.1, np.full(T_priors.shape,1.e-9)),
            upper=np.fmin(T_priors+0.1, np.full(T_priors.shape,1.-1e-9)))
    pi = BoundedUniform("Tying_Parameters",testval = T_priors, shape = T_priors.shape)
    # error per Clifford derived from the interleaved depolarizing parameter pi[2]
    EPC = pm.Deterministic('EPC', scale*(1-pi[2]))
    # sigma of Beta functions
    sigma_t = pm.Uniform("σ_Beta", testval = 0.005,
                         upper = 0.05, lower = 0.0005)
    # Tying function
    GSP = pi[0] * ( X[1]*pi[1]**X[0] +\
                   X[2]*(pi[1]*pi[2])**X[0] ) + pi[3]
    theta = pm.Beta('θ', mu=GSP, sigma = sigma_t,
                    shape = ((2*len(lengths, ))) )
    # Likelihood (sampling distribution) of observations
    p = pm.Binomial("Counts", p = theta, observed = Y, n = shots)
# model graph
pm.model_to_graphviz(h_model)
# sample
with h_model:
    trace_h = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
                        return_inferencedata=True)
with h_model:
    az.plot_trace(trace_h);
with h_model:
    az.plot_posterior(trace_h, var_names = ["Tying_Parameters","σ_Beta","EPC"], round_to = 4, figsize = [16, 8]);
# look at the posterior values of the hyperparameters:
with h_model:
    # (hdi_prob=.94 is default)
    azt_summary = az.summary(trace_h, round_to=12,
                    var_names = ["Tying_Parameters", "σ_Beta","EPC"],
                    kind="stats")
azt_summary
# mean and sigma of EPC
epc_est_a = azt_summary['mean']['EPC']
epc_est_a_err = azt_summary['sd']['EPC']
# plot
import matplotlib.pyplot as plt # if not yet imported
with h_model:
    az.plot_posterior(trace_h, var_names = ["EPC"],
                      round_to = 4, figsize = [10,6],
                      textsize = 12)
Bayes_legend = "EPC SMC: {0:1.3e} ± {1:1.3e}"\
    .format(epc_est_a, epc_est_a_err)
LSF_legend = "EPC LSF: {0:1.3e} ± {1:1.3e}".format(epc_est_fm, epc_est_fm_err)
Cal_legend = "EPC Calibration: {0:1.3e}".format(epc_calib)
plt.axvline(x=epc_est_fm,color='cyan',ls="-")
# Draw the calibration reference line only when a calibration value exists.
# BUG FIX: the original test `epc_calib != np.nan` is always True, because
# NaN compares unequal to everything (including itself); np.isnan is required
# to actually skip the line when no calibration value was found.
if not np.isnan(epc_calib):
    plt.axvline(x=epc_calib,color='r',ls=":")
# overlay the Bayesian EPC estimate on the posterior plot
plt.axvline(x=epc_est_a,color='blue',ls=":")
# NaN epc_calib evaluates False in this comparison, so the calibration
# entry is dropped from the legend when no calibration value was found
if epc_calib > 0.0:
    plt.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
                LSF_legend,
                Cal_legend), fontsize=12 )
else:
    plt.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
                LSF_legend))
plt.title(experiment_type +', ' + interleaved_gate + " qubit(s):" + str(physical_qubits)\
          +', backend: '+backend.name() + "\n Bayesian "+model,
          fontsize=16);
# compare LSF and SMC: frequentist vs Bayesian vs backend calibration
print("Model:  Frequentist  Bayesian  Calibration")
print("__________________________________________________________")
print("EPC  {0:1.3e}  {1:1.3e}  {2:1.3e}"
      .format(epc_est_fm,epc_est_a,epc_calib ))
print("± sigma  ± {0:1.3e}  ± {1:1.3e} "
      .format(epc_est_fm_err, epc_est_a_err))
def calc_chisquare(ydata, sigma, ycalc):
    """Return the chi-square statistic of *ycalc* against *ydata*.

    Residuals are normalized by their uncertainties *sigma* before
    squaring and summing.
    """
    normalized = (ydata - ycalc) / sigma
    return np.sum(normalized * normalized)
# prepare box for GSP plot
# perform reduced χ² value calculation for Bayes hierarchical
mean_h = trace_h.posterior.mean(dim=['chain', 'draw'])
theta_stacked = mean_h.θ.values
NDF_h = len(lengths)*2 - 4 - 1 # (-1 is for σ_Beta)
chisq_h = calc_chisquare(y_mean, sigma_y, theta_stacked)/NDF_h
# text box lines: alpha, alpha_c, EPC (posterior mean ± sd) and reduced χ²
texto_0 = " alpha  = {0:7.4f} ± {1:1.4e}"\
    .format(azt_summary['mean']['Tying_Parameters[1]'],
            azt_summary['sd']['Tying_Parameters[1]'])
texto_1 =" alpha_c  = {0:7.4f} ± {1:1.4e}"\
    .format(azt_summary['mean']['Tying_Parameters[2]'],
            azt_summary['sd']['Tying_Parameters[2]'])
texto_2 = " EPC  = {0:1.4e} ± {1:1.4e}"\
    .format(azt_summary['mean']['EPC'],
            azt_summary['sd']['EPC'])
texto_3 = " Fit χ²  = {0:7.4f} "\
    .format(chisq_h)
texto = texto_0 + "\n" + texto_1 + "\n" + texto_2 + "\n" + texto_3
# prepare data for GSP plot
# get the calculated GSP values
with h_model:
    hdi_prob = .94
    # (hdi_prob=.94 is default, roughly corresponding to 2σ)
    theta_summary = az.summary(trace_h, round_to=12, hdi_prob = hdi_prob,
                    var_names = ["θ"], kind="stats")
# first half of θ = standard curve, second half = interleaved curve
y1 = theta_summary.values[:,0][0:len(lengths)]
y2 = theta_summary.values[:,0][len(lengths):len(lengths)*2]
HDI = False # make your choice here
if HDI:
    # HDI values as bounds
    bounds_rmk = "(shown bounds are "+ str(int(100*hdi_prob)) + "% HDI)"
    y1_min = theta_summary.values[:,2][0:len(lengths)]
    y2_min = theta_summary.values[:,2][len(lengths):len(lengths)*2]
    y1_max = theta_summary.values[:,3][0:len(lengths)]
    y2_max = theta_summary.values[:,3][len(lengths):len(lengths)*2]
else:
    # two sigma bounds for plot
    bounds_rmk = "(shown bounds are ± two σ)"
    sy = theta_summary.values[:,1]
    y1_min = y1 - sy[0:len(lengths)]*2
    y1_max = y1 + sy[0:len(lengths)]*2
    y2_min = y2 - sy[len(lengths):len(lengths)*2]*2
    y2_max = y2 + sy[len(lengths):len(lengths)*2]*2
# GSP plot: posterior curves with uncertainty bands plus raw per-seed points
import matplotlib.pyplot as plt # if not yet imported
font = {'family' : 'DejaVu Sans',
        'weight' : 'normal',
        'size'   : 14}
plt.rc('font', **font)
# NOTE(review): `plt` is rebound to the Axes object here, so the calls
# below use the Axes API (set_ylabel, set_title, ...) rather than pyplot.
fig, plt = plt.subplots(1, 1, figsize = [8,5])
plt.set_ylabel("P(0)")
plt.set_xlabel("Clifford Length")
plt.legend(("Standard" , "Interleaved" ),
           loc = 'center right', fontsize=10)
plt.plot(lengths,y1,color="purple", marker="o", lw = 0.75)
#plt.errorbar(lengths,y1,2*sy[0:len(lengths)],
             #color="purple", marker='o')
plt.fill_between(lengths, y1_min, y1_max,
                 alpha=.2, edgecolor='purple', facecolor= 'r')
plt.plot(lengths,y2,color="cyan", marker='^', lw = 0.75)
#plt.errorbar(lengths,y2,2*sy[len(lengths):2*len(lengths)],
             #color="cyan", marker='^')
plt.fill_between(lengths, y2_min, y2_max,
                 alpha=.2, edgecolor='cyan', facecolor= 'cyan')
# scatter the raw per-seed ground-state probabilities
for i_seed in range(num_samples):
    plt.scatter(lengths, Y1[i_seed,:]/shots,
                label = "data", marker="x",color="grey")
    plt.scatter(lengths, Y2[i_seed,:]/shots,
                label = "data", marker="+",color="grey")
plt.legend(("Standard" , "Interleaved" ),
           loc = 'center right', fontsize=10)
plt.text(lengths[-1]*0.3,0.75, texto, bbox=dict(facecolor='white'))
plt.grid()
plt.set_title(experiment_type +', ' + interleaved_gate\
          + str(physical_qubits)\
          +', backend: '+backend.name()+\
          "\n Bayesian "+model+" "+ bounds_rmk,
          fontsize=14);
# View result data for frequentist model
display(int_expdata2.figure(0))
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import time
import copy
from qiskit_experiments.library import StandardRB, InterleavedRB
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library.randomized_benchmarking import RBUtils
import qiskit.circuit.library as circuits
# for retrieving gate calibration
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
import qiskit_bayesian_fitter as bf
# --- Standard RB on one qubit, fitted with the bayesian-fitter helper module ---
simulation = True # make your choice here
if simulation:
    from qiskit.providers.aer import AerSimulator
    from qiskit.test.mock import FakeParis
    backend = AerSimulator.from_backend(FakeParis())
else:
    from qiskit import IBMQ
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    backend = provider.get_backend('ibmq_bogota') # type here hardware backend
# describe RB experiment
qubits = [0]
lengths = np.arange(1, 2500, 250)
num_samples = 10
seed = 194606
# NOTE(review): the experiment is built and run twice (here and in the
# "prepare circuits"/"run" cell below); the second run overwrites the first.
exp1 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
expdata1 = exp1.run(backend).block_for_results()
results1 = expdata1.analysis_results()
#prepare circuits
exp1 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
#run
print("start experiments",time.strftime('%d/%m/%Y %H:%M:%S'))
expdata1 = exp1.run(backend).block_for_results()
print("  end experiments",time.strftime('%d/%m/%Y %H:%M:%S'))
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
m_len = len(lengths)
interleaved_gate = "" # for plot title
experiment_type = expdata1._data[0]['metadata']['experiment_type']
physical_qubits = expdata1._data[0]['metadata']['physical_qubits']
shots = expdata1._data[0]['shots']
# to compare ultimately: EPC and sigma(EPC) by LSF
epc_est_fm = expdata1.analysis_results()[2].value.value
epc_est_fm_err = expdata1.analysis_results()[2].value.stderr
# get count data and other values from expdata1
Y = bf.get_GSP_counts(expdata1._data, m_len,
                      range(num_samples))
X = np.copy(lengths)
y_mean = np.mean(Y, axis = 0)/shots
sigma_y = np.std(Y, axis = 0)/shots
model = "hierarchical model"
# priors for unknown model parameters
T_priors = expdata1.analysis_results()[0].value.value
print(T_priors)
# hyperparameters handed to bf.create_model for the σ_Beta prior
testval_s = 0.001
upper_s = 0.004
lower_s = 0.0001
alpha_Gamma = 10
beta_Gamma = 10000
h_model = bf.create_model(T_priors, X, Y, shots, scale,
            testval_s = testval_s ,upper_s = upper_s, lower_s = lower_s,
            s_prior = "Gamma", alpha_Gamma = alpha_Gamma, beta_Gamma = beta_Gamma)
# model graph
pm.model_to_graphviz(h_model)
# sample
with h_model:
    trace_h = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
                        return_inferencedata=True)
with h_model:
    az.plot_trace(trace_h);
with h_model:
    az.plot_posterior(trace_h, var_names = ["Tying_Parameters","σ_Beta","EPC"], round_to = 4, figsize = [16, 8]);
# look at the posterior values of the hyperparameters:
with h_model:
    # (hdi_prob=.94 is default)
    azt_summary = az.summary(trace_h, round_to=12,
                    var_names = ["Tying_Parameters", "σ_Beta","EPC"],
                    kind="stats")
azt_summary
# for comparison
# reference
epc_calib = np.nan
# bayesian
epc_est_a = azt_summary['mean']['EPC']
epc_est_a_err = azt_summary['sd']['EPC']
# frequentist
epc_est_fm = expdata1.analysis_results()[2].value.value
epc_est_fm_err = expdata1.analysis_results()[2].value.stderr
epc_title = experiment_type +', qubit(s):' + str(physical_qubits)\
                +', backend: '+backend.name() + "\n Bayesian "+model
bf.plot_epc(h_model, trace_h, epc_calib, epc_est_a,
            epc_est_a_err, epc_est_fm, epc_est_fm_err, epc_title)
# backend's recorded EPG
error_dic = RBUtils.get_error_dict_from_backend(backend, qubits)
# get the EPG values
# NOTE(review): indices 3..5 are assumed to be the per-gate EPG entries of
# analysis_results() — verify against the qiskit-experiments version in use.
EPG_dic = {}
REF_dic = {}
for i in range(3,6):
    EPG_key = expdata1.analysis_results()[i].name
    EPG_dic[EPG_key] = expdata1.analysis_results()[i].value.value
    for elem in (error_dic):
        if 'EPG_' + elem[1] == EPG_key:
            REF_dic[EPG_key] = error_dic[elem]
# compare LSF and SMC: Bayesian EPGs are rescaled by the EPC ratio
print("Model:  Frequentist  Bayesian  Reference")
print("________________________________________________________")
print("EPC  {0:1.3e}  {1:1.3e}  {2:1.3e}"
      .format(epc_est_fm, epc_est_a, epc_calib ))
print("± sd  ± {0:1.3e}  ± {1:1.3e} "
      .format(epc_est_fm_err, epc_est_a_err))
for i, (gate,EPG) in enumerate(EPG_dic.items()):
    print("{0:<12}{1:1.3e}  {2:1.3e}  {3:1.3e} "
          .format(gate, EPG, EPG*epc_est_a/epc_est_fm,
                  REF_dic[gate]))
# prepare box for GSP plot
texto = "alpha  = {0:7.4f} ± {1:1.4e}"\
    .format(azt_summary['mean']['Tying_Parameters[1]'],
            azt_summary['sd']['Tying_Parameters[1]']) + "\n"
texto += "EPC  = {0:1.4e} ± {1:1.4e}"\
    .format(azt_summary['mean']['EPC'],
            azt_summary['sd']['EPC']) + "\n"
for i, (gate,EPG) in enumerate(EPG_dic.items()):
    texto += " {0:<8} = {1:1.4e} "\
        .format(gate.ljust(6), EPG*epc_est_a/epc_est_fm) + "\n"
texto += " Fit χ²  = {0:7.4f} "\
    .format(bf.reduced_chisquare(y_mean, sigma_y, trace_h))
bounds_rmk, y1, y1_min, y1_max = bf.prepare_data_GSP_plot(h_model, trace_h, HDI = False)
title = experiment_type +', ' + interleaved_gate\
          + str(physical_qubits)\
          +', backend: '+backend.name()+\
          "\n Bayesian "+model+" "+ bounds_rmk
bf.gsp_plot(scale, lengths, num_samples, shots, texto, title,
        y1, y1_min, y1_max, y2=None, y2_min=None, y2_max=None, Y1=Y, Y2=None,
        first_curve = "Calculated values", second_curve = None)
# View data for frequentist model
display(expdata1.figure(0))
# --- 2-qubit RB with per-qubit EPG correction ---
lengths = np.arange(1, 200, 30)
num_samples = 10
seed = 194606
qubits = (1,4)
# Run a 1-qubit RB experiment on qubits 1, 4 to determine the error-per-gate of 1-qubit gates
expdata_1q = {}
epg_1q = []
lengths_1_qubit = np.arange(1, 2500, 250)
for qubit in qubits:
    exp = StandardRB([qubit], lengths_1_qubit, num_samples=num_samples, seed=seed)
    expdata = exp.run(backend).block_for_results()
    expdata_1q[qubit] = expdata
    epg_1q += expdata.analysis_results()
# Run an RB experiment on qubits 1, 4
exp2 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
# Use the EPG data of the 1-qubit runs to ensure correct 2-qubit EPG computation
exp2.set_analysis_options(epg_1_qubit=epg_1q)
# Run the 2-qubit experiment
expdata2 = exp2.run(backend).block_for_results()
# View result data
results2 = expdata2.analysis_results()
# Compare the computed EPG of the cx gate with the backend's recorded cx gate error:
expected_epg = RBUtils.get_error_dict_from_backend(backend, qubits)[(qubits, 'cx')]
exp2_epg = expdata2.analysis_results("EPG_cx").value
print("Backend's reported EPG of the cx gate:", expected_epg)
print("Experiment computed EPG of the cx gate:", exp2_epg)
# Bayesian fit of each 1-qubit run; posterior traces are kept per qubit
trace_1q = {}
scale_1q = .5  # (2^1 - 1) / 2^1 depolarizing scale for a single qubit
for qubit in qubits:
    Y = bf.get_GSP_counts(expdata_1q[qubit]._data, len(lengths_1_qubit),
                      range(num_samples))
    X = np.copy(lengths_1_qubit)
    shots_1_qubit = expdata_1q[qubit]._data[0]['shots']
    T_priors = expdata_1q[qubit].analysis_results()[0].value.value
    # hyperparameters for the σ_Beta prior in bf.create_model
    testval_s = 0.001
    upper_s = 0.004
    lower_s = 0.0001
    alpha_Gamma = 10
    beta_Gamma = 10000
    h1_model = bf.create_model(T_priors, X, Y, shots_1_qubit, scale_1q,
                testval_s = testval_s ,upper_s = upper_s, lower_s = lower_s,
                s_prior = "Gamma", alpha_Gamma = alpha_Gamma, beta_Gamma = beta_Gamma)
    with h1_model:
        trace_h1 = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
                        return_inferencedata=True)
    trace_1q[qubit] = trace_h1
# --- Bayesian hierarchical fit of the 2-qubit standard RB data ---
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
m_len = len(lengths)
interleaved_gate = "" # for plot title
experiment_type = expdata2._data[0]['metadata']['experiment_type']
physical_qubits = expdata2._data[0]['metadata']['physical_qubits']
shots = expdata2._data[0]['shots']
# get count data and other values from expdata2
Y = bf.get_GSP_counts(expdata2._data, m_len,
                      range(num_samples))
X = np.copy(lengths)
y_mean = np.mean(Y, axis = 0)/shots
sigma_y = np.std(Y, axis = 0)/shots
model = "hierarchical model"
# priors for unknown model parameters
T_priors = expdata2.analysis_results()[0].value.value
print(T_priors)
# hyperparameters for the σ_Beta prior in bf.create_model
testval_s = 0.0025
upper_s = 0.005
lower_s = 0.0005
alpha_Gamma = 5
beta_Gamma = 2000
h2_model = bf.create_model(T_priors, X, Y, shots, scale,
            testval_s = testval_s ,upper_s = upper_s, lower_s = lower_s,
            s_prior = "Gamma", alpha_Gamma = alpha_Gamma, beta_Gamma = beta_Gamma)
# model graph
pm.model_to_graphviz(h2_model)
# sample
with h2_model:
    trace_h2 = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
                        return_inferencedata=True)
with h2_model:
    az.plot_trace(trace_h2);
with h2_model:
    az.plot_posterior(trace_h2, var_names = ["Tying_Parameters","σ_Beta","EPC"], round_to = 4, figsize = [16, 8]);
# look at the posterior values of the hyperparameters:
with h2_model:
    # (hdi_prob=.94 is default)
    azt2_summary = az.summary(trace_h2, round_to=12,
                    var_names = ["Tying_Parameters", "σ_Beta","EPC"],
                    kind="stats")
azt2_summary
# for comparison
# reference
epc_calib = np.nan
# bayesian
epc_est_a = azt2_summary['mean']['EPC']
epc_est_a_err = azt2_summary['sd']['EPC']
# frequentist
epc_est_fm = expdata2.analysis_results()[2].value.value
epc_est_fm_err = expdata2.analysis_results()[2].value.stderr
epc_title = experiment_type +', qubit(s):' + str(physical_qubits)\
                +', backend: '+backend.name() + "\n Bayesian "+model
bf.plot_epc(h2_model, trace_h2, epc_calib, epc_est_a,
            epc_est_a_err, epc_est_fm, epc_est_fm_err, epc_title)
# obtain posterior values of the hyperparameters:
azts_1q = []
for i_qubit, qubit in enumerate(qubits):
    with h2_model:
        # (hdi_prob=.94 is default)
        azts_1q.append( az.summary(trace_1q[qubit], round_to=12,
                    var_names = ["Tying_Parameters", "σ_Beta","EPC"],
                    kind="stats") )
# retrieve gates per clifford from frequentist results
# NOTE(review): the indices epg_1q[1] and epg_1q[7] are assumed to select the
# alpha entries of the two 1-qubit runs — verify against the results layout.
alpha_1q = [epg_1q[1].value.value, epg_1q[7].value.value]
epc_2_qubit = expdata2.analysis_results()[2].value.value
alpha_c_1q = 1 / 5 * (alpha_1q[0] + alpha_1q[1] + 3 * alpha_1q[0] * alpha_1q[1])
alpha_c_2q = (1 - 4 / 3 * epc_2_qubit) / alpha_c_1q
n_gate_2q = 3 / 4 * (1 - alpha_c_2q) / exp2_epg.value
# calculate epg cx from the bayesian results, reusing n_gate_2q from the
# frequentist computation above
alpha_1q_b = [azts_1q[0]['mean']['Tying_Parameters[1]'],
              azts_1q[1]['mean']['Tying_Parameters[1]']]
epc_2_qubit_b = azt2_summary['mean']['EPC']
alpha_c_1q_b = 1 / 5 * (alpha_1q_b[0] + alpha_1q_b[1] + 3 * alpha_1q_b[0] * alpha_1q_b[1])
alpha_c_2q_b = (1 - 4 / 3 * epc_2_qubit_b) / alpha_c_1q_b
epg_cx = 3 / 4 * (1 - alpha_c_2q_b) / n_gate_2q
# compare LSF and SMC
print("Model:  Frequentist  Bayesian  Reference")
print("________________________________________________________")
print("EPC  {0:1.3e}  {1:1.3e}  -----"
      .format(epc_est_fm, epc_est_a ))
print("± sd  ± {0:1.3e}  ± {1:1.3e} "
      .format(epc_est_fm_err, epc_est_a_err))
print("EPG_cx  {0:1.3e}  {1:1.3e}  {2:1.3e}"
      .format(exp2_epg.value, epg_cx, expected_epg))
# prepare box for GSP plot
texto = " alpha  = {0:7.4f} ± {1:1.4e}"\
    .format(azt2_summary['mean']['Tying_Parameters[1]'],
            azt2_summary['sd']['Tying_Parameters[1]']) + "\n"
texto += " EPC  = {0:1.4e} ± {1:1.4e}"\
    .format(azt2_summary['mean']['EPC'],
            azt2_summary['sd']['EPC']) + "\n"
texto += " EPG_cx  = {0:7.4f}"\
    .format(epg_cx) + "\n"
texto += " Fit χ²  = {0:7.4f} "\
    .format(bf.reduced_chisquare(y_mean, sigma_y, trace_h2))
bounds_rmk, y1, y1_min, y1_max = bf.prepare_data_GSP_plot(h2_model, trace_h2)
title = experiment_type +', ' + interleaved_gate\
          + str(physical_qubits)\
          +', backend: '+backend.name()+\
          "\n Bayesian "+model+" "+ bounds_rmk
bf.gsp_plot(scale, lengths, num_samples, shots, texto, title,
        y1, y1_min, y1_max, y2=None, y2_min=None, y2_max=None, Y1=Y, Y2=None,
        first_curve = "Calculated values", second_curve = None)
# View result for frequentist model
display(expdata2.figure(0))
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import time
from qiskit_experiments.library import StandardRB, InterleavedRB
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library.randomized_benchmarking import RBUtils
import qiskit.circuit.library as circuits
# for retrieving gate Reference
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
import qiskit_bayesian_fitter as bf
# --- Interleaved RB (1- or 2-qubit) fitted with the bayesian-fitter module ---
simulation = True # make your choice here
if simulation:
    from qiskit.providers.aer import AerSimulator
    from qiskit.test.mock import FakeParis
    backend = AerSimulator.from_backend(FakeParis())
else:
    from qiskit import IBMQ
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    backend = provider.get_backend('ibmq_bogota') # type here hardware backend
# describe RB experiment (accept 1-qubit or 2-qubit interleaved gate)
is_1_qubit = False
if is_1_qubit:
    interleaved_gate = "x"
    interleaved_circuit = circuits.XGate()
    qubits = [0]
    lengths = np.arange(1, 2500, 250)
    testval_s = 0.001
    upper_s = 0.004
    lower_s = 0.0005
    alpha_Gamma = 10
    beta_Gamma = 10000
else:
    interleaved_gate = "cx"
    interleaved_circuit = circuits.CXGate()
    qubits = [1,4]
    lengths = np.arange(1, 200, 15)
    testval_s = 0.0025
    upper_s = 0.005
    lower_s = 0.0005
    alpha_Gamma = 5
    beta_Gamma = 2000
num_samples = 10
seed = 194606
# get the backend's reference value
t = None # enter t in datetime format if necessary
# use properties(datetime=t) if t is defined
e_list = dv.gate_error_values(backend.properties())
# epc_calib stays NaN if the gate/qubits pair is not found in the error list
epc_calib = np.nan
for tuple_e in e_list:
    if tuple_e[0] == interleaved_gate and tuple_e[1] == qubits:
        epc_calib = tuple_e[2]
print('EPC reference: {0:1.4e}'.format(epc_calib))
#prepare circuits
int_exp = InterleavedRB(interleaved_circuit, qubits,
                  lengths, num_samples=num_samples, seed=seed)
#run
print("start experiments",time.strftime('%d/%m/%Y %H:%M:%S'))
int_expdata = int_exp.run(backend).block_for_results()
print("  end experiments",time.strftime('%d/%m/%Y %H:%M:%S'))
experiment_type = int_expdata._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata._data[0]['metadata']['physical_qubits']
shots = int_expdata._data[0]['shots']
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
m_len = len(lengths)
# get count data and other values from int_expdata
Y = bf.get_GSP_counts(int_expdata._data, 2*m_len,
                      range(num_samples))
# get RvsI_h and IvsR_h
# indicators built from each circuit's 'interleaved' metadata flag:
# RvsI_h = 1 for standard circuits, IvsR_h = 1 for interleaved ones
RvsI_h = np.ones(2*m_len)
for i_data in range(2*m_len):
    if int_expdata._data[i_data]['metadata']['interleaved']:
        RvsI_h[i_data] = 0.
IvsR_h = (RvsI_h + 1.) %2
X0 = np.tile(lengths,2)
X = np.vstack((X0,RvsI_h,IvsR_h))
y_mean = np.mean(Y, axis = 0)/shots
sigma_y = np.std(Y, axis = 0)/shots
model = "hierarchical model"
# priors for unknown model parameters
T_priors = int_expdata.analysis_results()[0].value.value
print(T_priors)
# build the hierarchical model via the helper module and sample it
h_model = bf.create_model(T_priors, X, Y, shots, scale,
            testval_s = testval_s, upper_s = upper_s, lower_s = lower_s,
            s_prior = "Gamma", alpha_Gamma = alpha_Gamma, beta_Gamma = beta_Gamma)
# model graph
pm.model_to_graphviz(h_model)
# sample
with h_model:
    trace_h = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
                        return_inferencedata=True)
with h_model:
    az.plot_trace(trace_h);
with h_model:
    az.plot_posterior(trace_h, var_names = ["Tying_Parameters","σ_Beta","EPC"], round_to = 4, figsize = [16, 8]);
# look at the posterior values of the hyperparameters:
with h_model:
    # (hdi_prob=.94 is default)
    azt_summary = az.summary(trace_h, round_to=12,
                    var_names = ["Tying_Parameters","σ_Beta","EPC"],
                    kind="stats")
azt_summary
# for comparison
# bayesian
epc_est_a = azt_summary['mean']['EPC']
epc_est_a_err = azt_summary['sd']['EPC']
# frequentist
epc_est_fm = int_expdata.analysis_results()[3].value.value
epc_est_fm_err = int_expdata.analysis_results()[3].value.stderr
epc_title = experiment_type +', ' + interleaved_gate \
            + " qubit(s):" + str(physical_qubits)\
            +', backend: '+ backend.name() + "\n Bayesian "+model
bf.plot_epc(h_model, trace_h, epc_calib, epc_est_a,
            epc_est_a_err, epc_est_fm, epc_est_fm_err, epc_title)
# compare LSF and SMC
print("Model:  Frequentist  Bayesian  Reference")
print("__________________________________________________________")
print("EPC  {0:1.3e}  {1:1.3e}  {2:1.3e}"
      .format(epc_est_fm,epc_est_a,epc_calib ))
print("± sd  ± {0:1.3e}  ± {1:1.3e} "
      .format(epc_est_fm_err, epc_est_a_err))
def calc_chisquare(ydata, sigma, ycalc):
    """Chi-square of *ycalc* vs *ydata* with per-point uncertainties *sigma*."""
    pulls = np.divide(ydata - ycalc, sigma)
    return np.sum(np.square(pulls))
# GSP plot
# perform reduced χ² value calculation for Bayes hierarchical
mean_h = trace_h.posterior.mean(dim=['chain', 'draw'])
theta_stacked = mean_h.θ.values
NDF_h = m_len*2 - 4 - 1 # (-1 is for σ_Beta)
chisq_h = calc_chisquare(y_mean, sigma_y, theta_stacked)/NDF_h
#box:
texto = " alpha = {0:7.4f} ± {1:1.4e}"\
.format(azt_summary['mean']['Tying_Parameters[1]'],
azt_summary['sd']['Tying_Parameters[1]']) + "\n"
texto +=" alpha_c = {0:7.4f} ± {1:1.4e}"\
.format(azt_summary['mean']['Tying_Parameters[2]'],
azt_summary['sd']['Tying_Parameters[2]']) + "\n"
texto +=" EPC = {0:7.4f} ± {1:1.4e}"\
.format(azt_summary['mean']['EPC'],
azt_summary['sd']['EPC']) + "\n"
texto +=" Fit χ² = {0:7.4f} "\
.format(chisq_h)
# obtain data for plot
bounds_rmk, y1, y1_min, y1_max, y2, y2_min, y2_max, Y1, Y2 = \
bf.prepare_two_curves_GSP_plot(h_model, trace_h, X, Y, HDI = False)
# title
title = experiment_type +', ' + interleaved_gate\
+ str(physical_qubits)\
+', backend: '+backend.name()+\
"\n Bayesian "+model+" "+ bounds_rmk
# plot
bf.gsp_plot(scale, lengths, num_samples, shots, texto, title,
y1, y1_min, y1_max, y2, y2_min, y2_max, Y1, Y2,
first_curve = "Standard", second_curve = "Interleaved")
# View result for frequentist model
display(int_expdata.figure(0))
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import time
from qiskit_experiments.library import StandardRB, InterleavedRB
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library.randomized_benchmarking import RBUtils
import qiskit.circuit.library as circuits
# for retrieving gate Reference
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
import qiskit_bayesian_fitter as bf
# --- Interleaved RB (1- or 2-qubit) fitted with the bayesian-fitter module ---
simulation = True # make your choice here
if simulation:
    from qiskit.providers.aer import AerSimulator
    from qiskit.test.mock import FakeParis
    backend = AerSimulator.from_backend(FakeParis())
else:
    from qiskit import IBMQ
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    backend = provider.get_backend('ibmq_bogota') # type here hardware backend
# describe RB experiment (accept 1-qubit or 2-qubit interleaved gate)
is_1_qubit = True
if is_1_qubit:
    interleaved_gate = "x"
    interleaved_circuit = circuits.XGate()
    qubits = [0]
    lengths = np.arange(1, 2500, 250)
    testval_s = 0.001
    upper_s = 0.004
    lower_s = 0.0005
    alpha_Gamma = 10
    beta_Gamma = 10000
else:
    interleaved_gate = "cx"
    interleaved_circuit = circuits.CXGate()
    qubits = [1,4]
    lengths = np.arange(1, 200, 15)
    testval_s = 0.0025
    upper_s = 0.005
    lower_s = 0.0005
    alpha_Gamma = 5
    beta_Gamma = 2000
num_samples = 10
seed = 194606
# get the backend's reference value
t = None # enter t in datetime format if necessary
# use properties(datetime=t) if t is defined
e_list = dv.gate_error_values(backend.properties())
# epc_calib stays NaN if the gate/qubits pair is not found in the error list
epc_calib = np.nan
for tuple_e in e_list:
    if tuple_e[0] == interleaved_gate and tuple_e[1] == qubits:
        epc_calib = tuple_e[2]
print('EPC reference: {0:1.4e}'.format(epc_calib))
#prepare circuits
int_exp = InterleavedRB(interleaved_circuit, qubits,
                  lengths, num_samples=num_samples, seed=seed)
#run
print("start experiments",time.strftime('%d/%m/%Y %H:%M:%S'))
int_expdata = int_exp.run(backend).block_for_results()
print("  end experiments",time.strftime('%d/%m/%Y %H:%M:%S'))
experiment_type = int_expdata._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata._data[0]['metadata']['physical_qubits']
shots = int_expdata._data[0]['shots']
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
m_len = len(lengths)
# get count data and other values from int_expdata
Y = bf.get_GSP_counts(int_expdata._data, 2*m_len,
                      range(num_samples))
# get RvsI_h and IvsR_h
# indicators built from each circuit's 'interleaved' metadata flag:
# RvsI_h = 1 for standard circuits, IvsR_h = 1 for interleaved ones
RvsI_h = np.ones(2*m_len)
for i_data in range(2*m_len):
    if int_expdata._data[i_data]['metadata']['interleaved']:
        RvsI_h[i_data] = 0.
IvsR_h = (RvsI_h + 1.) %2
X0 = np.tile(lengths,2)
X = np.vstack((X0,RvsI_h,IvsR_h))
y_mean = np.mean(Y, axis = 0)/shots
sigma_y = np.std(Y, axis = 0)/shots
model = "hierarchical model"
# priors for unknown model parameters
T_priors = int_expdata.analysis_results()[0].value.value
print(T_priors)
h_model = bf.create_model(T_priors, X, Y, shots, scale,
testval_s = testval_s, upper_s = upper_s, lower_s = lower_s,
s_prior = "Gamma", alpha_Gamma = alpha_Gamma, beta_Gamma = beta_Gamma)
# model graph
pm.model_to_graphviz(h_model)
# sample
with h_model:
trace_h = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
return_inferencedata=True)
with h_model:
az.plot_trace(trace_h);
with h_model:
az.plot_posterior(trace_h, var_names = ["Tying_Parameters","σ_Beta","EPC"], round_to = 4, figsize = [16, 8]);
# look at the posterior values of the hyperparameters:
with h_model:
# (hdi_prob=.94 is default)
azt_summary = az.summary(trace_h, round_to=12,
var_names = ["Tying_Parameters","σ_Beta","EPC"],
kind="stats")
azt_summary
# for comparison
# bayesian
epc_est_a = azt_summary['mean']['EPC']
epc_est_a_err = azt_summary['sd']['EPC']
# frequentist
epc_est_fm = int_expdata.analysis_results()[3].value.value
epc_est_fm_err = int_expdata.analysis_results()[3].value.stderr
epc_title = experiment_type +', ' + interleaved_gate \
+ " qubit(s):" + str(physical_qubits)\
+', backend: '+ backend.name() + "\n Bayesian "+model
bf.plot_epc(h_model, trace_h, epc_calib, epc_est_a,
epc_est_a_err, epc_est_fm, epc_est_fm_err, epc_title)
# compare LSF and SMC
print("Model: Frequentist Bayesian Reference")
print("__________________________________________________________")
print("EPC {0:1.3e} {1:1.3e} {2:1.3e}"
.format(epc_est_fm,epc_est_a,epc_calib ))
print("± sd ± {0:1.3e} ± {1:1.3e} "
.format(epc_est_fm_err, epc_est_a_err))
def calc_chisquare(ydata, sigma, ycalc):
r = ydata - ycalc
chisq = np.sum((r / sigma) ** 2)
return chisq
# GSP plot
# perform reduced χ² value calculation for Bayes hierarchical
mean_h = trace_h.posterior.mean(dim=['chain', 'draw'])
theta_stacked = mean_h.θ.values
NDF_h = m_len*2 - 4 - 1 # (-1 is for σ_Beta)
chisq_h = calc_chisquare(y_mean, sigma_y, theta_stacked)/NDF_h
#box:
texto = " alpha = {0:7.4f} ± {1:1.4e}"\
.format(azt_summary['mean']['Tying_Parameters[1]'],
azt_summary['sd']['Tying_Parameters[1]']) + "\n"
texto +=" alpha_c = {0:7.4f} ± {1:1.4e}"\
.format(azt_summary['mean']['Tying_Parameters[2]'],
azt_summary['sd']['Tying_Parameters[2]']) + "\n"
texto +=" EPC = {0:7.4f} ± {1:1.4e}"\
.format(azt_summary['mean']['EPC'],
azt_summary['sd']['EPC']) + "\n"
texto +=" Fit χ² = {0:7.4f} "\
.format(chisq_h)
# obtain data for plot
bounds_rmk, y1, y1_min, y1_max, y2, y2_min, y2_max, Y1, Y2 = \
bf.prepare_two_curves_GSP_plot(h_model, trace_h, X, Y, HDI = False)
# title
title = experiment_type +', ' + interleaved_gate\
+ str(physical_qubits)\
+', backend: '+backend.name()+\
"\n Bayesian "+model+" "+ bounds_rmk
# plot
bf.gsp_plot(scale, lengths, num_samples, shots, texto, title,
y1, y1_min, y1_max, y2, y2_min, y2_max, Y1, Y2,
first_curve = "Standard", second_curve = "Interleaved")
# View result for frequentist model
display(int_expdata.figure(0))
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import copy
from qiskit_experiments.library import StandardRB, InterleavedRB
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library.randomized_benchmarking import RBUtils
import qiskit.circuit.library as circuits
# import the bayesian packages
import pymc3 as pm
import arviz as az
import qiskit_bayesian_fitter as bf
# define backend
from qiskit.providers.aer import AerSimulator
from qiskit.test.mock import FakeParis
backend = AerSimulator.from_backend(FakeParis())
# describe RB experiment
qubits = [0]
lengths = np.arange(1, 2500, 250)
num_samples = 10
seed = 3018
exp1 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
expdata1 = exp1.run(backend).block_for_results()
results1 = expdata1.analysis_results()
#prepare circuits
exp1 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
#run
expdata1 = exp1.run(backend).block_for_results()
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
m_len = len(lengths)
interleaved_gate = "" # for plot title
experiment_type = expdata1._data[0]['metadata']['experiment_type']
physical_qubits = expdata1._data[0]['metadata']['physical_qubits']
shots = expdata1._data[0]['shots']
# to compare ultimately: EPC and sigma(EPC) by LSF
epc_est_fm = expdata1.analysis_results()[2].value.value
epc_est_fm_err = expdata1.analysis_results()[2].value.stderr
# get count data and other values from expdata1
Y = bf.get_GSP_counts(expdata1._data, m_len,
range(num_samples))
X = np.copy(lengths)
y_mean = np.mean(Y, axis = 0)/shots
sigma_y = np.std(Y, axis = 0)/shots
model = "hierarchical model"
# priors for unknown model parameters
T_priors = expdata1.analysis_results()[0].value.value
print(T_priors)
testval_s = 0.001
upper_s = 0.004
lower_s = 0.0001
alpha_Gamma = 10
beta_Gamma = 10000
h_model = bf.create_model(T_priors, X, Y, shots, scale,
testval_s = testval_s ,upper_s = upper_s, lower_s = lower_s,
s_prior = "Gamma", alpha_Gamma = alpha_Gamma, beta_Gamma = beta_Gamma)
# model graph
pm.model_to_graphviz(h_model)
# sample
with h_model:
trace_h = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
return_inferencedata=True)
with h_model:
az.plot_trace(trace_h);
with h_model:
az.plot_posterior(trace_h, var_names = ["Tying_Parameters","σ_Beta","EPC"], round_to = 4, figsize = [16, 8]);
# look at the posterior values of the hyperparameters:
with h_model:
# (hdi_prob=.94 is default)
azt_summary = az.summary(trace_h, round_to=12,
var_names = ["Tying_Parameters", "σ_Beta","EPC"],
kind="stats")
azt_summary
# for comparison
# reference
epc_calib = np.nan
# bayesian
epc_est_a = azt_summary['mean']['EPC']
epc_est_a_err = azt_summary['sd']['EPC']
# frequentist
epc_est_fm = expdata1.analysis_results()[2].value.value
epc_est_fm_err = expdata1.analysis_results()[2].value.stderr
epc_title = experiment_type +', qubit(s):' + str(physical_qubits)\
+', backend: '+backend.name() + "\n Bayesian "+model
bf.plot_epc(h_model, trace_h, epc_calib, epc_est_a,
epc_est_a_err, epc_est_fm, epc_est_fm_err, epc_title)
# backend's recorded EPG
error_dic = RBUtils.get_error_dict_from_backend(backend, qubits)
# get the EPG values
EPG_dic = {}
REF_dic = {}
for i in range(3,6):
EPG_key = expdata1.analysis_results()[i].name
EPG_dic[EPG_key] = expdata1.analysis_results()[i].value.value
for elem in (error_dic):
if 'EPG_' + elem[1] == EPG_key:
REF_dic[EPG_key] = error_dic[elem]
# compare LSF and SMC
print("Model: Frequentist Bayesian Reference")
print("________________________________________________________")
print("EPC {0:1.3e} {1:1.3e} {2:1.3e}"
.format(epc_est_fm, epc_est_a, epc_calib ))
print("± sd ± {0:1.3e} ± {1:1.3e} "
.format(epc_est_fm_err, epc_est_a_err))
for i, (gate,EPG) in enumerate(EPG_dic.items()):
print("{0:<12}{1:1.3e} {2:1.3e} {3:1.3e} "
.format(gate, EPG, EPG*epc_est_a/epc_est_fm,
REF_dic[gate]))
# prepare box for GSP plot
texto = "alpha = {0:7.4f} ± {1:1.4e}"\
.format(azt_summary['mean']['Tying_Parameters[1]'],
azt_summary['sd']['Tying_Parameters[1]']) + "\n"
texto += "EPC = {0:1.4e} ± {1:1.4e}"\
.format(azt_summary['mean']['EPC'],
azt_summary['sd']['EPC']) + "\n"
for i, (gate,EPG) in enumerate(EPG_dic.items()):
texto += " {0:<8} = {1:1.4e} "\
.format(gate.ljust(6), EPG*epc_est_a/epc_est_fm) + "\n"
texto += " Fit χ² = {0:7.4f} "\
.format(bf.reduced_chisquare(y_mean, sigma_y, trace_h))
bounds_rmk, y1, y1_min, y1_max = bf.prepare_data_GSP_plot(h_model, trace_h, HDI = False)
title = experiment_type +', ' + interleaved_gate\
+ str(physical_qubits)\
+', backend: '+backend.name()+\
"\n Bayesian "+model+" "+ bounds_rmk
bf.gsp_plot(scale, lengths, num_samples, shots, texto, title,
y1, y1_min, y1_max, y2=None, y2_min=None, y2_max=None, Y1=Y, Y2=None,
first_curve = "Calculated values", second_curve = None)
# View data for frequentist model
display(expdata1.figure(0))
lengths = np.arange(1, 200, 15)
num_samples = 10
seed = 3018
qubits = (1,4)
# Run a 1-qubit RB expriment on qubits 1, 4 to determine the error-per-gate of 1-qubit gates
expdata_1q = {}
epg_1q = []
lengths_1_qubit = np.arange(1, 2500, 250)
for qubit in qubits:
exp = StandardRB([qubit], lengths_1_qubit, num_samples=num_samples, seed=seed)
expdata = exp.run(backend).block_for_results()
expdata_1q[qubit] = expdata
epg_1q += expdata.analysis_results()
# Run an RB experiment on qubits 1, 4
exp2 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
# Use the EPG data of the 1-qubit runs to ensure correct 2-qubit EPG computation
exp2.set_analysis_options(epg_1_qubit=epg_1q)
# Run the 2-qubit experiment
expdata2 = exp2.run(backend).block_for_results()
# View result data
results2 = expdata2.analysis_results()
# Compare the computed EPG of the cx gate with the backend's recorded cx gate error:
expected_epg = RBUtils.get_error_dict_from_backend(backend, qubits)[(qubits, 'cx')]
exp2_epg = expdata2.analysis_results("EPG_cx").value
print("Backend's reported EPG of the cx gate:", expected_epg)
print("Experiment computed EPG of the cx gate:", exp2_epg)
trace_1q = {}
scale_1q = .5
for qubit in qubits:
Y = bf.get_GSP_counts(expdata_1q[qubit]._data, len(lengths_1_qubit),
range(num_samples))
X = np.copy(lengths_1_qubit)
shots_1_qubit = expdata_1q[qubit]._data[0]['shots']
T_priors = expdata_1q[qubit].analysis_results()[0].value.value
testval_s = 0.001
upper_s = 0.004
lower_s = 0.0001
alpha_Gamma = 10
beta_Gamma = 10000
h1_model = bf.create_model(T_priors, X, Y, shots_1_qubit, scale_1q,
testval_s = testval_s ,upper_s = upper_s, lower_s = lower_s,
s_prior = "Gamma", alpha_Gamma = alpha_Gamma, beta_Gamma = beta_Gamma)
with h1_model:
trace_h1 = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
return_inferencedata=True)
trace_1q[qubit] = trace_h1
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
m_len = len(lengths)
interleaved_gate = "" # for plot title
experiment_type = expdata2._data[0]['metadata']['experiment_type']
physical_qubits = expdata2._data[0]['metadata']['physical_qubits']
shots = expdata2._data[0]['shots']
# get count data and other values from expdata2
Y = bf.get_GSP_counts(expdata2._data, m_len,
range(num_samples))
X = np.copy(lengths)
y_mean = np.mean(Y, axis = 0)/shots
sigma_y = np.std(Y, axis = 0)/shots
model = "hierarchical model"
# priors for unknown model parameters
T_priors = expdata2.analysis_results()[0].value.value
print(T_priors)
testval_s = 0.0025
upper_s = 0.005
lower_s = 0.0005
alpha_Gamma = 5
beta_Gamma = 2000
h2_model = bf.create_model(T_priors, X, Y, shots, scale,
testval_s = testval_s ,upper_s = upper_s, lower_s = lower_s,
s_prior = "Gamma", alpha_Gamma = alpha_Gamma, beta_Gamma = beta_Gamma)
# model graph
pm.model_to_graphviz(h2_model)
# sample
with h2_model:
trace_h2 = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
return_inferencedata=True)
with h2_model:
az.plot_trace(trace_h2);
with h2_model:
az.plot_posterior(trace_h2, var_names = ["Tying_Parameters","σ_Beta","EPC"], round_to = 4, figsize = [16, 8]);
# look at the posterior values of the hyperparameters:
with h2_model:
# (hdi_prob=.94 is default)
azt2_summary = az.summary(trace_h2, round_to=12,
var_names = ["Tying_Parameters", "σ_Beta","EPC"],
kind="stats")
azt2_summary
# for comparison
# reference
epc_calib = np.nan
# bayesian
epc_est_a = azt2_summary['mean']['EPC']
epc_est_a_err = azt2_summary['sd']['EPC']
# frequentist
epc_est_fm = expdata2.analysis_results()[2].value.value
epc_est_fm_err = expdata2.analysis_results()[2].value.stderr
epc_title = experiment_type +', qubit(s):' + str(physical_qubits)\
+', backend: '+backend.name() + "\n Bayesian "+model
bf.plot_epc(h2_model, trace_h2, epc_calib, epc_est_a,
epc_est_a_err, epc_est_fm, epc_est_fm_err, epc_title)
# obtain posterior values of the hyperparameters:
azts_1q = []
for i_qubit, qubit in enumerate(qubits):
with h2_model:
# (hdi_prob=.94 is default)
azts_1q.append( az.summary(trace_1q[qubit], round_to=12,
var_names = ["Tying_Parameters", "σ_Beta","EPC"],
kind="stats") )
# retrieve gates per clifford from frequentist results
alpha_1q = [epg_1q[1].value.value, epg_1q[7].value.value]
epc_2_qubit = expdata2.analysis_results()[2].value.value
alpha_c_1q = 1 / 5 * (alpha_1q[0] + alpha_1q[1] + 3 * alpha_1q[0] * alpha_1q[1])
alpha_c_2q = (1 - 4 / 3 * epc_2_qubit) / alpha_c_1q
n_gate_2q = 3 / 4 * (1 - alpha_c_2q) / exp2_epg.value
# calculate epg cx from the bayesian results
alpha_1q_b = [azts_1q[0]['mean']['Tying_Parameters[1]'],
azts_1q[1]['mean']['Tying_Parameters[1]']]
epc_2_qubit_b = azt2_summary['mean']['EPC']
alpha_c_1q_b = 1 / 5 * (alpha_1q_b[0] + alpha_1q_b[1] + 3 * alpha_1q_b[0] * alpha_1q_b[1])
alpha_c_2q_b = (1 - 4 / 3 * epc_2_qubit_b) / alpha_c_1q_b
epg_cx = 3 / 4 * (1 - alpha_c_2q_b) / n_gate_2q
# compare LSF and SMC
print("Model: Frequentist Bayesian Reference")
print("________________________________________________________")
print("EPC {0:1.3e} {1:1.3e} -----"
.format(epc_est_fm, epc_est_a ))
print("± sd ± {0:1.3e} ± {1:1.3e} "
.format(epc_est_fm_err, epc_est_a_err))
print("EPG_cx {0:1.3e} {1:1.3e} {2:1.3e}"
.format(exp2_epg.value, epg_cx, expected_epg))
# prepare box for GSP plot
texto = " alpha = {0:7.4f} ± {1:1.4e}"\
.format(azt2_summary['mean']['Tying_Parameters[1]'],
azt2_summary['sd']['Tying_Parameters[1]']) + "\n"
texto += " EPC = {0:1.4e} ± {1:1.4e}"\
.format(azt2_summary['mean']['EPC'],
azt2_summary['sd']['EPC']) + "\n"
texto += " EPG_cx = {0:7.4f}"\
.format(epg_cx) + "\n"
texto += " Fit χ² = {0:7.4f} "\
.format(bf.reduced_chisquare(y_mean, sigma_y, trace_h2))
bounds_rmk, y1, y1_min, y1_max = bf.prepare_data_GSP_plot(h2_model, trace_h2)
title = experiment_type +', ' + interleaved_gate\
+ str(physical_qubits)\
+', backend: '+backend.name()+\
"\n Bayesian "+model+" "+ bounds_rmk
bf.gsp_plot(scale, lengths, num_samples, shots, texto, title,
y1, y1_min, y1_max, y2=None, y2_min=None, y2_max=None, Y1=Y, Y2=None,
first_curve = "Calculated values", second_curve = None)
# View result for frequentist model
display(expdata2.figure(0))
interleaved_gate = "x"
interleaved_circuit = circuits.XGate()
qubits = [0]
lengths = np.arange(1, 2500, 250)
testval_s = 0.001
upper_s = 0.004
lower_s = 0.0005
alpha_Gamma = 10
beta_Gamma = 10000
num_samples = 10
seed = 41730
epc_calib = REF_dic['EPG_' + interleaved_gate]
print('EPC reference: {0:1.4e}'.format(epc_calib))
#prepare circuits
int1exp = InterleavedRB(interleaved_circuit, qubits,
lengths, num_samples=num_samples, seed=seed)
#run
int1expdata = int1exp.run(backend).block_for_results()
experiment_type = int1expdata._data[0]['metadata']['experiment_type']
physical_qubits = int1expdata._data[0]['metadata']['physical_qubits']
shots = int1expdata._data[0]['shots']
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
m_len = len(lengths)
# get count data and other values from int1expdata
Y = bf.get_GSP_counts(int1expdata._data, 2*m_len,
range(num_samples))
# get RvsI_h and IvsR_h
RvsI_h = np.ones(2*m_len)
for i_data in range(2*m_len):
if int1expdata._data[i_data]['metadata']['interleaved']:
RvsI_h[i_data] = 0.
IvsR_h = (RvsI_h + 1.) %2
X0 = np.tile(lengths,2)
X = np.vstack((X0,RvsI_h,IvsR_h))
y_mean = np.mean(Y, axis = 0)/shots
sigma_y = np.std(Y, axis = 0)/shots
model = "hierarchical model"
# priors for unknown model parameters
T_priors = int1expdata.analysis_results()[0].value.value
print(T_priors)
hv1 = bf.create_model(T_priors, X, Y, shots, scale,
testval_s = testval_s, upper_s = upper_s, lower_s = lower_s,
s_prior = "Gamma", alpha_Gamma = alpha_Gamma, beta_Gamma = beta_Gamma)
# model graph
pm.model_to_graphviz(hv1)
# sample
with hv1:
trace_hv1 = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
return_inferencedata=True)
with hv1:
az.plot_trace(trace_hv1);
with hv1:
az.plot_posterior(trace_hv1, var_names = ["Tying_Parameters","σ_Beta","EPC"], round_to = 4, figsize = [16, 8]);
# look at the posterior values of the hyperparameters:
with hv1:
# (hdi_prob=.94 is default)
aztv1_summary = az.summary(trace_hv1, round_to=12,
var_names = ["Tying_Parameters","σ_Beta","EPC"],
kind="stats")
aztv1_summary
# for comparison
# bayesian
epc_est_a = aztv1_summary['mean']['EPC']
epc_est_a_err = aztv1_summary['sd']['EPC']
# frequentist
epc_est_fm = int1expdata.analysis_results()[3].value.value
epc_est_fm_err = int1expdata.analysis_results()[3].value.stderr
epc_title = experiment_type +', ' + interleaved_gate \
+ " qubit(s):" + str(physical_qubits)\
+', backend: '+ backend.name() + "\n Bayesian "+model
bf.plot_epc(hv1, trace_hv1, epc_calib, epc_est_a,
epc_est_a_err, epc_est_fm, epc_est_fm_err, epc_title)
# compare LSF and SMC
print("Model: Frequentist Bayesian Reference")
print("__________________________________________________________")
print("EPC {0:1.3e} {1:1.3e} {2:1.3e}"
.format(epc_est_fm,epc_est_a,epc_calib ))
print("± sd ± {0:1.3e} ± {1:1.3e} "
.format(epc_est_fm_err, epc_est_a_err))
# for WIP
import importlib
importlib.reload(bf)
# obtain data for plot
bounds_rmk, y1, y1_min, y1_max, y2, y2_min, y2_max, Y1, Y2 = \
bf.prepare_two_curves_GSP_plot(hv1, trace_hv1, X, Y, HDI = False)
reduced_chisq = bf.reduced_chisquare(y_mean, sigma_y, trace_hv1)
texto = bf.get_box_interleaved(aztv1_summary, reduced_chisq)
title = experiment_type +', ' + interleaved_gate\
+ str(physical_qubits)\
+', backend: '+backend.name()+\
"\n Bayesian "+model+" "+ bounds_rmk
# plot
bf.gsp_plot(scale, lengths, num_samples, shots, texto, title,
y1, y1_min, y1_max, y2, y2_min, y2_max, Y1, Y2,
first_curve = "Standard", second_curve = "Interleaved")
# View result for frequentist model
display(int1expdata.figure(0))
interleaved_gate = "cx"
interleaved_circuit = circuits.CXGate()
qubits = [1,4]
lengths = np.arange(1, 200, 15)
testval_s = 0.0025
upper_s = 0.005
lower_s = 0.0005
alpha_Gamma = 5
beta_Gamma = 2000
num_samples = 10
seed = 3018
epc_calib = expected_epg
print('EPC reference: {0:1.4e}'.format(epc_calib))
#prepare circuits
int_exp = InterleavedRB(interleaved_circuit, qubits,
lengths, num_samples=num_samples, seed=seed)
#run
int_expdata = int_exp.run(backend).block_for_results()
experiment_type = int_expdata._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata._data[0]['metadata']['physical_qubits']
shots = int_expdata._data[0]['shots']
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
m_len = len(lengths)
# get count data and other values from int_expdata
Y = bf.get_GSP_counts(int_expdata._data, 2*m_len,
range(num_samples))
# get RvsI_h and IvsR_h
RvsI_h = np.ones(2*m_len)
for i_data in range(2*m_len):
if int_expdata._data[i_data]['metadata']['interleaved']:
RvsI_h[i_data] = 0.
IvsR_h = (RvsI_h + 1.) %2
X0 = np.tile(lengths,2)
X = np.vstack((X0,RvsI_h,IvsR_h))
y_mean = np.mean(Y, axis = 0)/shots
sigma_y = np.std(Y, axis = 0)/shots
model = "hierarchical model"
# priors for unknown model parameters
T_priors = int_expdata.analysis_results()[0].value.value
print(T_priors)
hv2 = bf.create_model(T_priors, X, Y, shots, scale,
testval_s = testval_s, upper_s = upper_s, lower_s = lower_s,
s_prior = "Gamma", alpha_Gamma = alpha_Gamma, beta_Gamma = beta_Gamma)
# model graph
pm.model_to_graphviz(hv2)
# sample
with hv2:
trace_hv2 = pm.sample(draws = 4000, tune= 1000, target_accept=.99,
return_inferencedata=True)
with hv2:
az.plot_trace(trace_hv2);
with hv2:
az.plot_posterior(trace_hv2, var_names = ["Tying_Parameters","σ_Beta","EPC"], round_to = 4, figsize = [16, 8]);
# look at the posterior values of the hyperparameters:
with hv2:
# (hdi_prob=.94 is default)
aztv2_summary = az.summary(trace_hv2, round_to=12,
var_names = ["Tying_Parameters","σ_Beta","EPC"],
kind="stats")
aztv2_summary
# for comparison
# bayesian
epc_est_a = aztv2_summary['mean']['EPC']
epc_est_a_err = aztv2_summary['sd']['EPC']
# frequentist
epc_est_fm = int_expdata.analysis_results()[3].value.value
epc_est_fm_err = int_expdata.analysis_results()[3].value.stderr
epc_title = experiment_type +', ' + interleaved_gate \
+ " qubit(s):" + str(physical_qubits)\
+', backend: '+ backend.name() + "\n Bayesian "+model
bf.plot_epc(hv2, trace_hv2, epc_calib, epc_est_a,
epc_est_a_err, epc_est_fm, epc_est_fm_err, epc_title)
# compare LSF and SMC
print("Model: Frequentist Bayesian Reference")
print("__________________________________________________________")
print("EPC {0:1.3e} {1:1.3e} {2:1.3e}"
.format(epc_est_fm,epc_est_a,epc_calib ))
print("± sd ± {0:1.3e} ± {1:1.3e} "
.format(epc_est_fm_err, epc_est_a_err))
# obtain data for plot
bounds_rmk, y1, y1_min, y1_max, y2, y2_min, y2_max, Y1, Y2 = \
bf.prepare_two_curves_GSP_plot(hv2, trace_hv2, X, Y, HDI = False)
reduced_chisq = bf.reduced_chisquare(y_mean, sigma_y, trace_hv2)
texto = bf.get_box_interleaved(aztv2_summary, reduced_chisq)
title = experiment_type +', ' + interleaved_gate\
+ str(physical_qubits)\
+', backend: '+backend.name()+\
"\n Bayesian "+model+" "+ bounds_rmk
# plot
bf.gsp_plot(scale, lengths, num_samples, shots, texto, title,
y1, y1_min, y1_max, y2, y2_min, y2_max, Y1, Y2,
first_curve = "Standard", second_curve = "Interleaved")
# View result for frequentist model
display(int_expdata.figure(0))
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import copy
import qiskit_experiments as qe
import qiskit.circuit.library as circuits
rb = qe.randomized_benchmarking
# for retrieving gate calibration
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
import bayesian_fitter as bf
simulation = True # make your choice here
if simulation:
from qiskit.providers.aer import AerSimulator
from qiskit.test.mock import FakeParis
backend = AerSimulator.from_backend(FakeParis())
else:
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_lima') # type here hardware backend
lengths = np.arange(1, 1000, 100)
num_samples = 10
seed = 1010
qubits = [0]
physical_qubits = [0]
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate =''
# Run an RB experiment on a single qubit
exp1 = rb.StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
expdata1 = exp1.run(backend)
# retrieve from the frequentist model (fm) analysis
# some values,including priors, for the bayesian analysis
perr_fm, popt_fm, epc_est_fm, epc_est_fm_err, experiment_type = bf.retrieve_from_lsf(expdata1)
EPG_dic = expdata1._analysis_results[0]['EPG'][qubits[0]]
# get count data
Y = bf.get_GSP_counts(expdata1._data, len(lengths),range(num_samples))
shots = bf.guess_shots(Y)
#build model
original_model = bf.get_bayesian_model(model_type="pooled",Y=Y,shots=shots,m_gates=lengths,
mu_AB=[popt_fm[0],popt_fm[2]],cov_AB=[perr_fm[0],perr_fm[2]],
alpha_ref=popt_fm[1],
alpha_lower=popt_fm[1]-6*perr_fm[1],
alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]))
pm.model_to_graphviz(original_model)
trace_o = bf.get_trace(original_model)
# backend's recorded EPG
print(rb.RBUtils.get_error_dict_from_backend(backend, qubits))
bf.RB_bayesian_results(original_model, trace_o, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic)
# describe RB experiment
interleaved_gate =''
physical_qubits = qubits = (1,4)
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ # defined for the 2-qubit run
lengths = np.arange(1, 200, 30)
lengths_1_qubit = np.arange(1, 1000, 100)
num_samples = 10
seed = 1010
# Run a 1-qubit RB expriment on each qubit to determine the error-per-gate of 1-qubit gates
epg_data = {}
expdata_dic = {}
lengths_1_qubit = np.arange(1, 1000, 100)
for qubit in qubits:
exp = rb.StandardRB([qubit], lengths_1_qubit, num_samples=num_samples, seed=seed)
expdata_dic[qubit] = exp.run(backend)
epg_data[qubit] = expdata_dic[qubit].analysis_result(0)['EPG'][qubit]
# Run a 1-qubit RB bayesian analysis on each qubit to determine the bayesian error-per-gate of 1-qubit gates
epg_data_bayes = {}
for qubit in qubits:
# retrieve from the frequentist model (fm) analysis
# some values,including priors, for the bayesian analysis
perr_fm, popt_fm, epc_est_fm, epc_est_fm_err, experiment_type = bf.retrieve_from_lsf(expdata_dic[qubit])
Y = bf.get_GSP_counts(expdata_dic[qubit]._data, len(lengths_1_qubit),range(num_samples))
shots = bf.guess_shots(Y)
#build and run model
oneQ_model = bf.get_bayesian_model(model_type="pooled",Y=Y,shots=shots,m_gates=lengths_1_qubit,
mu_AB=[popt_fm[0],popt_fm[2]],cov_AB=[perr_fm[0],perr_fm[2]],
alpha_ref=popt_fm[1],
alpha_lower=popt_fm[1]-6*perr_fm[1],
alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]))
trace_oneQ = bf.get_trace(oneQ_model)
azoneQ_summary = bf.get_summary(oneQ_model, trace_oneQ, kind = 'stats')
print(azoneQ_summary,'\n')
epc_est_a = 0.5*(1 - azoneQ_summary['mean']['alpha']) # there is only one qubit, thus scale = 0.5
epg_data_bayes[qubit] = {}
for i, (gate,EPG) in enumerate(epg_data[qubit].items()):
epg_data_bayes[qubit][gate] = EPG*epc_est_a/epc_est_fm
# Run a frequentist RB experiment on 2 qubits
exp2 = rb.StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
# Use the EPG data of the 1-qubit runs to ensure correct 2-qubit EPG computation
exp2.set_analysis_options(epg_1_qubit=epg_data)
# Run the 2-qubit experiment
expdata2 = exp2.run(backend)
# Compare the computed EPG with the backend's recorded EPG:
expected_epg = rb.RBUtils.get_error_dict_from_backend(backend, qubits)[(qubits, 'cx')]
exp2_epg = expdata2.analysis_result(0)['EPG'][qubits]['cx']
print("Backend's reported EPG:", expected_epg)
print("Experiment computed EPG:", exp2_epg)
# Bayesian version
# Use the EPG data of the 1-qubit runs to ensure correct 2-qubit EPG computation
exp2.set_analysis_options(epg_1_qubit=epg_data_bayes)
# Run the 2-qubit experiment
expdata2b = exp2.run(backend)
# Compare the computed EPG with the backend's recorded EPG:
exp2_epg_b = expdata2b.analysis_result(0)['EPG'][qubits]['cx']
print("Backend's reported EPG:", expected_epg)
print("Experiment computed EPG:", exp2_epg_b)
# retrieve from the frequentist model (fm) analysis
# some values,including priors, for the bayesian analysis
perr_fm, popt_fm, epc_est_fm, epc_est_fm_err, experiment_type = bf.retrieve_from_lsf(expdata2b)
EPG_dic = expdata2b._analysis_results[0]['EPG'][qubits]
# get count data
Y = bf.get_GSP_counts(expdata2b._data, len(lengths),range(num_samples))
shots = bf.guess_shots(Y)
#build model
S2QB_model = bf.get_bayesian_model(model_type="pooled",Y=Y,shots=shots,m_gates=lengths,
mu_AB=[popt_fm[0],popt_fm[2]],cov_AB=[perr_fm[0],perr_fm[2]],
alpha_ref=popt_fm[1],
alpha_lower=popt_fm[1]-6*perr_fm[1],
alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]))
pm.model_to_graphviz(S2QB_model)
trace_s2 = bf.get_trace(S2QB_model)
bf.RB_bayesian_results(S2QB_model, trace_s2, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic)
# describe RB experiment
interleaved_gate = "x"
physical_qubits = [0]
qubits = [0]
interleaved_circuit = circuits.XGate()
lengths = np.arange(1, 1000, 100)
num_samples = 10
seed = 1010
# Run an interleaved RB experiment
int_exp1 = rb.InterleavedRB(interleaved_circuit, qubits,
lengths, num_samples=num_samples, seed=seed)
int_expdata1 = int_exp1.run(backend)
# retrieve from the frequentist model (fm) analysis
# some values,including priors, for the bayesian analysis
perr_fm, popt_fm, epc_est_fm, epc_est_fm_err, experiment_type = bf.retrieve_from_lsf(int_expdata1)
nQ = len(physical_qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
Y1 = bf.get_GSP_counts(int_expdata1._data, len(lengths),
range(0,2*num_samples-1,2))
Y2 = bf.get_GSP_counts(int_expdata1._data, len(lengths),
range(1,2*num_samples,2))
Y = np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
shots = bf.guess_shots(Y)
tilde1 = bf.get_bayesian_model("tilde",Y=Y,shots=shots, m_gates=lengths,
alpha_ref=popt_fm[1], p_testval= popt_fm[2],
alpha_lower=popt_fm[1]-6*perr_fm[1],
alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]),
p_lower=popt_fm[2]-6*perr_fm[2],
p_upper=min(1.-1.E-6,popt_fm[2]+6*perr_fm[2]),
mu_AB=[popt_fm[0],popt_fm[3]],cov_AB=[perr_fm[0],perr_fm[3]],
RvsI=RvsI,IvsR=IvsR)
pm.model_to_graphviz(tilde1)
trace_t = bf.get_trace(tilde1)
t = None # enter t in datetime format if necessary
e_list = dv.gate_error_values(backend.properties()) # use properties(datetime=t) if t is defined
epc_calib = [item for item in e_list if item[0] == interleaved_gate and item[1] == physical_qubits][0][2]
print('EPC calibration: {0:.6f}'.format(epc_calib))
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
# epc_calib = 2.307E-4 + (23.6-7)*(2.193E-4 - 2.307E-4)/24
bf.RB_bayesian_results(tilde1, trace_t, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2)
# describe RB experiment
interleaved_gate = "cx"
physical_qubits = [1,4]
qubits = [1,4]
interleaved_circuit = circuits.CXGate()
lengths = np.arange(1, 200, 30)
num_samples = 10
seed = 1010
# Run an Interleaved RB experiment
interleaved_circuit = circuits.CXGate()
int_exp2 = rb.InterleavedRB(interleaved_circuit, physical_qubits,
lengths, num_samples=num_samples, seed=seed)
int_expdata2 = int_exp2.run(backend)
# retrieve from the frequentist model (fm) analysis
# some values,including priors, for the bayesian analysis
perr_fm, popt_fm, epc_est_fm, epc_est_fm_err, experiment_type = bf.retrieve_from_lsf(int_expdata2)
nQ = len(physical_qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
Y1 = bf.get_GSP_counts(int_expdata2._data, len(lengths),
range(0,2*num_samples-1,2))
Y2 = bf.get_GSP_counts(int_expdata2._data, len(lengths),
range(1,2*num_samples,2))
Y = np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
shots = bf.guess_shots(Y1)
tilde2 = bf.get_bayesian_model("tilde",Y=Y,shots=shots, m_gates=lengths,
alpha_ref=popt_fm[1], p_testval= popt_fm[2],
alpha_lower=popt_fm[1]-6*perr_fm[1],
alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]),
p_lower=popt_fm[2]-6*perr_fm[2],
p_upper=min(1.-1.E-6,popt_fm[2]+6*perr_fm[2]),
mu_AB=[popt_fm[0],popt_fm[3]],cov_AB=[perr_fm[0],perr_fm[3]],
RvsI=RvsI,IvsR=IvsR)
pm.model_to_graphviz(tilde2)
trace_t2 = bf.get_trace(tilde2)
t = None # enter t in datetime format if necessary
e_list = dv.gate_error_values(backend.properties()) # use properties(datetime=t) if t is defined
epc_calib = [item for item in e_list if item[0] == interleaved_gate and item[1] == physical_qubits][0][2]
print('EPC calibration: {0:.6f}'.format(epc_calib))
bf.RB_bayesian_results(tilde2, trace_t2, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2)
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import copy
import qiskit_experiments as qe
import qiskit.circuit.library as circuits
rb = qe.randomized_benchmarking
# for retrieving gate calibration
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
import bayesian_fitter as bf
simulation = True # make your choice here
if simulation:
from qiskit.providers.aer import AerSimulator
from qiskit.test.mock import FakeParis
backend = AerSimulator.from_backend(FakeParis())
else:
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_lima') # type here hardware backend
import importlib
importlib.reload(bf)
lengths = np.arange(1, 1000, 100)
num_samples = 10
seed = 1010
qubits = [0]
# Run an RB experiment on qubit 0
exp1 = rb.StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
expdata1 = exp1.run(backend)
# View result data
print(expdata1)
physical_qubits = [0]
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate =''
# retrieve from the frequentist model (fm) analysis
# some values,including priors, for the bayesian analysis
perr_fm, popt_fm, epc_est_fm, epc_est_fm_err, experiment_type = bf.retrieve_from_lsf(expdata1)
EPG_dic = expdata1._analysis_results[0]['EPG'][qubits[0]]
# get count data
Y = bf.get_GSP_counts(expdata1._data, len(lengths),range(num_samples))
shots = bf.guess_shots(Y)
#build model
pooled_model = bf.get_bayesian_model(model_type="pooled",Y=Y,shots=shots,m_gates=lengths,
mu_AB=[popt_fm[0],popt_fm[2]],cov_AB=[perr_fm[0],perr_fm[2]],
alpha_ref=popt_fm[1],
alpha_lower=popt_fm[1]-6*perr_fm[1],
alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]))
pm.model_to_graphviz(pooled_model)
trace_p = bf.get_trace(pooled_model, target_accept = 0.95)
# backend's recorded EPG
print(rb.RBUtils.get_error_dict_from_backend(backend, qubits))
bf.RB_bayesian_results(pooled_model, trace_p, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic)
#build model
original_model = bf.get_bayesian_model(model_type="h_sigma",Y=Y,shots=shots,m_gates=lengths,
mu_AB=[popt_fm[0],popt_fm[2]],cov_AB=[perr_fm[0],perr_fm[2]],
alpha_ref=popt_fm[1],
alpha_lower=popt_fm[1]-6*perr_fm[1],
alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]),
sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015)
pm.model_to_graphviz(original_model)
trace_o = bf.get_trace(original_model, target_accept = 0.95)
# backend's recorded EPG
print(rb.RBUtils.get_error_dict_from_backend(backend, qubits))
bf.RB_bayesian_results(original_model, trace_o, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic)
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
#from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.tools.monitor import job_monitor
from qiskit import Aer
from qiskit.providers.aer.noise import NoiseModel
from qiskit import QuantumRegister, QuantumCircuit
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
# import the bayesian packages
import pymc3 as pm
import arviz as az
from scipy.optimize import curve_fit
import bayesian_fitter as bf
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
RB_process = "1_Q RB"
if RB_process in ["3_Q RB","2-3_Q RB"] :
#Number of qubits
nQ = 3
#There are 3 qubits: Q0,Q1,Q2.
#2Q RB on Q0,Q2 and 1Q RB on Q1
rb_pattern = [[1,2],[3]] # because 3 qubits
#Do three times as many 1Q Cliffords
length_multiplier = [1,3]
#Interleaved Clifford gates (2-qubits and 1-qubit)
interleaved_gates = [['cx 0 1'],['x 2']]
elif RB_process == "2_Q RB":
#Number of qubits
nQ = 2
#There are 2 qubits: Q0,Q1.
#2Q RB Q0,Q1
rb_pattern = [[0,1]]
length_multiplier = 1
interleaved_gates = [['cx 0,1']]
elif RB_process == "1_Q RB":
#Number of qubits
nQ = 1
#There is 1 qubit: Q0
rb_pattern = [[0]]
length_multiplier = 1
interleaved_gates = [['sx 0']]
#Important parameters
#Number of Cliffords in the sequence (start, stop, steps)
nCliffs = [1, 50, 100, 200, 400, 600, 800, 1000, 1300, 1600]
#Number of seeds (random sequences)
nseeds=8
#Shots
shots = 2**9
scale = (2 ** len(rb_pattern[0]) - 1) / (2 ** len(rb_pattern[0]))
from qiskit import IBMQ
from qiskit import Aer
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
device = provider.get_backend('ibmq_athens') # type here hardware backend
properties = device.properties()
coupling_map = device.configuration().coupling_map
basis_gates = ['id', 'rz', 'sx', 'x', 'cx', 'reset']
hardware = device.name()
run_option = "real" # "simulation"
# choice of simulation or real device run
if run_option == "real":
backend = device
noise_model = None
elif run_option == "simulation":
backend = Aer.get_backend('qasm_simulator')
noise_model = NoiseModel.from_backend(properties)
qregs_02 = QuantumRegister(2)
circ_02 = QuantumCircuit(qregs_02, name='circ_02')
#circ_02.h(qregs_02[0]) # booptrap! WIP!
circ_02.cx(qregs_02[0], qregs_02[1])
circ_02.draw()
qregs_1 = QuantumRegister(1)
circ_1 = QuantumCircuit(qregs_1, name='circ_1')
circ_1.sx(qregs_1[0]) # booptrap! WIP!
circ_1.draw()
rb_opts = {}
rb_opts['rand_seed'] = 61946
rb_opts['length_vector'] = nCliffs
rb_opts['nseeds'] = nseeds
rb_opts['rb_pattern'] = rb_pattern
rb_opts['length_multiplier'] = length_multiplier
#rb_opts['align_cliffs'] = True
if RB_process in ["3_Q RB","2-3_Q RB"]:
rb_opts['interleaved_elem'] = [circ_02, circ_1]
elif RB_process == "2_Q RB":
rb_opts['interleaved_elem'] = [circ_02]
elif RB_process == "1_Q RB":
rb_opts['interleaved_elem'] = [circ_1]
rb_original_circs, xdata, rb_interleaved_circs = rb.randomized_benchmarking_seq(**rb_opts)
#Original RB circuits
print (rb_original_circs[0][0])
#Interleaved RB circuits
print (rb_interleaved_circs[0][0])
retrieve_list = []
original_result_list, original_transpile_list = bf.get_and_run_seeds(rb_circs=rb_original_circs,
shots=shots,
backend = backend,
coupling_map = coupling_map,
basis_gates = basis_gates,
noise_model = noise_model,
retrieve_list=retrieve_list)
retrieve_list = []
interleaved_result_list, interleaved_transpile_list = bf.get_and_run_seeds(rb_circs=rb_interleaved_circs,
shots=shots,
backend = backend,
coupling_map = coupling_map,
basis_gates = basis_gates,
noise_model = noise_model,
retrieve_list=retrieve_list)
epc_ref = 0.0003191
# skip if no model rerun
Y1 =np.array([[507, 503, 493, 478, 445, 423, 394, 390, 344, 310],
[506, 505, 497, 480, 447, 439, 415, 401, 383, 354],
[509, 501, 501, 487, 453, 443, 422, 414, 369, 352],
[511, 507, 497, 483, 450, 424, 397, 374, 332, 322],
[508, 505, 493, 482, 452, 452, 427, 408, 366, 339],
[511, 503, 490, 480, 456, 435, 418, 399, 369, 364],
[510, 499, 495, 484, 432, 409, 417, 374, 374, 350],
[508, 503, 487, 484, 470, 440, 416, 369, 363, 338]])
# skip if no model rerun
Y2 = np.array([[508, 494, 470, 462, 406, 376, 359, 360, 324, 334],
[509, 489, 477, 450, 421, 395, 356, 340, 316, 300],
[509, 494, 473, 455, 426, 397, 355, 327, 317, 320],
[510, 498, 475, 444, 406, 355, 326, 323, 278, 271],
[508, 492, 479, 455, 411, 389, 358, 335, 306, 307],
[509, 497, 487, 448, 412, 365, 351, 328, 304, 292],
[512, 499, 483, 455, 395, 390, 345, 331, 292, 306],
[509, 498, 475, 451, 408, 388, 366, 344, 316, 300]])
# function to optimize
def lsf(x, a, alpha, b):
    """Exponential RB decay model: A * alpha**m + B.

    x is the Clifford sequence length (scalar or ndarray); a/b are the
    SPAM-related amplitude and offset, alpha the depolarizing parameter.
    """
    decay = alpha ** x
    return b + a * decay
# curve fit
popt_s,pcov_s = curve_fit(lsf, np.array(nseeds*list(nCliffs)),
np.ravel(Y1)/shots,
bounds = ([scale-0.15,.9,1-scale-.15],
[scale+0.15,1.0,1-scale+.15]))
perr_s= np.sqrt(np.diag(pcov_s))
# get EPC and EPC sigma for LSF accelerated
alpha_f = popt_s[1]
alpha_f_err = perr_s[1]
popt_s,perr_s
# curve fit
popt_i,pcov_i = curve_fit(lsf, np.array(nseeds*list(nCliffs)),
np.ravel(Y2)/shots,
bounds = ([scale-0.15,.9,1-scale-.15],
[scale+0.15,1.0,1-scale+.15]))
perr_i= np.sqrt(np.diag(pcov_i))
# get EPC and EPC sigma for LSF accelerated
alphC_f = popt_i[1]
alphC_f_err = perr_i[1]
popt_i,perr_i
# Two-run interleaved-RB EPC estimate: scale * (1 - alpha_interleaved / alpha_standard).
epc_est_f = scale*(1 - alphC_f/alpha_f)
# First-order error propagation: combine the two alpha uncertainties in quadrature.
epc_est_f_err = scale*(alphC_f/alpha_f)*(np.sqrt(alpha_f_err**2 + alphC_f_err**2))
# function to optimize
def lsmf(x, a, alpha, p_tilde_m, b):
    """Merged standard/interleaved RB decay model for a single joint fit.

    x[0] holds the sequence lengths, x[1] is the standard-run indicator
    (1 for standard samples, else 0) and x[2] the interleaved-run
    indicator.  p_tilde_m is the extra per-Clifford depolarizing factor
    of the interleaved gate.
    """
    standard_curve = a * alpha ** x[0] + b
    interleaved_curve = a * (alpha * p_tilde_m) ** x[0] + b
    return x[1] * standard_curve + x[2] * interleaved_curve
# obtain the data
m_len = len(nCliffs)*nseeds
x0_lsmf = np.array(nseeds*2*list(nCliffs))
x1_lsmf = np.hstack((np.ones(m_len),np.zeros(m_len)))
x2_lsmf = np.hstack((np.zeros(m_len),np.ones(m_len)))
x_lsmf = np.vstack((x0_lsmf,x1_lsmf,x2_lsmf))
y_lsmf=np.hstack((np.ravel(Y1),np.ravel(Y2)))/shots
# curve fit
popt_m,pcov_m = curve_fit(lsmf, x_lsmf, y_lsmf,
bounds = ([scale-0.15,.9,.9,1-scale-.15],
[scale+0.15,1.0,1.0,1-scale+.15]))
perr_m = np.sqrt(np.diag(pcov_m))
# get EPC and EPC sigma for LSF accelerated
alpha_fm = popt_m[1]
p_tilde_m = popt_m[2]
alpha_fm_err = perr_m[1]
p_tilde_m_err = perr_m[2]
popt_m,perr_m
# Accelerated (single joint fit) EPC estimate directly from the fitted p_tilde.
epc_est_fm = scale*(1 - p_tilde_m)
# Its standard error scales linearly with the p_tilde fit uncertainty.
epc_est_fm_err = scale*p_tilde_m_err
# compare LSF and Reference
print("Model: Frequentist Reference")
print(" two-run accelerated")
print("EPC {0:.6f} {1:.6f} {2:.6f} "
.format(epc_est_f, epc_est_fm, epc_ref))
print("± sigma ± {0:.6f} ± {1:.6f} ------ "
.format(epc_est_f_err, epc_est_fm_err))
original_model = bf.get_bayesian_model(model_type="pooled",Y=Y1,shots=shots,m_gates=nCliffs,
mu_AB=[popt_s[0],popt_s[2]],cov_AB=[perr_s[0],perr_s[2]],
alpha_ref=alpha_f, alpha_upper=.999999, p_upper=.999999)
pm.model_to_graphviz(original_model)
trace_o = bf.get_trace(original_model, target_accept = .99)
azo_summary = bf.get_summary(original_model, trace_o)
azo_summary
alpha_original_p = azo_summary['mean']['alpha']
alpha_original_p_err = azo_summary['sd']['alpha']
interleaved_model = bf.get_bayesian_model(model_type="pooled",Y=Y2,shots=shots,m_gates=nCliffs,
mu_AB=[popt_i[0],popt_i[2]],cov_AB=[perr_i[0],perr_i[2]],
alpha_ref=alpha_f, alpha_upper=.999999, p_upper=.999999)
pm.model_to_graphviz(interleaved_model)
trace_i = bf.get_trace(interleaved_model, target_accept = .95)
azi_summary = bf.get_summary(interleaved_model, trace_i)
azi_summary
alpha_c_p = azi_summary['mean']['alpha']
alpha_c_p_err = azi_summary['sd']['alpha']
epc_est_p = scale*(1 - alpha_c_p/alpha_original_p)
epc_est_p_err = scale*(alpha_c_p/alpha_original_p)*(np.sqrt(alpha_original_p_err**2 + alpha_c_p_err**2))
Y = np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde =bf.get_bayesian_model("tilde",Y=Y,shots=shots, m_gates=nCliffs,
alpha_ref=alpha_fm, p_testval= p_tilde_m,
mu_AB=[popt_m[0],popt_m[3]],cov_AB=[perr_m[0],perr_m[3]],
RvsI=RvsI,IvsR=IvsR, alpha_upper=.999999, p_upper=.999999)
pm.model_to_graphviz(tilde)
trace_t = bf.get_trace(tilde, target_accept = .95)
azt_summary = bf.get_summary(tilde, trace_t)
azt_summary
epc_est_a = scale*(1 - azt_summary['mean']['p_tilde'])
epc_est_a_err = scale* (azt_summary['sd']['p_tilde'])
# compare LSF and SMC
print("Model: Frequentist Bayesian Reference")
print(" two-run accelerated two-run accelerated ")
print("EPC {0:.6f} {1:.6f} {2:.6f} {3:.6f} {4:.6f} "
.format(epc_est_f ,epc_est_fm, epc_est_p, epc_est_a, epc_ref))
print("± sigma ± {0:.6f} ± {1:.6f} ± {2:.6f} ± {3:.6f} ------ "
.format(epc_est_f_err, epc_est_fm_err, epc_est_p_err, epc_est_a_err))
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    """Convert a depolarizing parameter into an error-per-Clifford.

    Relies on the module-level ``scale`` = (2**nQ - 1) / 2**nQ.
    Used as the ``transform`` callback of ``az.plot_posterior``.
    """
    return (1 - alpha) * scale
# for referring to the interleaved gate in the title of the graphs
intl_g=str(interleaved_gates[0][0][0:2])+str(rb_pattern[0][0:2])
if RB_process in ["3_Q RB","2-3_Q RB"] :
intl_g=intl_g+"<"+str(interleaved_gates[1][0][0:1]+str(rb_pattern[1][0:2]))
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
with tilde:
ax = az.plot_posterior(trace_t, var_names=['p_tilde'], round_to=4, point_estimate=None,
transform = alpha_to_EPC)
ax.set_xlim(0.0, 0.0005)
plt.axvline(x=epc_est_fm,color='red',ls="-")
plt.axvline(x=epc_est_p,color='orange',ls="-")
plt.axvline(x=epc_est_f,color='cyan',ls="-")
if epc_ref > 0.0:
plt.axvline(x=epc_ref,color='green',ls=":")
plt.axvline(x=epc_est_a,color='blue',ls=":")
plt.title(RB_process +' $accelerated$, gate: ' + intl_g\
+", "+hardware+', backend: '+backend.name(),
fontsize=12)
Bayes_legend = "EPC Accelerated SMC: {0:1.3e} ({1:1.3e})".format(epc_est_a, epc_est_a_err)
Bayes2_legend = "EPC SMC 2-runs: {0:1.3e} ({1:1.3e})".format(epc_est_p, epc_est_p_err)
Fitter_legend = "EPC LSF 2-runs: {0:1.3e} ({1:1.3e})".format(epc_est_f, epc_est_f_err)
LSM_legend = "EPC Accelerated LSF: {0:1.3e} ({1:1.3e})".format(epc_est_fm, epc_est_fm_err)
Cal_legend = "EPC Reference: {0:1.3e}".format(epc_ref)
if epc_ref > 0.0:
plt.legend((Bayes_legend, "$Higher\; density\; interval$ HDI",
LSM_legend,Bayes2_legend,
Fitter_legend,Cal_legend), fontsize=10 )
else:
plt.legend((Bayes_legend, "$Higher\; density\; interval$ HDI",
LSM_legend, Bayes2_legend,
Fitter_legend), fontsize=10 )
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
fig, plt = plt.subplots(1, 1)
plt.set_ylabel("Ground State Population")
plt.set_xlabel("Number of Cliffords")
for i_seed in range(nseeds):
plt.scatter(nCliffs, Y1[i_seed,:]/shots, label = "data", marker="x",color="b")
plt.scatter(nCliffs, Y2[i_seed,:]/shots, label = "data", marker="+",color="r")
plt.plot(nCliffs,azt_summary['mean']['AB[0]']*azt_summary['mean']['alpha']**nCliffs+\
azt_summary['mean']['AB[1]'],'--',color="b")
plt.plot(nCliffs,azt_summary['mean']['AB[0]']*(azt_summary['mean']['alpha']*azt_summary['mean']['p_tilde'])**\
nCliffs+azt_summary['mean']['AB[1]'],'--',color="r")
plt.legend(("Standard",
"Interleaved"))
plt.set_title(RB_process +' SMC $accelerated$, gate: ' + intl_g\
+", "+hardware+', backend: '+backend.name(),
fontsize=14);
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
#from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.tools.monitor import job_monitor
from qiskit import Aer
from qiskit.providers.aer.noise import NoiseModel
from qiskit import QuantumRegister, QuantumCircuit
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
# import the bayesian packages
import pymc3 as pm
import arviz as az
from scipy.optimize import curve_fit
import bayesian_fitter as bf
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
RB_process = "1_Q RB"
if RB_process in ["3_Q RB","2-3_Q RB"] :
#Number of qubits
nQ = 3
#There are 3 qubits: Q0,Q1,Q2.
#2Q RB on Q0,Q2 and 1Q RB on Q1
rb_pattern = [[1,2],[3]] # because 3 qubits
#Do three times as many 1Q Cliffords
length_multiplier = [1,3]
#Interleaved Clifford gates (2-qubits and 1-qubit)
interleaved_gates = [['cx 0 1'],['x 2']]
elif RB_process == "2_Q RB":
#Number of qubits
nQ = 2
#There are 2 qubits: Q0,Q1.
#2Q RB Q0,Q1
rb_pattern = [[0,1]]
length_multiplier = 1
interleaved_gates = [['cx 0,1']]
elif RB_process == "1_Q RB":
#Number of qubits
nQ = 1
#There is 1 qubit: Q0
rb_pattern = [[0]]
length_multiplier = 1
interleaved_gates = [['x 0']]
#Number of Cliffords in the sequence (start, stop, steps)
nCliffs = [1, 50, 100, 200, 400, 600, 800, 1000, 1300, 1600]
#Number of seeds (random sequences)
nseeds=8
scale = (2 ** len(rb_pattern[0]) - 1) / (2 ** len(rb_pattern[0]))
from qiskit import IBMQ
from qiskit import Aer
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
device = provider.get_backend('ibmq_lima') # type here hardware backend
properties = device.properties()
coupling_map = device.configuration().coupling_map
# use a noise model corresponding to the chosen real device backend
basis_gates = ['id', 'rz', 'sx', 'x', 'cx', 'reset']
hardware = device.name()
backend = Aer.get_backend('qasm_simulator')
shots = 2**9
noise_model = NoiseModel.from_backend(properties)
qregs_02 = QuantumRegister(2)
circ_02 = QuantumCircuit(qregs_02, name='circ_02')
#circ_02.h(qregs_02[0]) # booptrap! WIP!
circ_02.cx(qregs_02[0], qregs_02[1])
circ_02.draw()
qregs_1 = QuantumRegister(1)
circ_1 = QuantumCircuit(qregs_1, name='circ_1')
circ_1.x(qregs_1[0]) # booptrap! WIP!
circ_1.draw()
rb_opts = {}
rb_opts['rand_seed'] = 61946
rb_opts['length_vector'] = nCliffs
rb_opts['nseeds'] = nseeds
rb_opts['rb_pattern'] = rb_pattern
rb_opts['length_multiplier'] = length_multiplier
#rb_opts['align_cliffs'] = True
if RB_process in ["3_Q RB","2-3_Q RB"]:
rb_opts['interleaved_elem'] = [circ_02, circ_1]
elif RB_process == "2_Q RB":
rb_opts['interleaved_elem'] = [circ_02]
elif RB_process == "1_Q RB":
rb_opts['interleaved_elem'] = [circ_1]
rb_original_circs, xdata, rb_interleaved_circs = rb.randomized_benchmarking_seq(**rb_opts)
#Original RB circuits
print (rb_original_circs[0][0])
#Interleaved RB circuits
print (rb_interleaved_circs[0][0])
retrieve_list = []
original_result_list, original_transpile_list = bf.get_and_run_seeds(rb_circs=rb_original_circs,
shots=shots,
backend = backend,
coupling_map = coupling_map,
basis_gates = basis_gates,
noise_model = noise_model,
retrieve_list=retrieve_list)
retrieve_list = []
interleaved_result_list, interleaved_transpile_list = bf.get_and_run_seeds(rb_circs=rb_interleaved_circs,
shots=shots,
backend = backend,
coupling_map = coupling_map,
basis_gates = basis_gates,
noise_model = noise_model,
retrieve_list=retrieve_list)
# retrieve counts; skip if model rerun
# if model rerun, recuperate output data by running Y1 and Y2 = np.printed_array instead of this cell
Y1 = bf.get_count_data(original_result_list, nCliffs=nCliffs )
Y2 = bf.get_count_data(interleaved_result_list, nCliffs=nCliffs)
# output np.array Y1; skip if model rerun
Y1
# output np.array Y1; skip if model rerun
Y2
# skip if no model rerun
Y1 =np.array([[509, 504, 493, 474, 448, 417, 406, 391, 392, 367],
[507, 498, 495, 485, 461, 440, 421, 370, 382, 353],
[507, 499, 491, 469, 437, 433, 400, 395, 373, 359],
[507, 500, 484, 477, 451, 435, 399, 365, 374, 355],
[508, 500, 494, 477, 446, 429, 401, 394, 365, 369],
[506, 493, 496, 484, 457, 430, 401, 413, 374, 372],
[506, 500, 490, 472, 444, 435, 389, 389, 372, 337],
[506, 498, 492, 485, 462, 427, 392, 385, 368, 359]])
# skip if no model rerun
Y2 = np.array([[505, 488, 482, 444, 406, 377, 330, 310, 327, 270],
[507, 487, 485, 460, 414, 363, 354, 346, 297, 296],
[509, 498, 476, 444, 414, 386, 355, 347, 312, 291],
[509, 486, 478, 453, 413, 368, 354, 334, 306, 304],
[509, 494, 483, 445, 405, 366, 338, 356, 278, 268],
[508, 492, 474, 466, 417, 357, 340, 327, 321, 284],
[507, 490, 483, 452, 419, 390, 359, 341, 306, 293],
[509, 494, 477, 441, 392, 368, 357, 331, 324, 278]])
# function to optimize
def lsf(x, a, alpha, b):
    """Exponential RB decay model: A * alpha**m + B.

    x is the Clifford sequence length (scalar or ndarray); a/b are the
    SPAM-related amplitude and offset, alpha the depolarizing parameter.
    """
    decay = alpha ** x
    return b + a * decay
# curve fit
popt_s,pcov_s = curve_fit(lsf, np.array(nseeds*list(nCliffs)),
np.ravel(Y1)/shots,
bounds = ([scale-0.15,.9,1-scale-.15],
[scale+0.15,1.0,1-scale+.15]))
perr_s= np.sqrt(np.diag(pcov_s))
# get EPC and EPC sigma for LSF accelerated
alpha_f = popt_s[1]
alpha_f_err = perr_s[1]
popt_s,perr_s
# curve fit
popt_i,pcov_i = curve_fit(lsf, np.array(nseeds*list(nCliffs)),
np.ravel(Y2)/shots,
bounds = ([scale-0.15,.9,1-scale-.15],
[scale+0.15,1.0,1-scale+.15]))
perr_i= np.sqrt(np.diag(pcov_i))
# get EPC and EPC sigma for LSF accelerated
alphC_f = popt_i[1]
alphC_f_err = perr_i[1]
popt_i,perr_i
# Two-run interleaved-RB EPC estimate: scale * (1 - alpha_interleaved / alpha_standard).
epc_est_f = scale*(1 - alphC_f/alpha_f)
# First-order error propagation: combine the two alpha uncertainties in quadrature.
epc_est_f_err = scale*(alphC_f/alpha_f)*(np.sqrt(alpha_f_err**2 + alphC_f_err**2))
# function to optimize
def lsmf(x, a, alpha, p_tilde_m, b):
    """Merged standard/interleaved RB decay model for a single joint fit.

    x[0] holds the sequence lengths, x[1] is the standard-run indicator
    (1 for standard samples, else 0) and x[2] the interleaved-run
    indicator.  p_tilde_m is the extra per-Clifford depolarizing factor
    of the interleaved gate.
    """
    standard_curve = a * alpha ** x[0] + b
    interleaved_curve = a * (alpha * p_tilde_m) ** x[0] + b
    return x[1] * standard_curve + x[2] * interleaved_curve
# obtain the data
m_len = len(nCliffs)*nseeds
x0_lsmf = np.array(nseeds*2*list(nCliffs))
x1_lsmf = np.hstack((np.ones(m_len),np.zeros(m_len)))
x2_lsmf = np.hstack((np.zeros(m_len),np.ones(m_len)))
x_lsmf = np.vstack((x0_lsmf,x1_lsmf,x2_lsmf))
y_lsmf=np.hstack((np.ravel(Y1),np.ravel(Y2)))/shots
# curve fit
popt_m,pcov_m = curve_fit(lsmf, x_lsmf, y_lsmf,
bounds = ([scale-0.15,.9,.9,1-scale-.15],
[scale+0.15,1.0,1.0,1-scale+.15]))
perr_m = np.sqrt(np.diag(pcov_m))
# get EPC and EPC sigma for LSF accelerated
alpha_fm = popt_m[1]
p_tilde_m = popt_m[2]
alpha_fm_err = perr_m[1]
p_tilde_m_err = perr_m[2]
popt_m,perr_m
# Accelerated (single joint fit) EPC estimate directly from the fitted p_tilde.
epc_est_fm = scale*(1 - p_tilde_m)
# Its standard error scales linearly with the p_tilde fit uncertainty.
epc_est_fm_err = scale*p_tilde_m_err
original_model = bf.get_bayesian_model(model_type="pooled",Y=Y1,shots=shots,m_gates=nCliffs,
mu_AB=[popt_s[0],popt_s[2]],cov_AB=[perr_s[0],perr_s[2]],
alpha_ref=alpha_f, alpha_upper=.999999, p_upper=.999999)
pm.model_to_graphviz(original_model)
trace_o = bf.get_trace(original_model, target_accept = .95)
azo_summary = bf.get_summary(original_model, trace_o)
azo_summary
alpha_original_p = azo_summary['mean']['alpha']
alpha_original_p_err = azo_summary['sd']['alpha']
interleaved_model = bf.get_bayesian_model(model_type="pooled",Y=Y2,shots=shots,m_gates=nCliffs,
mu_AB=[popt_i[0],popt_i[2]],cov_AB=[perr_i[0],perr_i[2]],
alpha_ref=alpha_f, alpha_upper=.999999, p_upper=.999999)
pm.model_to_graphviz(interleaved_model)
trace_i = bf.get_trace(interleaved_model, target_accept = .95)
azi_summary = bf.get_summary(interleaved_model, trace_i)
azi_summary
alpha_c_p = azi_summary['mean']['alpha']
alpha_c_p_err = azi_summary['sd']['alpha']
epc_est_p = scale*(1 - alpha_c_p/alpha_original_p)
epc_est_p_err = scale*(alpha_c_p/alpha_original_p)*(np.sqrt(alpha_original_p_err**2 + alpha_c_p_err**2))
Y = np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde =bf.get_bayesian_model("tilde",Y=Y,shots=shots, m_gates=nCliffs,
alpha_ref=alpha_fm, p_testval= p_tilde_m,
mu_AB=[popt_m[0],popt_m[3]],cov_AB=[perr_m[0],perr_m[3]],
RvsI=RvsI,IvsR=IvsR, alpha_upper=.999999, p_upper=.999999)
pm.model_to_graphviz(tilde)
trace_t = bf.get_trace(tilde, target_accept = .95)
azt_summary = bf.get_summary(tilde, trace_t)
azt_summary
epc_est_a = scale*(1 - azt_summary['mean']['p_tilde'])
epc_est_a_err = scale* (azt_summary['sd']['p_tilde'])
epc_calib = 3.364E-04 # not for simulation
# compare LSF and SMC
print("Model: Frequentist Bayesian ")
print(" two-run accelerated two-run accelerated ")
print("EPC {0:.5f} {1:.5f} {2:.5f} {3:.5f} "
.format(epc_est_f, epc_est_fm, epc_est_p, epc_est_a))
print("± sigma ± {0:.5f} ± {1:.5f} ± {2:.5f} ± {3:.5f} "
.format(epc_est_f_err, epc_est_fm_err, epc_est_p_err, epc_est_a_err))
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    """Convert a depolarizing parameter into an error-per-Clifford.

    Relies on the module-level ``scale`` = (2**nQ - 1) / 2**nQ.
    Used as the ``transform`` callback of ``az.plot_posterior``.
    """
    return (1 - alpha) * scale
# for referring to the interleaved gate in the title of the graphs
intl_g=str(interleaved_gates[0][0][0:2])+str(rb_pattern[0][0:2])
if RB_process in ["3_Q RB","2-3_Q RB"] :
intl_g=intl_g+"<"+str(interleaved_gates[1][0][0:1]+str(rb_pattern[1][0:2]))
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
with tilde:
ax = az.plot_posterior(trace_t, var_names=['p_tilde'], round_to=4, point_estimate=None,
transform = alpha_to_EPC)
ax.set_xlim(0.0, 0.0005)
plt.axvline(x=epc_est_fm,color='red',ls="-")
plt.axvline(x=epc_est_p,color='orange',ls="-")
plt.axvline(x=epc_est_f,color='cyan',ls="-")
if epc_calib > 0.0:
plt.axvline(x=epc_calib,color='green',ls=":")
plt.axvline(x=epc_est_a,color='blue',ls=":")
plt.title(RB_process +' $accelerated$, gate: ' + intl_g\
+", "+hardware+', backend: '+backend.name(),
fontsize=12)
Bayes_legend = "EPC Accelerated SMC: {0:1.3e} ({1:1.3e})".format(epc_est_a, epc_est_a_err)
Bayes2_legend = "EPC SMC 2-runs: {0:1.3e} ({1:1.3e})".format(epc_est_p, epc_est_p_err)
Fitter_legend = "EPC LSF 2-runs: {0:1.3e} ({1:1.3e})".format(epc_est_f, epc_est_f_err)
LSM_legend = "EPC Accelerated LSF: {0:1.3e} ({1:1.3e})".format(epc_est_fm, epc_est_fm_err)
Cal_legend = "EPC Calibration: {0:1.3e}".format(epc_calib)
if epc_calib > 0.0:
plt.legend((Bayes_legend, "$Higher\; density\; interval$ HDI",
LSM_legend,Bayes2_legend,
Fitter_legend,Cal_legend), fontsize=10 )
else:
plt.legend((Bayes_legend, "$Higher\; density\; interval$ HDI",
LSM_legend, Bayes2_legend,
Fitter_legend), fontsize=10 )
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
fig, plt = plt.subplots(1, 1)
plt.set_ylabel("Ground State Population")
plt.set_xlabel("Number of Cliffords")
for i_seed in range(nseeds):
plt.scatter(nCliffs, Y1[i_seed,:]/shots, label = "data", marker="x",color="b")
plt.scatter(nCliffs, Y2[i_seed,:]/shots, label = "data", marker="+",color="r")
plt.plot(nCliffs,azt_summary['mean']['AB[0]']*azt_summary['mean']['alpha']**nCliffs+\
azt_summary['mean']['AB[1]'],'--',color="b")
plt.plot(nCliffs,azt_summary['mean']['AB[0]']*(azt_summary['mean']['alpha']*azt_summary['mean']['p_tilde'])**\
nCliffs+azt_summary['mean']['AB[1]'],'--',color="r")
plt.legend(("Standard",
"Interleaved"))
plt.set_title(RB_process +' SMC $accelerated$, gate: ' + intl_g\
+", "+hardware+', backend: '+backend.name(),
fontsize=14);
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
#from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.tools.monitor import job_monitor
from qiskit import Aer
from qiskit.providers.aer.noise import NoiseModel
from qiskit import QuantumRegister, QuantumCircuit
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
# import the bayesian packages
import pymc3 as pm
import arviz as az
from scipy.optimize import curve_fit
import bayesian_fitter as bf
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
RB_process = "2_Q RB"
if RB_process in ["3_Q RB","2-3_Q RB"] :
#Number of qubits
nQ = 3
#There are 3 qubits: Q0,Q1,Q2.
#2Q RB on Q0,Q2 and 1Q RB on Q1
rb_pattern = [[0,1],[2]] # because 3 qubits
#Do three times as many 1Q Cliffords
length_multiplier = [1,3]
#Interleaved Clifford gates (2-qubits and 1-qubit)
interleaved_gates = [['cx 0 1'],['x 2']]
else:
#Number of qubits
nQ = 2
#There are 2 qubits: Q0,Q1.
#2Q RB Q0,Q1
rb_pattern = [[0,1]]
length_multiplier = 1
interleaved_gates = [['cx 0 1']]
#Number of Cliffords in the sequence (start, stop, steps)
nCliffs = [1, 20, 40, 60, 80, 100, 150, 200, 250, 300]
#Number of seeds (random sequences)
nseeds=8
scale = (2 ** len(rb_pattern[0]) - 1) / (2 ** len(rb_pattern[0]))
qregs_02 = QuantumRegister(2)
circ_02 = QuantumCircuit(qregs_02, name='circ_02')
#circ_02.h(qregs_02[0]) # booptrap! WIP!
circ_02.cx(qregs_02[0], qregs_02[1])
circ_02.draw()
qregs_1 = QuantumRegister(1)
circ_1 = QuantumCircuit(qregs_1, name='circ_1')
circ_1.x(qregs_1[0]) # booptrap! WIP!
circ_1.draw()
rb_opts = {}
rb_opts['rand_seed'] = 61946
rb_opts['length_vector'] = nCliffs
rb_opts['nseeds'] = nseeds
rb_opts['rb_pattern'] = rb_pattern
rb_opts['length_multiplier'] = length_multiplier
#rb_opts['align_cliffs'] = True
if RB_process in ["3_Q RB","2-3_Q RB"]:
rb_opts['interleaved_elem'] = [circ_02, circ_1]
if RB_process == "2_Q RB":
rb_opts['interleaved_elem'] = [circ_02]
rb_original_circs, xdata, rb_interleaved_circs = rb.randomized_benchmarking_seq(**rb_opts)
#Original RB circuits
print (rb_original_circs[0][0])
#Interleaved RB circuits
print (rb_interleaved_circs[0][0])
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
device = provider.get_backend('ibmq_lima')
properties = device.properties()
coupling_map = device.configuration().coupling_map
backend = device
hardware = device.name()
shots = 2**9
basis_gates = ['id', 'rz', 'sx', 'x', 'cx', 'reset']
noise_model = None
epc_calib = 6.401E-3 # as noted at experiment time
retrieve_list = []
original_result_list, original_transpile_list = bf.get_and_run_seeds(rb_circs=rb_original_circs,
shots=shots,
backend = backend,
coupling_map = coupling_map,
basis_gates = basis_gates,
noise_model = noise_model,
retrieve_list=retrieve_list)
retrieve_list = []
interleaved_result_list, interleaved_transpile_list = bf.get_and_run_seeds(rb_circs=rb_interleaved_circs,
shots=shots,
backend = backend,
coupling_map = coupling_map,
basis_gates = basis_gates,
noise_model = noise_model,
retrieve_list=retrieve_list)
Y1 = bf.get_count_data(original_result_list, nCliffs=nCliffs )
Y2 = bf.get_count_data(interleaved_result_list, nCliffs=nCliffs)
# function to optimize
def lsf(x, a, alpha, b):
    """Exponential RB decay model: A * alpha**m + B.

    x is the Clifford sequence length (scalar or ndarray); a/b are the
    SPAM-related amplitude and offset, alpha the depolarizing parameter.
    """
    decay = alpha ** x
    return b + a * decay
# curve fit
popt_s,pcov_s = curve_fit(lsf, np.array(nseeds*list(nCliffs)),
np.ravel(Y1)/shots,
bounds = ([.65,.9,0.15],[.85,.999,0.35]))
perr_s= np.sqrt(np.diag(pcov_s))
# get EPC and EPC sigma for LSF accelerated
alpha_f = popt_s[1]
alpha_f_err = perr_s[1]
popt_s,perr_s
# curve fit
popt_i,pcov_i = curve_fit(lsf, np.array(nseeds*list(nCliffs)),
np.ravel(Y2)/shots,
bounds = ([.65,.9,0.15],[.85,.999,0.35]))
perr_i= np.sqrt(np.diag(pcov_i))
# get EPC and EPC sigma for LSF accelerated
alphC_f = popt_i[1]
alphC_f_err = perr_i[1]
popt_i,perr_i
# Two-run interleaved-RB EPC estimate: scale * (1 - alpha_interleaved / alpha_standard).
epc_est_f = scale*(1 - alphC_f/alpha_f)
# First-order error propagation: combine the two alpha uncertainties in quadrature.
epc_est_f_err = scale*(alphC_f/alpha_f)*(np.sqrt(alpha_f_err**2 + alphC_f_err**2))
# function to optimize
def lsmf(x, a, alpha, p_tilde_m, b):
    """Merged standard/interleaved RB decay model for a single joint fit.

    x[0] holds the sequence lengths, x[1] is the standard-run indicator
    (1 for standard samples, else 0) and x[2] the interleaved-run
    indicator.  p_tilde_m is the extra per-Clifford depolarizing factor
    of the interleaved gate.
    """
    standard_curve = a * alpha ** x[0] + b
    interleaved_curve = a * (alpha * p_tilde_m) ** x[0] + b
    return x[1] * standard_curve + x[2] * interleaved_curve
# obtain the data
m_len = len(nCliffs)*nseeds
x0_lsmf = np.array(nseeds*2*list(nCliffs))
x1_lsmf = np.hstack((np.ones(m_len),np.zeros(m_len)))
x2_lsmf = np.hstack((np.zeros(m_len),np.ones(m_len)))
x_lsmf = np.vstack((x0_lsmf,x1_lsmf,x2_lsmf))
y_lsmf=np.hstack((np.ravel(Y1),np.ravel(Y2)))/shots
# curve fit
popt_m,pcov_m = curve_fit(lsmf, x_lsmf, y_lsmf,
bounds = ([.65,.9,.9,0.15],
[.85,.999,.999,0.35]))
perr_m = np.sqrt(np.diag(pcov_m))
# get EPC and EPC sigma for LSF accelerated
alpha_fm = popt_m[1]
p_tilde_m = popt_m[2]
alpha_fm_err = perr_m[1]
p_tilde_m_err = perr_m[2]
popt_m,perr_m
# Accelerated (single joint fit) EPC estimate directly from the fitted p_tilde.
epc_est_fm = scale*(1 - p_tilde_m)
# Its standard error scales linearly with the p_tilde fit uncertainty.
epc_est_fm_err = scale*p_tilde_m_err
original_model = bf.get_bayesian_model(model_type="pooled",Y=Y1,shots=shots,m_gates=nCliffs,
mu_AB=[popt_s[0],popt_s[2]],cov_AB=[perr_s[0],perr_s[2]],
alpha_ref=alpha_f)
pm.model_to_graphviz(original_model)
trace_o = bf.get_trace(original_model)
azo_summary = bf.get_summary(original_model, trace_o)
azo_summary
alpha_original_p = azo_summary['mean']['alpha']
alpha_original_p_err = azo_summary['sd']['alpha']
interleaved_model = bf.get_bayesian_model(model_type="pooled",Y=Y2,shots=shots,m_gates=nCliffs,
mu_AB=[popt_i[0],popt_i[2]],cov_AB=[perr_i[0],perr_i[2]],
alpha_ref=alphC_f)
pm.model_to_graphviz(interleaved_model)
trace_i = bf.get_trace(interleaved_model)
azi_summary = bf.get_summary(interleaved_model, trace_i)
azi_summary
alpha_c_p = azi_summary['mean']['alpha']
alpha_c_p_err = azi_summary['sd']['alpha']
epc_est_p = scale*(1 - alpha_c_p/alpha_original_p)
epc_est_p_err = scale*(alpha_c_p/alpha_original_p)*(np.sqrt(alpha_original_p_err**2 + alpha_c_p_err**2))
Y = np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde =bf.get_bayesian_model("tilde",Y=Y,shots=shots, m_gates=nCliffs,
alpha_ref=alpha_fm, p_testval= p_tilde_m,
mu_AB=[popt_m[0],popt_m[3]],cov_AB=[perr_m[0],perr_m[3]],
RvsI=RvsI,IvsR=IvsR)
pm.model_to_graphviz(tilde)
trace_t = bf.get_trace(tilde)
azt_summary = bf.get_summary(tilde, trace_t)
azt_summary
epc_est_a = scale*(1 - azt_summary['mean']['p_tilde'])
epc_est_a_err = scale* (azt_summary['sd']['p_tilde'])
# compare LSF and SMC
print("Model: Frequentist Bayesian Calibration")
print(" two-run accelerated two-run accelerated ")
print("EPC {0:.5f} {1:.5f} {2:.5f} {3:.5f} {4:.5f} "
.format(epc_est_f, epc_est_fm, epc_est_p, epc_est_a, epc_calib))
print("± sigma ± {0:.5f} ± {1:.5f} ± {2:.5f} ± {3:.5f} ------ "
.format(epc_est_f_err, epc_est_fm_err, epc_est_p_err, epc_est_a_err))
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    """Map a depolarizing decay parameter ``alpha`` onto an error per Clifford."""
    depolarizing_part = 1 - alpha
    return scale * depolarizing_part
# for refering the interleaved gate in the title of the graphs
intl_g=str(interleaved_gates[0][0][0:2])+str(rb_pattern[0][0:2])
if RB_process in ["3_Q RB","2-3_Q RB"] :
    intl_g=intl_g+"<"+str(interleaved_gates[1][0][0:1]+str(rb_pattern[1][0:2]))
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
# Posterior of p_tilde, displayed on the EPC axis via the transform.
with tilde:
    ax = az.plot_posterior(trace_t, var_names=['p_tilde'], round_to=4, point_estimate=None,
                           transform = alpha_to_EPC)
ax.set_xlim(0.0, 0.010)
# Overlay the point estimates from the other methods as vertical lines.
plt.axvline(x=epc_est_fm,color='red',ls=":")
plt.axvline(x=epc_est_p,color='orange',ls="-")
plt.axvline(x=epc_est_f,color='cyan',ls="-")
if epc_calib > 0.0:
    plt.axvline(x=epc_calib,color='green',ls=":")
plt.axvline(x=epc_est_a,color='blue',ls=":")
plt.title(RB_process +' $accelerated$, gate: ' + intl_g\
          +", "+hardware+', backend: '+backend.name(),
          fontsize=12)
Bayes_legend = "EPC Accelerated SMC: {0:1.3e} ({1:1.3e})".format(epc_est_a, epc_est_a_err)
Bayes2_legend = "EPC SMC 2-runs: {0:1.3e} ({1:1.3e})".format(epc_est_p, epc_est_p_err)
Fitter_legend = "EPC LSF 2-runs: {0:1.3e} ({1:1.3e})".format(epc_est_f, epc_est_f_err)
LSM_legend = "EPC Accelerated LSF: {0:1.3e} ({1:1.3e})".format(epc_est_fm, epc_est_fm_err)
Cal_legend = "EPC Calibration: {0:1.3e}".format(epc_calib)
# Calibration entry only appears when a calibration EPC is available.
if epc_calib > 0.0:
    plt.legend((Bayes_legend, "$Higher\; density\; interval$ HDI",
                LSM_legend,Bayes2_legend,
                Fitter_legend,Cal_legend), fontsize=10 )
else:
    plt.legend((Bayes_legend, "$Higher\; density\; interval$ HDI",
                LSM_legend, Bayes2_legend,
                Fitter_legend), fontsize=10 )
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
# NOTE: ``plt`` is rebound to the Axes object here, so the ``plt.*`` calls
# below are Axes methods (set_ylabel, scatter, ...), not pyplot functions.
fig, plt = plt.subplots(1, 1)
plt.set_ylabel("Ground State Population")
plt.set_xlabel("Number of Cliffords")
# Raw per-seed survival frequencies plus the two posterior-mean decay curves.
for i_seed in range(nseeds):
    plt.scatter(nCliffs, Y1[i_seed,:]/shots, label = "data", marker="x",color="b")
    plt.scatter(nCliffs, Y2[i_seed,:]/shots, label = "data", marker="+",color="r")
plt.plot(nCliffs,azt_summary['mean']['AB[0]']*azt_summary['mean']['alpha']**nCliffs+\
         azt_summary['mean']['AB[1]'],'--',color="b")
plt.plot(nCliffs,azt_summary['mean']['AB[0]']*(azt_summary['mean']['alpha']*azt_summary['mean']['p_tilde'])**\
         nCliffs+azt_summary['mean']['AB[1]'],'--',color="r")
plt.legend(("Standard",
            "Interleaved"))
plt.set_title(RB_process +' SMC $accelerated$, gate: ' + intl_g\
              +", "+hardware+', backend: '+backend.name(),
              fontsize=14);
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
#from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.tools.monitor import job_monitor
from qiskit import Aer
from qiskit.providers.aer.noise import NoiseModel
from qiskit import QuantumRegister, QuantumCircuit
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
# import the bayesian packages
import pymc3 as pm
import arviz as az
from scipy.optimize import curve_fit
import bayesian_fitter as bf
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
# Select the RB flavour; "2_Q RB" runs a plain 2-qubit experiment, the
# 3-qubit variants add a simultaneous 1Q pattern on a spectator qubit.
RB_process = "2_Q RB"
if RB_process in ["3_Q RB","2-3_Q RB"] :
    #Number of qubits
    nQ = 3
    #There are 3 qubits: Q0,Q1,Q2.
    #2Q RB on Q0,Q2 and 1Q RB on Q1
    rb_pattern = [[1,2],[3]] # because 3 qubits
    #Do three times as many 1Q Cliffords
    length_multiplier = [1,3]
    #Interleaved Clifford gates (2-qubits and 1-qubit)
    interleaved_gates = [['cx 0 1'],['x 2']]
else:
    #Number of qubits
    nQ = 2
    #There are 2 qubits: Q0,Q1.
    #2Q RB Q0,Q1
    rb_pattern = [[0,1]]
    length_multiplier = 1
    interleaved_gates = [['cx 0,1']]
#Number of Cliffords in the sequence (start, stop, steps)
nCliffs = [1, 20, 40, 60, 100, 150, 200, 300, 400, 500]
#Number of seeds (random sequences)
nseeds=8
# Depolarizing scale factor (2**n - 1)/2**n used to turn decay params into EPC.
scale = (2 ** len(rb_pattern[0]) - 1) / (2 ** len(rb_pattern[0]))
# Interleaved element for the 2-qubit pattern: a single CX.
qregs_02 = QuantumRegister(2)
circ_02 = QuantumCircuit(qregs_02, name='circ_02')
#circ_02.h(qregs_02[0]) # booptrap! WIP!
circ_02.cx(qregs_02[0], qregs_02[1])
circ_02.draw()
# Interleaved element for the optional 1-qubit pattern: a single X.
qregs_1 = QuantumRegister(1)
circ_1 = QuantumCircuit(qregs_1, name='circ_1')
circ_1.x(qregs_1[0]) # booptrap! WIP!
circ_1.draw()
# Options for qiskit-ignis randomized_benchmarking_seq.
rb_opts = {}
rb_opts['rand_seed'] = 61946
rb_opts['length_vector'] = nCliffs
rb_opts['nseeds'] = nseeds
rb_opts['rb_pattern'] = rb_pattern
rb_opts['length_multiplier'] = length_multiplier
#rb_opts['align_cliffs'] = True
if RB_process in ["3_Q RB","2-3_Q RB"]:
    rb_opts['interleaved_elem'] = [circ_02, circ_1]
if RB_process == "2_Q RB":
    rb_opts['interleaved_elem'] = [circ_02]
rb_original_circs, xdata, rb_interleaved_circs = rb.randomized_benchmarking_seq(**rb_opts)
#Original RB circuits
print (rb_original_circs[0][0])
#Interleaved RB circuits
print (rb_interleaved_circs[0][0])
from qiskit import IBMQ
from qiskit import Aer
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
device = provider.get_backend('ibmq_lima') # type here hardware backend
properties = device.properties()
coupling_map = device.configuration().coupling_map
# use a noise model corresponding to the chosen real device backend
basis_gates = ['id', 'rz', 'sx', 'x', 'cx', 'reset']
hardware = device.name()
# Simulate on the Aer QASM simulator with the real device's noise model.
backend = Aer.get_backend('qasm_simulator')
shots = 2**9
noise_model = NoiseModel.from_backend(properties)
# Empty retrieve_list => execute the circuits instead of fetching old jobs.
retrieve_list = []
original_result_list, original_transpile_list = bf.get_and_run_seeds(rb_circs=rb_original_circs,
                                                   shots=shots,
                                                   backend = backend,
                                                   coupling_map = coupling_map,
                                                   basis_gates = basis_gates,
                                                   noise_model = noise_model,
                                                   retrieve_list=retrieve_list)
retrieve_list = []
interleaved_result_list, interleaved_transpile_list = bf.get_and_run_seeds(rb_circs=rb_interleaved_circs,
                                                   shots=shots,
                                                   backend = backend,
                                                   coupling_map = coupling_map,
                                                   basis_gates = basis_gates,
                                                   noise_model = noise_model,
                                                   retrieve_list=retrieve_list)
# Count matrices (seeds x lengths) of ground-state-like outcomes.
Y1 = bf.get_count_data(original_result_list, nCliffs=nCliffs )
Y2 = bf.get_count_data(interleaved_result_list, nCliffs=nCliffs)
# function to optimize
def lsf(x, a, alpha, b):
    """Single-exponential RB decay model: A * alpha**m + B."""
    decay = alpha ** x
    return a * decay + b
# curve fit: reference decay, with bounds keeping (A, alpha, B) in a
# physically sensible range around (0.75, ~1, 0.25).
popt_s,pcov_s = curve_fit(lsf, np.array(nseeds*list(nCliffs)),
                          np.ravel(Y1)/shots,
                          bounds = ([.65,.9,0.15],[.85,.999,0.35]))
perr_s= np.sqrt(np.diag(pcov_s))
# get EPC and EPC sigma for LSF accelerated
alpha_f = popt_s[1]
alpha_f_err = perr_s[1]
popt_s,perr_s
# curve fit: interleaved decay, same model and bounds.
popt_i,pcov_i = curve_fit(lsf, np.array(nseeds*list(nCliffs)),
                          np.ravel(Y2)/shots,
                          bounds = ([.65,.9,0.15],[.85,.999,0.35]))
perr_i= np.sqrt(np.diag(pcov_i))
# get EPC and EPC sigma for LSF accelerated
alphC_f = popt_i[1]
alphC_f_err = perr_i[1]
popt_i,perr_i
# Classic two-run interleaved-RB estimate from the ratio of fitted alphas.
epc_est_f = scale*(1 - alphC_f/alpha_f)
epc_est_f_err = scale*(alphC_f/alpha_f)*(np.sqrt(alpha_f_err**2 + alphC_f_err**2))
# function to optimize
def lsmf(x, a, alpha, p_tilde_m, b):
    """Joint decay model; x = (lengths, reference mask, interleaved mask)."""
    lengths, is_ref, is_intl = x[0], x[1], x[2]
    reference = a * alpha ** lengths + b
    interleaved = a * (alpha * p_tilde_m) ** lengths + b
    return is_ref * reference + is_intl * interleaved
# obtain the data for the joint fit: x rows are (lengths, reference mask,
# interleaved mask); y stacks reference then interleaved frequencies.
m_len = len(nCliffs)*nseeds
x0_lsmf = np.array(nseeds*2*list(nCliffs))
x1_lsmf = np.hstack((np.ones(m_len),np.zeros(m_len)))
x2_lsmf = np.hstack((np.zeros(m_len),np.ones(m_len)))
x_lsmf = np.vstack((x0_lsmf,x1_lsmf,x2_lsmf))
y_lsmf=np.hstack((np.ravel(Y1),np.ravel(Y2)))/shots
# curve fit: parameters are (A, alpha, p_tilde, B).
popt_m,pcov_m = curve_fit(lsmf, x_lsmf, y_lsmf,
                          bounds = ([.65,.9,.9,0.15],
                                    [.85,.999,.999,0.35]))
perr_m = np.sqrt(np.diag(pcov_m))
# get EPC and EPC sigma for LSF accelerated
alpha_fm = popt_m[1]
p_tilde_m = popt_m[2]
alpha_fm_err = perr_m[1]
p_tilde_m_err = perr_m[2]
popt_m,perr_m
# Accelerated LSF estimate: EPC directly from the fitted p_tilde.
epc_est_fm = scale*(1 - p_tilde_m)
epc_est_fm_err = scale*p_tilde_m_err
original_model = bf.get_bayesian_model(model_type="pooled",Y=Y1,shots=shots,m_gates=nCliffs,
mu_AB=[popt_s[0],popt_s[2]],cov_AB=[perr_s[0],perr_s[2]],
alpha_ref=alpha_f)
pm.model_to_graphviz(original_model)
trace_o = bf.get_trace(original_model)
azo_summary = bf.get_summary(original_model, trace_o)
azo_summary
alpha_original_p = azo_summary['mean']['alpha']
alpha_original_p_err = azo_summary['sd']['alpha']
interleaved_model = bf.get_bayesian_model(model_type="pooled",Y=Y2,shots=shots,m_gates=nCliffs,
mu_AB=[popt_i[0],popt_i[2]],cov_AB=[perr_i[0],perr_i[2]],
alpha_ref=alphC_f)
pm.model_to_graphviz(interleaved_model)
trace_i = bf.get_trace(interleaved_model)
azi_summary = bf.get_summary(interleaved_model, trace_i)
azi_summary
alpha_c_p = azi_summary['mean']['alpha']
alpha_c_p_err = azi_summary['sd']['alpha']
epc_est_p = scale*(1 - alpha_c_p/alpha_original_p)
epc_est_p_err = scale*(alpha_c_p/alpha_original_p)*(np.sqrt(alpha_original_p_err**2 + alpha_c_p_err**2))
Y = np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde =bf.get_bayesian_model("tilde",Y=Y,shots=shots, m_gates=nCliffs,
alpha_ref=alpha_fm, p_testval= p_tilde_m,
mu_AB=[popt_m[0],popt_m[3]],cov_AB=[perr_m[0],perr_m[3]],
RvsI=RvsI,IvsR=IvsR)
pm.model_to_graphviz(tilde)
trace_t = bf.get_trace(tilde)
azt_summary = bf.get_summary(tilde, trace_t)
azt_summary
epc_est_a = scale*(1 - azt_summary['mean']['p_tilde'])
epc_est_a_err = scale* (azt_summary['sd']['p_tilde'])
epc_calib = 0.0 # not for simulation
# compare LSF and SMC
print("Model: Frequentist Bayesian ")
print(" two-run accelerated two-run accelerated ")
print("EPC {0:.5f} {1:.5f} {2:.5f} {3:.5f} "
.format(epc_est_f, epc_est_fm, epc_est_p, epc_est_a))
print("± sigma ± {0:.5f} ± {1:.5f} ± {2:.5f} ± {3:.5f} "
.format(epc_est_f_err, epc_est_fm_err, epc_est_p_err, epc_est_a_err))
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    """Convert a decay parameter ``alpha`` into an error per Clifford."""
    error_fraction = 1 - alpha
    return scale * error_fraction
# for refering the interleaved gate in the title of the graphs
intl_g=str(interleaved_gates[0][0][0:2])+str(rb_pattern[0][0:2])
if RB_process in ["3_Q RB","2-3_Q RB"] :
    intl_g=intl_g+"<"+str(interleaved_gates[1][0][0:1]+str(rb_pattern[1][0:2]))
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
# Posterior of p_tilde on the EPC axis.
with tilde:
    ax = az.plot_posterior(trace_t, var_names=['p_tilde'], round_to=4, point_estimate=None,
                           transform = alpha_to_EPC)
ax.set_xlim(0.003, 0.007)
# Overlay the other point estimates as vertical lines.
plt.axvline(x=epc_est_fm,color='red',ls="-")
plt.axvline(x=epc_est_p,color='orange',ls="-")
plt.axvline(x=epc_est_f,color='cyan',ls="-")
if epc_calib > 0.0:
    plt.axvline(x=epc_calib,color='green',ls=":")
plt.axvline(x=epc_est_a,color='blue',ls=":")
plt.title(RB_process +' $accelerated$, gate: ' + intl_g\
          +", "+hardware+', backend: '+backend.name(),
          fontsize=12)
Bayes_legend = "EPC Accelerated SMC: {0:1.3e} ({1:1.3e})".format(epc_est_a, epc_est_a_err)
Bayes2_legend = "EPC SMC 2-runs: {0:1.3e} ({1:1.3e})".format(epc_est_p, epc_est_p_err)
Fitter_legend = "EPC LSF 2-runs: {0:1.3e} ({1:1.3e})".format(epc_est_f, epc_est_f_err)
LSM_legend = "EPC Accelerated LSF: {0:1.3e} ({1:1.3e})".format(epc_est_fm, epc_est_fm_err)
Cal_legend = "EPC Calibration: {0:1.3e}".format(epc_calib)
# Calibration entry only appears when a calibration EPC is available.
if epc_calib > 0.0:
    plt.legend((Bayes_legend, "$Higher\; density\; interval$ HDI",
                LSM_legend,Bayes2_legend,
                Fitter_legend,Cal_legend), fontsize=10 )
else:
    plt.legend((Bayes_legend, "$Higher\; density\; interval$ HDI",
                LSM_legend, Bayes2_legend,
                Fitter_legend), fontsize=10 )
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
# NOTE: ``plt`` is rebound to the Axes object, so the calls below are
# Axes methods rather than pyplot functions.
fig, plt = plt.subplots(1, 1)
plt.set_ylabel("Ground State Population")
plt.set_xlabel("Number of Cliffords")
# Per-seed raw frequencies plus the two posterior-mean decay curves.
for i_seed in range(nseeds):
    plt.scatter(nCliffs, Y1[i_seed,:]/shots, label = "data", marker="x",color="b")
    plt.scatter(nCliffs, Y2[i_seed,:]/shots, label = "data", marker="+",color="r")
plt.plot(nCliffs,azt_summary['mean']['AB[0]']*azt_summary['mean']['alpha']**nCliffs+\
         azt_summary['mean']['AB[1]'],'--',color="b")
plt.plot(nCliffs,azt_summary['mean']['AB[0]']*(azt_summary['mean']['alpha']*azt_summary['mean']['p_tilde'])**\
         nCliffs+azt_summary['mean']['AB[1]'],'--',color="r")
plt.legend(("Standard",
            "Interleaved"))
plt.set_title(RB_process +' SMC $accelerated$, gate: ' + intl_g\
              +", "+hardware+', backend: '+backend.name(),
              fontsize=14);
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.tools.monitor import job_monitor
from qiskit import Aer
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error, thermal_relaxation_error
from qiskit import QuantumRegister, QuantumCircuit
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
import copy
import time
# import the bayesian packages
import pymc3 as pm
import arviz as az
from scipy.optimize import curve_fit
def obtain_priors_and_data_from_fitter(rbfit, nCliffs, shots, printout = True):
    """Extract count data and prior hyper-parameters from an RB fitter.

    Reads the first pattern's survival-probability matrix and least-squares
    fit out of ``rbfit`` and turns them into the priors the Bayesian models
    expect.  Returns the tuple (m_gates, Y, alpha_ref, alpha_lower,
    alpha_upper, mu_AB, cov_AB, sigma_theta).
    """
    m_gates = copy.deepcopy(nCliffs)
    fit_params = rbfit._fit[0]['params']
    fit_errors = rbfit._fit[0]['params_err']
    # Survival probabilities of the first pattern, turned into integer counts.
    Y = (np.array(rbfit._raw_data[0]) * shots).astype(int)
    # alpha prior: the fitted value with +/-5% bounds (upper bound capped at 1).
    alpha_ref = fit_params[1]
    alpha_lower = 0.95 * alpha_ref
    alpha_upper = min(1.05 * alpha_ref, 1.0)
    # A and B priors: drop the alpha entry (index 1) from the fit vectors.
    mu_AB = np.delete(fit_params, 1)
    cov_AB = np.delete(fit_errors, 1) ** 2
    sigma_theta = 0.004  # WIP: fixed width for the hierarchical Beta prior
    if printout:
        print("priors:\nalpha_ref", alpha_ref)
        print("alpha_lower", alpha_lower, "alpha_upper", alpha_upper)
        print("A,B", mu_AB, "\ncov A,B", cov_AB)
        print("sigma_theta", sigma_theta)
    return m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta
# modified for accelerated BM with EPCest as extra parameter
def get_bayesian_model(model_type,Y,shots,m_gates,mu_AB,cov_AB, alpha_ref,
                       alpha_lower=0.5,alpha_upper=0.999,alpha_testval=0.9,
                       p_lower=0.9,p_upper=0.999,p_testval=0.95,
                       RvsI=None,IvsR=None,sigma_theta=0.001,
                       sigma_theta_l=0.0005,sigma_theta_u=0.0015):
    """Build a PyMC3 model of RB counts.

    model_type selects the likelihood structure: "hierarchical" (Beta-Binomial
    with fixed sigma_theta), "h_sigma" (Beta-Binomial with a Uniform prior on
    the Beta width), "tilde" (joint reference/interleaved model with an extra
    p_tilde decay factor, selected per row by the RvsI/IvsR masks), and any
    other value falls through to the pooled Binomial model.
    Y is the (seeds x lengths) count matrix, m_gates the Clifford lengths,
    mu_AB/cov_AB the Gaussian prior on (A, B), alpha_ref the starting value
    of the Uniform(alpha_lower, alpha_upper) prior on alpha.
    NOTE: ``alpha_testval`` is accepted but not used; ``alpha_ref`` is used
    as the test value instead.  Returns the pm.Model (not sampled).
    """
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RB_model = pm.Model()
    with RB_model:
        total_shots = np.full(Y.shape, shots)
        #Priors for unknown model parameters
        alpha = pm.Uniform("alpha",lower=alpha_lower,
                           upper=alpha_upper, testval = alpha_ref)
        # (A, B) prior: multivariate normal truncated to non-negative values.
        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)
        AB = BoundedMvNormal("AB", mu=mu_AB,testval = mu_AB,
                             cov= np.diag(cov_AB),
                             shape = (2))
        if model_type == "hierarchical":
            # Per-length latent GSP with fixed spread sigma_theta.
            GSP = AB[0]*alpha**m_gates + AB[1]
            theta = pm.Beta("GSP",
                            mu=GSP,
                            sigma = sigma_theta,
                            shape = Y.shape[1])
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_h", p=theta, observed=Y,
                            n = total_shots)
        elif model_type == "h_sigma":
            # Same hierarchy, but the Beta width is itself inferred.
            sigma_t = pm.Uniform("sigma_t", testval = sigma_theta,
                                 upper = sigma_theta_u, lower = sigma_theta_l)
            GSP = AB[0]*alpha**m_gates + AB[1]
            theta = pm.Beta("GSP",
                            mu=GSP,
                            sigma = sigma_t,
                            shape = Y.shape[1])
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_h", p=theta, observed=Y,
                            n = total_shots)
        elif model_type == "tilde":
            # Joint model: reference rows decay with alpha, interleaved rows
            # with alpha*p_tilde, selected by the RvsI/IvsR indicator masks.
            p_tilde = pm.Uniform("p_tilde",lower=p_lower,
                                 upper=p_upper, testval = p_testval)
            GSP = AB[0]*(RvsI*alpha**m_gates + IvsR*(alpha*p_tilde)**m_gates) + AB[1]
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_t", p=GSP, observed=Y,
                            n = total_shots)
        else: # defaul model "pooled"
            GSP = AB[0]*alpha**m_gates + AB[1]
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_p", p=GSP, observed=Y,
                            n = total_shots)
    return RB_model
def get_bayesian_model_hierarchical(model_type,Y): # modified for accelerated BM with EPCest as extra parameter
    """Build the hierarchical (Beta-Binomial) RB model.

    NOTE(review): ``model_type`` is accepted but never used, and this function
    reads ``alpha_lower``, ``alpha_upper``, ``alpha_ref``, ``mu_AB``,
    ``cov_AB``, ``m_gates``, ``shots`` and ``sigma_theta`` as free variables —
    they must exist at module/notebook scope (e.g. set via
    obtain_priors_and_data_from_fitter) before calling.
    """
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RBH_model = pm.Model()
    with RBH_model:
        #Priors for unknown model parameters
        alpha = pm.Uniform("alpha",lower=alpha_lower,
                           upper=alpha_upper, testval = alpha_ref)
        # (A, B) prior truncated to non-negative values.
        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)
        AB = BoundedMvNormal("AB", mu=mu_AB,testval = mu_AB,
                             cov= np.diag(cov_AB),
                             shape = (2))
        # Expected value of outcome
        GSP = AB[0]*alpha**m_gates + AB[1]
        total_shots = np.full(Y.shape, shots)
        # Per-length latent success probability around the decay curve.
        theta = pm.Beta("GSP",
                        mu=GSP,
                        sigma = sigma_theta,
                        shape = Y.shape[1])
        # Likelihood (sampling distribution) of observations
        p = pm.Binomial("Counts", p=theta, observed=Y,
                        n = total_shots)
    return RBH_model
def get_trace(RB_model, draws = 2000, tune= 10000, target_accept=0.95, return_inferencedata=True):
    """Sample the posterior of ``RB_model`` and plot the trace.

    Gradient-based sampling; see
    https://docs.pymc.io/notebooks/sampler-stats.html and
    https://docs.pymc.io/notebooks/api_quickstart.html for the sampler API.
    """
    sampler_kwargs = dict(draws=draws, tune=tune, target_accept=target_accept,
                          return_inferencedata=return_inferencedata)
    with RB_model:
        posterior_trace = pm.sample(**sampler_kwargs)
    # Trace plotting happens in its own model context, as in the notebook.
    with RB_model:
        az.plot_trace(posterior_trace)
    return posterior_trace
def get_summary(RB_model, trace, round_to=6, hdi_prob=.94, kind='stats'):
    """Return the ArviZ summary table for ``trace`` (hdi_prob=.94 is the default)."""
    with RB_model:
        stats_table = az.summary(trace, round_to=round_to, hdi_prob=hdi_prob, kind=kind)
    return stats_table
# obtain EPC from alpha (used by plot_posterior) # deprecated, should use scale
#def alpha_to_EPC(alpha):
#return 3*(1-alpha)/4
def get_EPC_and_legends(rbfit,azs):
    """Compute the Bayesian EPC and build the legend strings for the plots.

    ``azs`` is an ArviZ summary with 'mean'/'sd' rows for 'alpha'.
    NOTE(review): relies on module-level ``alpha_to_EPC`` and ``pred_epc``
    being defined before the call; also mixes ``rbfit.fit`` and
    ``rbfit._fit`` accessors — presumably equivalent on the ignis fitter,
    verify against the fitter class.
    """
    EPC_Bayes = alpha_to_EPC(azs['mean']['alpha'])
    # One-sided error: EPC shift caused by one posterior sd of alpha.
    EPC_Bayes_err = EPC_Bayes - alpha_to_EPC(azs['mean']['alpha']+azs['sd']['alpha'])
    Bayes_legend ="EPC Bayes {0:.5f} ({1:.5f})".format(EPC_Bayes, EPC_Bayes_err)
    Fitter_legend ="EPC Fitter {0:.5f} ({1:.5f})".format(rbfit.fit[0]['epc']\
                                                       ,rbfit._fit[0]['epc_err'])
    # A predicted-EPC legend only when a positive prediction is available.
    if pred_epc > 0.0:
        pred_epc_legend = "EPC predicted {0:.5f}".format(pred_epc)
    else:
        pred_epc_legend = ''
    return EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend, pred_epc_legend
def EPC_compare_fitter_to_bayes(RB_model, azs, trace,m_name,rbfit):
    """Plot the posterior EPC distribution with the fitter/predicted overlays.

    NOTE(review): uses module-level ``RB_process``, ``hardware``, ``backend``,
    ``alpha_ref`` and ``pred_epc`` — they must be set by the calling notebook.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend, pred_epc_legend = get_EPC_and_legends(rbfit,azs)
    # Posterior of alpha shown on the EPC axis via the transform.
    with RB_model:
        az.plot_posterior(trace, var_names=['alpha'], round_to=4,
                          transform = alpha_to_EPC, point_estimate=None)
    plt.title("Error per Clifford "+RB_process+" device: "+hardware
              +' backend: '+backend.name()+' model:'+m_name,
              fontsize=12)
    # Red line: frequentist (fitter) point estimate.
    plt.axvline(x=alpha_to_EPC(alpha_ref),color='red')
    if pred_epc > 0.0:
        # Green line: predicted EPC, when available.
        plt.axvline(x=pred_epc,color='green')
        plt.legend((Bayes_legend, "Higher density interval",Fitter_legend, pred_epc_legend), fontsize=10)
    else:
        plt.legend((Bayes_legend, "Higher density interval",Fitter_legend), fontsize=10 )
    plt.show()
def GSP_compare_fitter_to_bayes(RB_model, azs,m_name,rbfit):
    """Plot observed ground-state populations vs. the Bayesian and fitter curves.

    NOTE(review): reads ``m_gates``, ``Y``, ``shots``, ``mu_AB``, ``alpha_ref``,
    ``nseeds``, ``RB_process``, ``hardware`` and ``backend`` from module scope.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend,_ = get_EPC_and_legends(rbfit,azs)
    # plot ground state population ~ Clifford length
    fig, axes = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
    axes.set_ylabel("Ground State Population")
    axes.set_xlabel("Clifford Length")
    # Red dots: mean observed survival frequency per length.
    axes.plot(m_gates, np.mean(Y/shots,axis=0), 'r.')
    # Dashed: posterior-mean Bayesian decay curve.
    axes.plot(m_gates,azs['mean']['AB[0]']*azs['mean']['alpha']**m_gates+azs['mean']['AB[1]'],'--')
    #axes.plot(m_gates,azs['mean']['GSP'],'--') # WIP
    #axes.errorbar(m_gates, azs['mean']['GSP'], azs['sd']['GSP'], linestyle='None', marker='^') # WIP
    # Dotted: the least-squares (prior) curve.
    axes.plot(m_gates,mu_AB[0]*np.power(alpha_ref,m_gates)+mu_AB[1],':')
    # Raw per-seed points, offset slightly left for readability.
    for i_seed in range(nseeds):
        plt.scatter(m_gates-0.25, Y[i_seed,:]/shots, label = "data", marker="x")
    axes.legend(["Mean Observed Frequencies",
                 "Bayesian Model\n"+Bayes_legend,
                 "Fitter Model\n"+Fitter_legend],fontsize=12)
    axes.set_title(RB_process+" device: "+hardware+' backend: '+backend.name()+' model:'+m_name,
                   fontsize=14) # WIP
def get_predicted_EPC(error_source):
    """Predict the 2-qubit EPC from known gate errors.

    error_source: "depolarization" (uses p1Q/p2Q depolarizing rates) or
    "from_T1_T2" (coherence limit from t1/t2 and gate times).
    NOTE(review): reads ``transpile_list``, ``xdata``, ``basis_gates``,
    ``rb_opts``, ``rb_pattern``, ``p1Q``, ``p2Q``, ``t1``, ``t2``, ``gate1Q``
    and ``gate2Q`` from module scope.  If ``error_source`` matches neither
    string, ``pred_epc`` is unbound and the final return raises
    UnboundLocalError.  Also, the two branches pass different
    ``qubit_pair`` values ([0, 2] vs [0, 1]) — confirm which pair the
    experiment actually uses.
    """
    #Count the number of single and 2Q gates in the 2Q Cliffords
    gates_per_cliff = rb.rb_utils.gates_per_clifford(transpile_list,xdata[0],basis_gates,rb_opts['rb_pattern'][0])
    for basis_gate in basis_gates:
        print("Number of %s gates per Clifford: %f "%(basis_gate ,
                                                      np.mean([gates_per_cliff[rb_pattern[0][0]][basis_gate],
                                                               gates_per_cliff[rb_pattern[0][1]][basis_gate]])))
    # Calculate the predicted epc
    # from the known depolarizing errors on the simulation
    if error_source == "depolarization":
        # Error per gate from noise model
        epgs_1q = {'u1': 0, 'u2': p1Q/2, 'u3': 2*p1Q/2}
        epg_2q = p2Q*3/4
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 2],
            list_epgs_1q=[epgs_1q, epgs_1q])
    # using the predicted primitive gate errors from the coherence limit
    if error_source == "from_T1_T2":
        # Predicted primitive gate errors from the coherence limit
        u2_error = rb.rb_utils.coherence_limit(1,[t1],[t2],gate1Q)
        u3_error = rb.rb_utils.coherence_limit(1,[t1],[t2],2*gate1Q)
        epg_2q = rb.rb_utils.coherence_limit(2,[t1,t1],[t2,t2],gate2Q)
        epgs_1q = {'u1': 0, 'u2': u2_error, 'u3': u3_error}
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 1],
            list_epgs_1q=[epgs_1q, epgs_1q])
    return pred_epc
def get_and_run_seeds(rb_circs, shots, backend, coupling_map,
                      basis_gates, noise_model, retrieve_list=[]):
    """Transpile and execute (or retrieve) one RB job per seed.

    rb_circs is a list of circuit lists, one entry per RB seed.  When
    ``retrieve_list`` is empty the circuits are executed on ``backend``
    (with ``noise_model`` unless it is None, which indicates a hardware
    run); otherwise the job ids in ``retrieve_list`` are fetched instead.
    Returns (result_list, transpile_list), one entry per seed.
    NOTE: the mutable default ``retrieve_list=[]`` is never mutated here,
    so it is harmless, but callers should still pass their own list.
    """
    #basis_gates = ['u1','u2','u3','cx'] # use U,CX for now
    result_list = []
    transpile_list = []
    for rb_seed,rb_circ_seed in enumerate(rb_circs):
        print('Compiling seed %d'%rb_seed)
        rb_circ_transpile = qiskit.transpile(rb_circ_seed,
                                             optimization_level=0,
                                             basis_gates=basis_gates)
        print('Runing seed %d'%rb_seed)
        if retrieve_list == []:
            if noise_model == None: # this indicates harware run
                job = qiskit.execute(rb_circ_transpile,
                                     shots=shots,
                                     backend=backend,
                                     coupling_map=coupling_map,
                                     basis_gates=basis_gates)
            else:
                job = qiskit.execute(rb_circ_transpile,
                                     shots=shots,
                                     backend=backend,
                                     coupling_map=coupling_map,
                                     noise_model=noise_model,
                                     basis_gates=basis_gates)
            job_monitor(job)
        else:
            # Fetch a previously submitted job rather than re-running.
            job = backend.retrieve_job(retrieve_list[rb_seed])
        result_list.append(job.result())
        transpile_list.append(rb_circ_transpile)
    print("Finished Jobs")
    return result_list, transpile_list
def get_count_data(result_list, nCliffs):
    """Build the (seeds x lengths) matrix of ground-state-like counts.

    For each result (one per RB seed) and each Clifford-length index, sums
    the counts of the bitstrings treated as a "success" outcome, pooling the
    1Q, 2Q and 3Q interleave conventions ('100' covers the 3Q pattern).

    Improvements over the original: drops the unused loop variables, uses a
    frozenset for O(1) membership, and calls ``result.get_counts()`` once per
    result instead of once per length.

    Parameters
    ----------
    result_list : list of Result-like objects whose ``get_counts()`` returns
        one counts dict per Clifford length.
    nCliffs : sequence of Clifford lengths; only its length is used.

    Returns
    -------
    np.ndarray of shape (len(result_list), len(nCliffs)) with integer counts.
    """
    # All bitstrings counted as "ground state" across the pooled processes.
    valid_bitstrings = frozenset(('0', '00', '000', '100'))
    rows = []
    for result in result_list:
        counts_per_length = result.get_counts()  # hoisted out of the inner loop
        rows.append([
            sum(val for key, val in counts_per_length[c_index].items()
                if key in valid_bitstrings)
            for c_index in range(len(nCliffs))
        ])
    return np.array(rows)
# This section for the LS fit in this model pooling
# data from 2Q and 3Q interleave processes
def func(x, a, b, c):
    """Exponential RB decay model: a * b**x + c."""
    decay_term = b ** x
    return a * decay_term + c
def epc_fitter_when_mixed_2Q_3Q_RB(X,Y1,Y2,shots,check_plot=False):
    """Least-squares interleaved-RB EPC estimate from pooled count matrices.

    Fits a * b**m + c separately to the reference (Y1) and interleaved (Y2)
    survival frequencies and returns (epc_estimate, epc_sigma) from the
    ratio of the fitted decay parameters, scaled by 3/4 (2-qubit factor).
    When ``check_plot`` is True, the data and fits are plotted and the
    fitted decay parameters printed.
    """
    def _decay(m, a, b, c):
        # Same model as the module-level ``func`` helper, inlined here.
        return a * b ** m + c
    lengths = np.array(list(X) * Y1.shape[0])  # repeat X once per seed row
    gsp_ref = np.ravel(Y1) / shots
    popt_ref, pcov_ref = curve_fit(_decay, lengths, gsp_ref)
    sigma_ref = np.sqrt(np.diag(pcov_ref))
    gsp_intl = np.ravel(Y2) / shots
    popt_intl, pcov_intl = curve_fit(_decay, lengths, gsp_intl)
    sigma_intl = np.sqrt(np.diag(pcov_intl))
    if check_plot:
        import matplotlib.pyplot as plt
        plt.plot(lengths, gsp_ref, 'bx', label='Reference')
        plt.plot(lengths, gsp_intl, 'r+', label='Interleave')
        plt.plot(X, np.mean(Y1, axis=0) / shots, 'b-', label=None)
        plt.plot(X, np.mean(Y2, axis=0) / shots, 'r-', label=None)
        plt.ylabel('Population of |00>')
        plt.xlabel('Number of Cliffords')
        plt.legend()
        plt.show()
        print(popt_ref[1])
        print(sigma_ref[1])
        print(popt_intl[1])
        print(sigma_intl[1])
    alpha_ratio = popt_intl[1] / popt_ref[1]
    epc_est_fitter = 3 * (1 - alpha_ratio) / 4
    epc_est_fitter_err = 3 * alpha_ratio / 4 * (np.sqrt(sigma_ref[1] ** 2 + sigma_intl[1] ** 2))
    return epc_est_fitter, epc_est_fitter_err
# This section for the demo with qiskit experiment
def retrieve_from_lsf(exp):
    """Pull the least-squares-fit results out of a qiskit-experiments object.

    Returns (param_sigmas, popt, EPC, EPC_err, experiment_type), where the
    sigmas are the square roots of the covariance diagonal.
    """
    fit_record = exp._analysis_results[0]
    param_sigmas = np.sqrt(np.diag(fit_record['pcov']))
    return (param_sigmas,
            fit_record['popt'],
            fit_record['EPC'],
            fit_record['EPC_err'],
            exp._data[0]['metadata']['experiment_type'])
def get_GSP_counts(data, x_length, data_range):
    """Collect ground-state-population counts into a (samples x lengths) array.

    ``data`` is a flat list of circuit-result dicts laid out sample-major:
    entry ``i_samples * x_length + c_index`` holds the 'counts' dict of
    sample ``i_samples`` at Clifford-length index ``c_index``.  Counts for
    the pooled 1Q/2Q/3Q success bitstrings are summed per entry.
    """
    success_keys = frozenset(('0', '00', '000', '100'))  # pooled valid outcomes
    matrix = []
    for i_samples in data_range:
        base = i_samples * x_length
        row = [
            sum(count for bitstring, count in data[base + c_index]['counts'].items()
                if bitstring in success_keys)
            for c_index in range(x_length)
        ]
        matrix.append(row)
    return np.array(matrix)
def RB_bayesian_results(resmodel, trace, lengths,
                        epc_est_fm, epc_est_fm_err, experiment_type, scale,
                        num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
                        EPG_dic = None, epc_calib = np.nan, Y1 = None, Y2= None, show_plot = True):
    """Summarize and plot the Bayesian RB results next to the LSF estimates.

    resmodel/trace are the PyMC3 model and its sampled posterior; ``lengths``
    the Clifford-length vector; ``epc_est_fm``/``epc_est_fm_err`` the
    frequentist (LSF) estimate for comparison; ``experiment_type`` selects
    the posterior variable ('alpha' for "StandardRB", 'p_tilde' for
    "InterleavedRB"); ``scale`` is the (2**n - 1)/2**n depolarizing factor.
    ``EPG_dic`` optionally maps gate names to per-gate errors (StandardRB
    only); ``epc_calib`` is the calibration EPC (NaN when unavailable).
    Prints a comparison table and, when ``show_plot``, draws the decay and
    posterior plots.  Returns None.

    Fixes vs. the original: the calibration line was drawn under
    ``epc_calib != np.nan``, which is ALWAYS true (NaN compares unequal to
    everything, including itself); now uses np.isnan.  Also guards the
    EPG table against the default ``EPG_dic=None``.
    """
    # obtain EPC from alpha (used by az.plot_posterior)
    def alpha_to_EPC(alpha):
        return scale*(1-alpha)
    azt_summary = get_summary(resmodel, trace, kind = 'stats')
    print(azt_summary,'\n')
    if experiment_type == "StandardRB":
        p = 'alpha'
        epc_est_a = scale*(1 - azt_summary['mean'][p])
        epc_est_a_err = scale* (azt_summary['sd'][p])
        # compare LSF and SMC
        print("Model:       Frequentist    Bayesian")
        print("_______________________________________")
        print("EPC          {0:1.3e}     {1:1.3e} "
              .format(epc_est_fm,epc_est_a))
        print("± sigma    ± {0:1.3e}   ± {1:1.3e} "
              .format(epc_est_fm_err, epc_est_a_err))
        if EPG_dic:  # EPG_dic defaults to None; skip the per-gate table then
            for i, (gate,EPG) in enumerate(EPG_dic.items()):
                print("{0:<12}{1:1.3e}     {2:1.3e}"
                      .format("EPG "+gate,EPG,EPG*epc_est_a/epc_est_fm))
        if not show_plot:
            return
        import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
        # NOTE: ``plt`` is deliberately rebound to the Axes object below.
        fig, plt = plt.subplots(1, 1)
        plt.set_ylabel("P(0)")
        plt.set_xlabel("Cliffords Length")
        # Posterior-mean decay curve plus the raw per-sample frequencies.
        plt.plot(lengths,azt_summary['mean']['AB[0]']*azt_summary['mean']['alpha']**lengths+\
                 azt_summary['mean']['AB[1]'],'-',color="r")
        for i_seed in range(num_samples):
            plt.scatter(lengths, Y[i_seed,:]/shots, label = "data", marker="x",color="grey")
        plt.set_title(experiment_type +', ' + "qubit: " + str(physical_qubits)\
                      +', backend: '+backend.name(),
                      fontsize=14);
    elif experiment_type == "InterleavedRB":
        p = 'p_tilde'
        epc_est_a = scale*(1 - azt_summary['mean'][p])
        epc_est_a_err = scale* (azt_summary['sd'][p])
        # compare LSF and SMC
        print("Model:       Frequentist    Bayesian     Calibration")
        print("__________________________________________________________")
        print("EPC          {0:1.3e}     {1:1.3e}    {2:1.3e}"
              .format(epc_est_fm,epc_est_a,epc_calib ))
        print("± sigma    ± {0:1.3e}   ± {1:1.3e} "
              .format(epc_est_fm_err, epc_est_a_err))
        if not show_plot:
            return
        import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
        fig, plt = plt.subplots(1, 1)
        plt.set_ylabel("P(0)")
        plt.set_xlabel("Cliffords Length")
        # Raw reference/interleaved points plus both posterior-mean curves.
        for i_seed in range(num_samples):
            plt.scatter(lengths, Y1[i_seed,:]/shots, label = "data", marker="x",color="r")
            plt.scatter(lengths, Y2[i_seed,:]/shots, label = "data", marker="+",color="orange")
        plt.plot(lengths,azt_summary['mean']['AB[0]']*azt_summary['mean']['alpha']**lengths+\
                 azt_summary['mean']['AB[1]'],'--',color="r")
        plt.plot(lengths,azt_summary['mean']['AB[0]']*(azt_summary['mean']['alpha']*azt_summary['mean']['p_tilde'])**\
                 lengths+azt_summary['mean']['AB[1]'],'--',color="orange")
        plt.legend(("Standard, SMC model",
                    "Interleaved, SMC model"))
        plt.set_title(experiment_type +', ' + interleaved_gate + str(physical_qubits)\
                      +', backend: '+backend.name(),
                      fontsize=14);
    # Posterior plot on the EPC axis (runs for either experiment type;
    # an unknown experiment_type would leave p/epc_est_a unbound here).
    import matplotlib.pyplot as plt # if not yet imported
    #plt.rcParams["figure.figsize"] = plt.rcParamsDefault["figure.figsize"] # to reset to default
    plt.rcParams["figure.figsize"] = (8,5)
    with resmodel:
        ax = az.plot_posterior(trace, var_names=[p], round_to=4, point_estimate=None,
                               transform = alpha_to_EPC)
    ax.set_xlim(epc_est_a - 6*epc_est_a_err, epc_est_a + 6*epc_est_a_err)
    plt.axvline(x=epc_est_fm,color='cyan',ls="-")
    # BUGFIX: ``epc_calib != np.nan`` was always True; test NaN properly.
    if not np.isnan(epc_calib):
        plt.axvline(x=epc_calib,color='r',ls=":")
    plt.axvline(x=epc_est_a,color='blue',ls=":")
    plt.title(experiment_type +', ' + interleaved_gate + " qubit(s):" + str(physical_qubits)\
              +', backend: '+backend.name(),
              fontsize=14)
    Bayes_legend = "EPC SMC: {0:1.3e} ± {1:1.3e}".format(epc_est_a, epc_est_a_err)
    LSF_legend = "EPC LSF: {0:1.3e} ± {1:1.3e}".format(epc_est_fm, epc_est_fm_err)
    Cal_legend = "EPC Calibration: {0:1.3e}".format(epc_calib)
    if epc_calib > 0.0:
        plt.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
                    LSF_legend,
                    Cal_legend), fontsize=12 )
    else:
        plt.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
                    LSF_legend), fontsize=12 )
# obtain EPC from alpha and scale(used by az.plot_posterior)
def alpha_to_EPC_from_scale(alpha, scale):
    """Convert a decay parameter ``alpha`` to an EPC using the given ``scale``."""
    depolarizing_part = 1 - alpha
    return scale * depolarizing_part
# guess number of shots
def guess_shots(Y):
    """Guess the shot count as the smallest power of two (>= 2) covering max(Y).

    ``Y`` is an array of observed counts; the true shot number must be at
    least its maximum, and shot counts are conventionally powers of two.
    """
    peak = np.max(Y)
    # Smallest integer upper bound of the peak (counts are normally ints).
    bound = int(peak)
    if bound < peak:
        bound += 1
    # (bound - 1).bit_length() is the exponent of the next power of two:
    # O(1) instead of the original doubling loop, identical results.
    return max(2, 1 << (bound - 1).bit_length())
def bayesian_standard_RB_model():
    """Build the pooled Bayesian model for a StandardRB run.

    NOTE(review): takes no arguments — it reads ``Y``, ``shots``, ``lengths``,
    ``popt_fm`` and ``perr_fm`` from module/notebook scope (presumably set by
    retrieve_from_lsf and the count helpers; verify before calling).
    Alpha bounds are the LSF value ± 6 sigma, capped just below 1.
    """
    # construct model
    RB_model = get_bayesian_model(model_type="pooled",Y=Y,shots=shots,m_gates=lengths,
                         mu_AB=[popt_fm[0],popt_fm[2]],cov_AB=[perr_fm[0],perr_fm[2]],
                         alpha_ref=popt_fm[1],
                         alpha_lower=popt_fm[1]-6*perr_fm[1],
                         alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]),
                         RvsI=None,IvsR=None)
    return RB_model
def bayesian_interleaved_RB_model():
    """Build the joint ("tilde") Bayesian model for an InterleavedRB run.

    NOTE(review): takes no arguments — it reads ``Y``, ``shots``, ``lengths``,
    ``popt_fm``, ``perr_fm``, ``RvsI`` and ``IvsR`` from module/notebook
    scope.  Alpha and p_tilde bounds are the LSF values ± 6 sigma, capped
    just below 1.
    """
    # construct model
    RB_model = get_bayesian_model("tilde",Y=Y,shots=shots, m_gates=lengths,
                              alpha_ref=popt_fm[1], p_testval= popt_fm[2],
                              alpha_lower=popt_fm[1]-6*perr_fm[1],
                              alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]),
                              p_lower=popt_fm[2]-6*perr_fm[2],
                              p_upper=min(1.-1.E-6,popt_fm[2]+6*perr_fm[2]),
                              mu_AB=[popt_fm[0],popt_fm[3]],cov_AB=[perr_fm[0],perr_fm[3]],
                              RvsI=RvsI,IvsR=IvsR)
    return RB_model
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.tools.monitor import job_monitor
from qiskit import Aer
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error, thermal_relaxation_error
from qiskit import QuantumRegister, QuantumCircuit
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
import copy
import time
# import the bayesian packages
import pymc3 as pm
import arviz as az
from scipy.optimize import curve_fit
def obtain_priors_and_data_from_fitter(rbfit, nCliffs, shots, printout = True):
    """Extract count data and Bayesian prior hyper-parameters from an RB fitter.

    Returns (m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB,
    sigma_theta) built from the fitter's first pattern.
    """
    m_gates = copy.deepcopy(nCliffs)
    # We choose the count matrix corresponding to 2 Qubit RB
    Y = (np.array(rbfit._raw_data[0])*shots).astype(int)
    # alpha prior and bounds: fitted value with +/-5% bounds (capped at 1).
    alpha_ref = rbfit._fit[0]['params'][1]
    #alpha_lower = alpha_ref - 6*rbfit._fit[0]['params_err'][1]
    #alpha_upper = alpha_ref + 6*rbfit._fit[0]['params_err'][1]
    alpha_lower = .95*alpha_ref
    alpha_upper = min(1.05*alpha_ref,1.0)
    # priors for A and B: drop the alpha entry (index 1) from the fit vectors.
    mu_AB = np.delete(rbfit._fit[0]['params'],1)
    cov_AB=np.delete(rbfit._fit[0]['params_err'],1)**2
    # prior for sigma theta:
    sigma_theta = 0.004 # WIP
    if printout:
        print("priors:\nalpha_ref",alpha_ref)
        print("alpha_lower", alpha_lower, "alpha_upper", alpha_upper)
        print("A,B", mu_AB, "\ncov A,B", cov_AB)
        print("sigma_theta", sigma_theta)
    return m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta
# modified for accelerated BM with EPCest as extra parameter
def get_bayesian_model(model_type,Y,shots,m_gates,mu_AB,cov_AB, alpha_ref,
                       alpha_lower=0.5,alpha_upper=0.999,alpha_testval=0.9,
                       p_lower=0.9,p_upper=0.999,p_testval=0.95,
                       RvsI=None,IvsR=None,sigma_theta=0.004):
    """Build a PyMC3 model of RB counts.

    model_type selects the likelihood: "hierarchical" (Beta-Binomial with
    fixed sigma_theta), "tilde" (joint reference/interleaved model with an
    extra p_tilde decay factor, selected per row by the RvsI/IvsR masks),
    anything else falls through to the pooled Binomial model.
    NOTE: ``alpha_testval`` is accepted but unused; ``alpha_ref`` is used as
    the alpha test value.  Returns the pm.Model (not sampled).
    """
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RB_model = pm.Model()
    with RB_model:
        total_shots = np.full(Y.shape, shots)
        #Priors for unknown model parameters
        alpha = pm.Uniform("alpha",lower=alpha_lower,
                           upper=alpha_upper, testval = alpha_ref)
        # (A, B) prior: multivariate normal truncated to non-negative values.
        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)
        AB = BoundedMvNormal("AB", mu=mu_AB,testval = mu_AB,
                             cov= np.diag(cov_AB),
                             shape = (2))
        if model_type == "hierarchical":
            # Per-length latent GSP with fixed spread sigma_theta.
            GSP = AB[0]*alpha**m_gates + AB[1]
            theta = pm.Beta("GSP",
                            mu=GSP,
                            sigma = sigma_theta,
                            shape = Y.shape[1])
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_h", p=theta, observed=Y,
                            n = total_shots)
        elif model_type == "tilde":
            # Joint model: reference rows decay with alpha, interleaved rows
            # with alpha*p_tilde, selected by the RvsI/IvsR indicator masks.
            p_tilde = pm.Uniform("p_tilde",lower=p_lower,
                                 upper=p_upper, testval = p_testval)
            GSP = AB[0]*(RvsI*alpha**m_gates + IvsR*(alpha*p_tilde)**m_gates) + AB[1]
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_t", p=GSP, observed=Y,
                            n = total_shots)
        else: # defaul model "pooled"
            GSP = AB[0]*alpha**m_gates + AB[1]
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_p", p=GSP, observed=Y,
                            n = total_shots)
    return RB_model
def get_bayesian_model_hierarchical(model_type, Y):  # modified for accelerated BM with EPCest as extra parameter
    """Build the hierarchical PyMC3 RB model.

    NOTE(review): relies on module-level globals (alpha_lower, alpha_upper,
    alpha_ref, mu_AB, cov_AB, m_gates, shots, sigma_theta) being set
    beforehand by obtain_priors_and_data_from_fitter; `model_type` is
    accepted but unused here.
    """
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RBH_model = pm.Model()
    with RBH_model:
        # Priors for unknown model parameters
        alpha = pm.Uniform("alpha", lower=alpha_lower,
                           upper=alpha_upper, testval=alpha_ref)
        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)
        AB = BoundedMvNormal("AB", mu=mu_AB, testval=mu_AB,
                             cov=np.diag(cov_AB),
                             shape=(2))
        # Expected value of outcome: exponential decay of the GSP
        GSP = AB[0]*alpha**m_gates + AB[1]
        total_shots = np.full(Y.shape, shots)
        # Per-length Beta layer around the decay curve (the hierarchical part)
        theta = pm.Beta("GSP",
                        mu=GSP,
                        sigma=sigma_theta,
                        shape=Y.shape[1])
        # Likelihood (sampling distribution) of observations
        p = pm.Binomial("Counts", p=theta, observed=Y,
                        n=total_shots)
    return RBH_model
def get_trace(RB_model, draws=2000, tune=10000, target_accept=0.95, return_inferencedata=True):
    """Run gradient-based (NUTS) sampling on `RB_model`, plot the trace,
    and return the sampled data.

    See https://docs.pymc.io/notebooks/sampler-stats.html and
    https://docs.pymc.io/notebooks/api_quickstart.html
    """
    with RB_model:
        sampled = pm.sample(draws=draws,
                            tune=tune,
                            target_accept=target_accept,
                            return_inferencedata=return_inferencedata)
    with RB_model:
        az.plot_trace(sampled)
    return sampled
def get_summary(RB_model, trace, round_to=6, hdi_prob=.94, kind='all'):
    """Return the ArviZ summary table for `trace` (hdi_prob=.94 is the default)."""
    with RB_model:
        return az.summary(trace, round_to=round_to, hdi_prob=hdi_prob, kind=kind)
# Map the RB depolarizing parameter alpha to the two-qubit error per
# Clifford, EPC = (d-1)/d * (1-alpha) with d=4.  # deprecated, should use scale
def alpha_to_EPC(alpha):
    """Return the error per Clifford for a two-qubit RB decay parameter."""
    depolarization = 1 - alpha
    return 3 * depolarization / 4
def get_EPC_and_legends(rbfit, azs):
    """Build EPC numbers and plot-legend strings from the Bayes summary `azs`
    and the least-squares fitter `rbfit`.

    NOTE(review): reads the module-level global `pred_epc`; a value <= 0
    suppresses the prediction legend.
    """
    EPC_Bayes = alpha_to_EPC(azs['mean']['alpha'])
    # 1-sigma EPC error derived from the posterior sd of alpha
    EPC_Bayes_err = EPC_Bayes - alpha_to_EPC(azs['mean']['alpha']+azs['sd']['alpha'])
    Bayes_legend = "EPC Bayes {0:.5f} ({1:.5f})".format(EPC_Bayes, EPC_Bayes_err)
    # NOTE(review): mixes the public `fit` and private `_fit` accessors --
    # they appear to expose the same data; confirm against the fitter API.
    Fitter_legend = "EPC Fitter {0:.5f} ({1:.5f})".format(rbfit.fit[0]['epc'],
                                                          rbfit._fit[0]['epc_err'])
    if pred_epc > 0.0:
        pred_epc_legend = "EPC predicted {0:.5f}".format(pred_epc)
    else:
        pred_epc_legend = ''
    return EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend, pred_epc_legend
def EPC_compare_fitter_to_bayes(RB_model, azs, trace, m_name, rbfit):
    """Plot the posterior EPC distribution and overlay the least-squares and
    (optionally) predicted EPC as vertical reference lines.

    NOTE(review): uses module-level globals RB_process, hardware, backend,
    alpha_ref and pred_epc for the title and reference lines.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend, pred_epc_legend = get_EPC_and_legends(rbfit, azs)
    with RB_model:
        az.plot_posterior(trace, var_names=['alpha'], round_to=4,
                          transform=alpha_to_EPC, point_estimate=None)
        plt.title("Error per Clifford "+RB_process+" device: "+hardware
                  +' backend: '+backend.name()+' model:'+m_name,
                  fontsize=12)
        # Red line: EPC from the least-squares fit
        plt.axvline(x=alpha_to_EPC(alpha_ref), color='red')
        if pred_epc > 0.0:
            # Green line: EPC predicted from the noise model
            plt.axvline(x=pred_epc, color='green')
            plt.legend((Bayes_legend, "Higher density interval", Fitter_legend, pred_epc_legend), fontsize=10)
        else:
            plt.legend((Bayes_legend, "Higher density interval", Fitter_legend), fontsize=10)
        plt.show()
def GSP_compare_fitter_to_bayes(RB_model, azs, m_name, rbfit):
    """Plot observed ground-state populations vs Clifford length together
    with the Bayesian-mean and least-squares decay curves.

    NOTE(review): uses module-level globals m_gates, Y, shots, mu_AB,
    alpha_ref, nseeds, RB_process, hardware and backend.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend, _ = get_EPC_and_legends(rbfit, azs)
    # plot ground state population ~ Clifford length
    fig, axes = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
    axes.set_ylabel("Ground State Population")
    axes.set_xlabel("Clifford Length")
    # Mean observed survival frequency per length
    axes.plot(m_gates, np.mean(Y/shots, axis=0), 'r.')
    # Bayesian posterior-mean decay curve
    axes.plot(m_gates, azs['mean']['AB[0]']*azs['mean']['alpha']**m_gates+azs['mean']['AB[1]'], '--')
    #axes.plot(m_gates,azs['mean']['GSP'],'--') # WIP
    #axes.errorbar(m_gates, azs['mean']['GSP'], azs['sd']['GSP'], linestyle='None', marker='^') # WIP
    # Least-squares-fit decay curve
    axes.plot(m_gates, mu_AB[0]*np.power(alpha_ref, m_gates)+mu_AB[1], ':')
    # Raw per-seed frequencies, x-shifted slightly for visibility
    for i_seed in range(nseeds):
        plt.scatter(m_gates-0.25, Y[i_seed, :]/shots, label="data", marker="x")
    axes.legend(["Mean Observed Frequencies",
                 "Bayesian Model\n"+Bayes_legend,
                 "Fitter Model\n"+Fitter_legend], fontsize=12)
    axes.set_title(RB_process+" device: "+hardware+' backend: '+backend.name()+' model:'+m_name,
                   fontsize=14)  # WIP
def get_predicted_EPC(error_source):
    """Predict the 2Q error per Clifford from the known noise model.

    Parameters
    ----------
    error_source : str
        "depolarization" -- use the depolarizing-error rates p1Q/p2Q;
        "from_T1_T2"     -- use the coherence limit from t1/t2 and gate times.

    Returns
    -------
    float -- the predicted error per Clifford.

    Raises
    ------
    ValueError for any other `error_source` (previously an unknown value fell
    through both branches and crashed with UnboundLocalError at the return).

    NOTE(review): reads module-level globals transpile_list, xdata,
    basis_gates, rb_opts, rb_pattern and the noise parameters
    (p1Q/p2Q or t1/t2/gate1Q/gate2Q); confirm they are set by the caller.
    """
    if error_source not in ("depolarization", "from_T1_T2"):
        raise ValueError("unknown error_source: %r" % (error_source,))
    # Count the number of single and 2Q gates in the 2Q Cliffords
    gates_per_cliff = rb.rb_utils.gates_per_clifford(transpile_list, xdata[0], basis_gates, rb_opts['rb_pattern'][0])
    for basis_gate in basis_gates:
        print("Number of %s gates per Clifford: %f " % (
            basis_gate,
            np.mean([gates_per_cliff[rb_pattern[0][0]][basis_gate],
                     gates_per_cliff[rb_pattern[0][1]][basis_gate]])))
    if error_source == "depolarization":
        # Error per gate from the known depolarizing noise model
        epgs_1q = {'u1': 0, 'u2': p1Q/2, 'u3': 2*p1Q/2}
        epg_2q = p2Q*3/4
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 2],
            list_epgs_1q=[epgs_1q, epgs_1q])
    else:  # "from_T1_T2"
        # Predicted primitive gate errors from the coherence limit
        u2_error = rb.rb_utils.coherence_limit(1, [t1], [t2], gate1Q)
        u3_error = rb.rb_utils.coherence_limit(1, [t1], [t2], 2*gate1Q)
        epg_2q = rb.rb_utils.coherence_limit(2, [t1, t1], [t2, t2], gate2Q)
        epgs_1q = {'u1': 0, 'u2': u2_error, 'u3': u3_error}
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 1],
            list_epgs_1q=[epgs_1q, epgs_1q])
    return pred_epc
def get_and_run_seeds(rb_circs, shots, backend, coupling_map,
                      basis_gates, noise_model, retrieve_list=None):
    """Transpile every RB seed and run it (or retrieve previously-run jobs).

    Parameters
    ----------
    rb_circs : list of per-seed circuit lists from randomized_benchmarking_seq.
    shots : int, shots per circuit.
    backend, coupling_map, basis_gates : qiskit execution settings.
    noise_model : Aer noise model, or None to indicate a real-hardware run
        (no noise model argument is passed in that case).
    retrieve_list : optional list of job ids, one per seed; when given,
        results are fetched from the backend instead of executing new jobs.

    Returns
    -------
    (result_list, transpile_list)

    Fixes: mutable default argument `retrieve_list=[]` replaced by None
    (backward compatible), and `noise_model == None` replaced by the
    identity test `is None`.
    """
    if retrieve_list is None:
        retrieve_list = []
    result_list = []
    transpile_list = []
    for rb_seed, rb_circ_seed in enumerate(rb_circs):
        print('Compiling seed %d' % rb_seed)
        rb_circ_transpile = qiskit.transpile(rb_circ_seed,
                                             optimization_level=0,
                                             basis_gates=basis_gates)
        print('Runing seed %d' % rb_seed)
        if not retrieve_list:
            if noise_model is None:  # hardware run: no noise model argument
                job = qiskit.execute(rb_circ_transpile,
                                     shots=shots,
                                     backend=backend,
                                     coupling_map=coupling_map,
                                     basis_gates=basis_gates)
            else:
                job = qiskit.execute(rb_circ_transpile,
                                     shots=shots,
                                     backend=backend,
                                     coupling_map=coupling_map,
                                     noise_model=noise_model,
                                     basis_gates=basis_gates)
            job_monitor(job)
        else:
            job = backend.retrieve_job(retrieve_list[rb_seed])
        result_list.append(job.result())
        transpile_list.append(rb_circ_transpile)
    print("Finished Jobs")
    return result_list, transpile_list
def get_count_data(result_list, nCliffs):
    """Build the (seeds x lengths) matrix of pooled ground-state counts.

    Accepts every bitstring that counts as "ground state" across the 1Q, 2Q
    and 3Q interleave layouts ('100' is accepted because q2 is measured into
    c1 in the 3Q pattern -- see the sibling notebook variant).
    """
    valid_bitstrings = ('0', '00', '000', '100')
    rows = []
    for result in result_list:
        counts_by_length = result.get_counts()
        row = []
        for length_idx in range(len(nCliffs)):
            row.append(sum(count
                           for bits, count in counts_by_length[length_idx].items()
                           if bits in valid_bitstrings))
        rows.append(row)
    return np.array(rows)
# This section for the LS fit in this model pooling
# data from 2Q and 3Q interleave processes
def func(x, a, b, c):
    """Exponential RB decay model: a * b**x + c."""
    decay = b ** x
    return a * decay + c
def epc_fitter_when_mixed_2Q_3Q_RB(X, Y1, Y2, shots, check_plot=False):
    """Least-squares interleaved-RB EPC estimate.

    X is the sequence of Clifford lengths; Y1/Y2 are (seeds, lengths) count
    matrices for the reference and interleaved experiments.  Returns
    (epc_estimate, epc_error); optionally plots both data sets.
    """
    # Clifford lengths tiled once per seed, matching the raveled counts
    x_rep = np.array(list(X) * Y1.shape[0])  # must be something simpler

    def _decay_fit(counts):
        # Fit the exponential decay model to the observed frequencies.
        freqs = np.ravel(counts) / shots
        params, cov = curve_fit(func, x_rep, freqs)
        return params, np.sqrt(np.diag(cov)), freqs

    popt, perr, ydata1 = _decay_fit(Y1)
    popt2, perr2, ydata2 = _decay_fit(Y2)
    if check_plot:
        import matplotlib.pyplot as plt
        plt.plot(x_rep, ydata1, 'bx', label='Reference')
        plt.plot(x_rep, ydata2, 'r+', label='Interleave')
        plt.plot(X, np.mean(Y1, axis=0)/shots, 'b-', label=None)
        plt.plot(X, np.mean(Y2, axis=0)/shots, 'r-', label=None)
        plt.ylabel('Population of |00>')
        plt.xlabel('Number of Cliffords')
        plt.legend()
        plt.show()
    print(popt[1])
    print(perr[1])
    print(popt2[1])
    print(perr2[1])
    # Interleaved-RB formula: the interleaved-gate alpha is the ratio of the
    # interleaved and reference decay parameters.
    alpha_ratio = popt2[1]/popt[1]
    epc_est_fitter = 3*(1 - alpha_ratio)/4
    epc_est_fitter_err = 3*(alpha_ratio)/4 * (np.sqrt(perr[1]**2 + perr2[1]**2))
    return epc_est_fitter, epc_est_fitter_err
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error, thermal_relaxation_error
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
import copy
# import the bayesian packages
import pymc3 as pm
import arviz as az
# Connect to the IBM Quantum account and select the target device.
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
from qiskit.tools.monitor import job_monitor
device = provider.get_backend('ibmq_lima')
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator (fixed seed for reproducible sampling)
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
def obtain_priors_and_data_from_fitter(printout=True):
    """Derive Bayesian priors and the count matrix from the global LSF fitter.

    NOTE(review): this notebook variant reads the module-level globals
    nCliffs, rbfit and shots (later variants pass them as parameters).
    Returns (m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB,
    cov_AB, sigma_theta).
    """
    m_gates = copy.deepcopy(nCliffs)
    # We choose the count matrix corresponding to 2 Qubit RB
    Y = (np.array(rbfit._raw_data[0])*shots).astype(int)
    # alpha prior and bounds: LSF point estimate +/- 2 sigma
    alpha_ref = rbfit._fit[0]['params'][1]
    alpha_lower = alpha_ref - 2*rbfit._fit[0]['params_err'][1]  # modified for real
    alpha_upper = alpha_ref + 2*rbfit._fit[0]['params_err'][1]  # modified for real
    # priors for A and B (drop alpha at index 1 of the fit vectors)
    mu_AB = np.delete(rbfit._fit[0]['params'], 1)
    cov_AB = np.delete(rbfit._fit[0]['params_err'], 1)**2
    # prior spread for the hierarchical Beta layer
    sigma_theta = 0.004
    if printout:
        print("priors:\nalpha_ref", alpha_ref)
        print("alpha_lower", alpha_lower, "alpha_upper", alpha_upper)
        print("A,B", mu_AB, "\ncov A,B", cov_AB)
        print("sigma_theta", sigma_theta)
    return m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta
def get_bayesian_model(model_type):
    """Build the pooled or hierarchical PyMC3 RB model from module globals.

    NOTE(review): reads globals Y, shots, m_gates, mu_AB, cov_AB, alpha_ref,
    alpha_lower, alpha_upper, sigma_theta.  A model_type other than "pooled"
    or "hierarchical" leaves `theta`/`total_shots` unbound and raises at the
    pm.Binomial call.
    """
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RB_model = pm.Model()
    with RB_model:
        # Priors for unknown model parameters
        alpha = pm.Uniform("alpha", lower=alpha_lower,
                           upper=alpha_upper, testval=alpha_ref)
        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)
        AB = BoundedMvNormal("AB", mu=mu_AB, testval=mu_AB,
                             cov=np.diag(cov_AB),
                             shape=(2))
        # Expected value of outcome: exponential GSP decay
        GSP = AB[0]*alpha**m_gates + AB[1]
        if model_type == "pooled":
            total_shots = np.full(Y.shape, shots)
            theta = GSP
        elif model_type == "hierarchical":
            total_shots = np.full(Y.shape, shots)
            # per-length Beta layer around the decay curve
            theta = pm.Beta("GSP",
                            mu=GSP,
                            sigma=sigma_theta,
                            shape=Y.shape[1])
        # Likelihood (sampling distribution) of observations
        p = pm.Binomial("Counts", p=theta, observed=Y,
                        n=total_shots)
    return RB_model
def get_trace(RB_model):
    """Sample the model with NUTS (2000 draws, 10000 tuning steps), plot the
    trace, and return the InferenceData.

    See https://docs.pymc.io/notebooks/sampler-stats.html and
    https://docs.pymc.io/notebooks/api_quickstart.html
    """
    with RB_model:
        sampled = pm.sample(draws=2000, tune=10000, target_accept=0.9,
                            return_inferencedata=True)
    with RB_model:
        az.plot_trace(sampled)
    return sampled
def get_summary(RB_model, trace, hdi_prob=.94, kind='all'):
    """ArviZ summary of `trace`, rounded to 4 decimals (hdi_prob=.94 is default)."""
    with RB_model:
        return az.summary(trace, round_to=4, hdi_prob=hdi_prob, kind=kind)
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    """Two-qubit error per Clifford: EPC = 3*(1-alpha)/4."""
    depolarization = 1 - alpha
    return 3 * depolarization / 4
def get_EPC_and_legends(azs):
    """EPC numbers and legend strings from the Bayes summary `azs`.

    NOTE(review): reads the module-level globals rbfit and pred_epc.
    """
    EPC_Bayes = alpha_to_EPC(azs['mean']['alpha'])
    # 1-sigma EPC error derived from the posterior sd of alpha
    EPC_Bayes_err = EPC_Bayes - alpha_to_EPC(azs['mean']['alpha']+azs['sd']['alpha'])
    Bayes_legend = "EPC Bayes {0:.5f} ({1:.5f})".format(EPC_Bayes, EPC_Bayes_err)
    # NOTE(review): mixes the public `fit` and private `_fit` accessors --
    # they appear to expose the same data; confirm against the fitter API.
    Fitter_legend = "EPC Fitter {0:.5f} ({1:.5f})".format(rbfit.fit[0]['epc'],
                                                          rbfit._fit[0]['epc_err'])
    pred_epc_legend = "EPC predicted {0:.5f}".format(pred_epc)
    return EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend, pred_epc_legend
def EPC_compare_fitter_to_bayes(RB_model, azs, trace):
    """Plot the posterior EPC distribution with the least-squares EPC (red line).

    NOTE(review): reads the global alpha_ref; the predicted-EPC overlay is
    commented out here (WIP).
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend, pred_epc_legend = get_EPC_and_legends(azs)
    with RB_model:
        az.plot_posterior(trace, var_names=['alpha'], round_to=4,
                          transform=alpha_to_EPC, point_estimate=None)
        plt.title("Error per Clifford")
        # Red line: EPC from the least-squares fit
        plt.axvline(x=alpha_to_EPC(alpha_ref), color='red')
        #plt.axvline(x=pred_epc,color='green') # WIP
        #plt.legend((Bayes_legend, "Higher density interval",Fitter_legend, pred_epc_legend), fontsize=10 )# WIP
        plt.legend((Bayes_legend, "Higher density interval", Fitter_legend), fontsize=10)
    plt.show()
def GSP_compare_fitter_to_bayes(RB_model, azs):
    """Plot observed GSP vs Clifford length with the Bayesian and LSF curves.

    NOTE(review): reads module-level globals m_gates, Y, shots, mu_AB,
    alpha_ref and nseeds.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend, _ = get_EPC_and_legends(azs)
    # plot ground state population ~ Clifford length
    fig, axes = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
    axes.set_ylabel("Ground State Population")
    axes.set_xlabel("Clifford Length")
    # Mean observed survival frequency per length
    axes.plot(m_gates, np.mean(Y/shots, axis=0), 'r.')
    # Bayesian posterior-mean decay curve
    axes.plot(m_gates, azs['mean']['AB[0]']*azs['mean']['alpha']**m_gates+azs['mean']['AB[1]'], '--')
    #axes.plot(m_gates,azs['mean']['GSP'],'--') # WIP
    #axes.errorbar(m_gates, azs['mean']['GSP'], azs['sd']['GSP'], linestyle='None', marker='^') # WIP
    # Least-squares-fit decay curve
    axes.plot(m_gates, mu_AB[0]*np.power(alpha_ref, m_gates)+mu_AB[1], ':')
    # Raw per-seed frequencies, x-shifted slightly for visibility
    for i_seed in range(nseeds):
        plt.scatter(m_gates-0.25, Y[i_seed, :]/shots, label="data", marker="x")
    axes.legend(["Mean Observed Frequencies",
                 "Bayesian Model\n"+Bayes_legend,
                 "Fitter Model\n"+Fitter_legend], fontsize=12)
    #axes.set_title('2 Qubit RB with T1/T2 Noise', fontsize=18) # WIP
def get_predicted_EPC(error_source):
    """Predict the 2Q EPC from the known depolarizing noise model
    ("depolarization") or from the coherence limit ("from_T1_T2").

    NOTE(review): any other error_source reaches `return pred_epc` with the
    name unbound and raises UnboundLocalError.  Reads module-level globals
    transpile_list, xdata, basis_gates, rb_opts, rb_pattern and the noise
    parameters (p1Q/p2Q or t1/t2/gate1Q/gate2Q).
    """
    # Count the number of single and 2Q gates in the 2Q Cliffords
    gates_per_cliff = rb.rb_utils.gates_per_clifford(transpile_list, xdata[0], basis_gates, rb_opts['rb_pattern'][0])
    for basis_gate in basis_gates:
        print("Number of %s gates per Clifford: %f " % (
            basis_gate,
            np.mean([gates_per_cliff[rb_pattern[0][0]][basis_gate],
                     gates_per_cliff[rb_pattern[0][1]][basis_gate]])))
    # Calculate the predicted epc
    # from the known depolarizing errors on the simulation
    if error_source == "depolarization":
        # Error per gate from noise model
        epgs_1q = {'u1': 0, 'u2': p1Q/2, 'u3': 2*p1Q/2}
        epg_2q = p2Q*3/4
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 2],
            list_epgs_1q=[epgs_1q, epgs_1q])
    # using the predicted primitive gate errors from the coherence limit
    if error_source == "from_T1_T2":
        # Predicted primitive gate errors from the coherence limit
        u2_error = rb.rb_utils.coherence_limit(1, [t1], [t2], gate1Q)
        u3_error = rb.rb_utils.coherence_limit(1, [t1], [t2], 2*gate1Q)
        epg_2q = rb.rb_utils.coherence_limit(2, [t1, t1], [t2, t2], gate2Q)
        epgs_1q = {'u1': 0, 'u2': u2_error, 'u3': u3_error}
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 1],
            list_epgs_1q=[epgs_1q, epgs_1q])
    return pred_epc
def get_count_data(result_list):
    """Ground-state count matrix (seeds x lengths) from the current experiment.

    NOTE(review): reads module-level globals nQ and nCliffs.
    Fixes two defects of the original:
      * an unsupported nQ left `list_bitstring` unbound (NameError) -- now a
        clear ValueError;
      * a bitstring absent from a counts dict (a state never observed in any
        shot) raised KeyError -- now counted as zero.
    """
    # Ground-state bitstrings per register size; '100' is accepted for 3Q
    # because q2 is measured into c1.
    if nQ == 2:
        list_bitstring = ['00']
    elif nQ == 3:
        list_bitstring = ['000', '100']  # because q2 measured in c1
    else:
        raise ValueError("unsupported qubit count nQ=%r" % (nQ,))
    Y_list = []
    for rbseed, result in enumerate(result_list):
        row_list = []
        for c_index, c_value in enumerate(nCliffs):
            counts = result.get_counts()[c_index]
            total_counts = sum(counts.get(bitstring, 0) for bitstring in list_bitstring)
            row_list.append(total_counts)
        Y_list.append(row_list)
    return np.array(Y_list)
# ---- 2-qubit RB experiment design and execution on the real device ----
#Number of qubits
nQ = 2
#There are 2 qubits: Q0,Q1.
#Number of seeds (random sequences)
nseeds = 10 # more data for the Rev. Mr. Bayes
#Number of Cliffords in the sequence (start, stop, steps)
nCliffs = np.arange(1,200,20)
#2Q RB Q0,Q1
rb_pattern = [[0,1]]
length_multiplier = 1
rb_opts = {}
rb_opts['length_vector'] = nCliffs
rb_opts['nseeds'] = nseeds
rb_opts['rb_pattern'] = rb_pattern
rb_opts['length_multiplier'] = length_multiplier
# Generate the RB circuit sequences and their Clifford-length metadata
rb_circs, xdata = rb.randomized_benchmarking_seq(**rb_opts)
backend = device
basis_gates = ['u1','u2','u3','cx'] # use U,CX for now
shots = 1024
result_list = []
transpile_list = []
import time
# Transpile and execute each RB seed on the real backend
for rb_seed, rb_circ_seed in enumerate(rb_circs):
    print('Compiling seed %d' % rb_seed)
    rb_circ_transpile = qiskit.transpile(rb_circ_seed,
                                         optimization_level=0,
                                         basis_gates=basis_gates)
    print('Runing seed %d' % rb_seed)
    job = qiskit.execute(rb_circ_transpile,
                         shots=shots,
                         backend=backend)
    job_monitor(job)
    result_list.append(job.result())
    transpile_list.append(rb_circ_transpile)
print("Finished Real Jobs")
print(rb_circs[0][0])
#Create an RBFitter object (least-squares fit of the decay)
rbfit = rb.RBFitter(result_list, xdata, rb_opts['rb_pattern'])
m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta =\
    obtain_priors_and_data_from_fitter(printout = True)
### a check of the count matrix against the fitter's raw data
np.sum((Y == (get_count_data(result_list)))*1) == Y.size
# ---- Bayesian inference on the device data: pooled vs hierarchical ----
pooled = get_bayesian_model("pooled")
pm.model_to_graphviz(pooled)
trace_p = get_trace(pooled)
azp_summary = get_summary(pooled, trace_p)
azp_summary
hierarchical = get_bayesian_model("hierarchical")
pm.model_to_graphviz(hierarchical)
trace_h = get_trace(hierarchical)
azh_summary = get_summary(hierarchical, trace_h)
azh_summary
# Leave-one-out Cross-validation (LOO) comparison
df_comp_loo = az.compare({"hierarchical": trace_h, "pooled": trace_p})
df_comp_loo
az.plot_compare(df_comp_loo, insample_dev=False);
# predict EPC from the noisy model
#pred_epc = get_predicted_EPC(error_source = 'from_T1_T2') # this was for a noise model
pred_epc = 0.0165 # will not appear on graphs for real device but at this point functions need value (WIP)
print("Fake 2Q Error per Clifford: %e"%pred_epc)
EPC_compare_fitter_to_bayes(pooled, azp_summary, trace_p)
EPC_compare_fitter_to_bayes(hierarchical, azh_summary, trace_h)
GSP_compare_fitter_to_bayes(pooled, azp_summary)
GSP_compare_fitter_to_bayes(hierarchical, azh_summary)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
%load_ext watermark
%watermark -n -u -v -iv -w
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error, thermal_relaxation_error
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
import copy
# import the bayesian packages
import pymc3 as pm
import arviz as az
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator (fixed seed for reproducible sampling)
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
def obtain_priors_and_data_from_fitter(printout=True):
    """Derive Bayesian priors and the count matrix from the global LSF fitter.

    NOTE(review): reads the module-level globals nCliffs, rbfit and shots.
    This variant uses a +/- 5 sigma window for the alpha prior support.
    """
    m_gates = copy.deepcopy(nCliffs)
    # We choose the count matrix corresponding to 2 Qubit RB
    Y = (np.array(rbfit._raw_data[0])*shots).astype(int)
    # alpha prior and bounds: LSF point estimate +/- 5 sigma
    alpha_ref = rbfit._fit[0]['params'][1]
    alpha_lower = alpha_ref - 5*rbfit._fit[0]['params_err'][1]
    alpha_upper = alpha_ref + 5*rbfit._fit[0]['params_err'][1]
    # priors for A and B (drop alpha at index 1 of the fit vectors)
    mu_AB = np.delete(rbfit._fit[0]['params'], 1)
    cov_AB = np.delete(rbfit._fit[0]['params_err'], 1)**2
    # prior spread for the hierarchical Beta layer
    sigma_theta = 0.004
    if printout:
        print("priors:\nalpha_ref", alpha_ref)
        print("alpha_lower", alpha_lower, "alpha_upper", alpha_upper)
        print("A,B", mu_AB, "\ncov A,B", cov_AB)
        print("sigma_theta", sigma_theta)
    return m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta
def get_bayesian_model(model_type):
    """Build the pooled or hierarchical PyMC3 RB model from module globals.

    NOTE(review): reads globals Y, shots, m_gates, mu_AB, cov_AB, alpha_ref,
    alpha_lower, alpha_upper, sigma_theta.  A model_type other than "pooled"
    or "hierarchical" leaves `theta`/`total_shots` unbound and raises at the
    pm.Binomial call.
    """
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RB_model = pm.Model()
    with RB_model:
        # Priors for unknown model parameters
        alpha = pm.Uniform("alpha", lower=alpha_lower,
                           upper=alpha_upper, testval=alpha_ref)
        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)
        AB = BoundedMvNormal("AB", mu=mu_AB, testval=mu_AB,
                             cov=np.diag(cov_AB),
                             shape=(2))
        # Expected value of outcome: exponential GSP decay
        GSP = AB[0]*alpha**m_gates + AB[1]
        if model_type == "pooled":
            total_shots = np.full(Y.shape, shots)
            theta = GSP
        elif model_type == "hierarchical":
            total_shots = np.full(Y.shape, shots)
            # per-length Beta layer around the decay curve
            theta = pm.Beta("GSP",
                            mu=GSP,
                            sigma=sigma_theta,
                            shape=Y.shape[1])
        # Likelihood (sampling distribution) of observations
        p = pm.Binomial("Counts", p=theta, observed=Y,
                        n=total_shots)
    return RB_model
def get_trace(RB_model):
    """NUTS-sample the model (2000 draws, 10000 tuning steps), plot the trace,
    and return the InferenceData.

    See https://docs.pymc.io/notebooks/sampler-stats.html and
    https://docs.pymc.io/notebooks/api_quickstart.html
    """
    with RB_model:
        sampled = pm.sample(draws=2000, tune=10000, target_accept=0.9,
                            return_inferencedata=True)
    with RB_model:
        az.plot_trace(sampled)
    return sampled
def get_summary(RB_model, trace, hdi_prob=.94, kind='all'):
    """ArviZ summary of `trace`, rounded to 4 decimals (hdi_prob=.94 is default)."""
    with RB_model:
        return az.summary(trace, round_to=4, hdi_prob=hdi_prob, kind=kind)
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    """Two-qubit error per Clifford: EPC = 3*(1-alpha)/4."""
    depolarization = 1 - alpha
    return 3 * depolarization / 4
def get_EPC_and_legends(azs):
    """EPC numbers and legend strings from the Bayes summary `azs`.

    NOTE(review): reads the module-level globals rbfit and pred_epc.
    """
    EPC_Bayes = alpha_to_EPC(azs['mean']['alpha'])
    # 1-sigma EPC error derived from the posterior sd of alpha
    EPC_Bayes_err = EPC_Bayes - alpha_to_EPC(azs['mean']['alpha']+azs['sd']['alpha'])
    Bayes_legend = "EPC Bayes {0:.5f} ({1:.5f})".format(EPC_Bayes, EPC_Bayes_err)
    # NOTE(review): mixes the public `fit` and private `_fit` accessors --
    # they appear to expose the same data; confirm against the fitter API.
    Fitter_legend = "EPC Fitter {0:.5f} ({1:.5f})".format(rbfit.fit[0]['epc'],
                                                          rbfit._fit[0]['epc_err'])
    pred_epc_legend = "EPC predicted {0:.5f}".format(pred_epc)
    return EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend, pred_epc_legend
def EPC_compare_fitter_to_bayes(RB_model, azs, trace):
    """Plot the posterior EPC with the LSF EPC (red) and the noise-model
    predicted EPC (green) as vertical reference lines.

    NOTE(review): reads the module-level globals alpha_ref and pred_epc.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend, pred_epc_legend = get_EPC_and_legends(azs)
    with RB_model:
        az.plot_posterior(trace, var_names=['alpha'], round_to=4,
                          transform=alpha_to_EPC, point_estimate=None)
        plt.title("Error per Clifford")
        # Red line: EPC from the least-squares fit
        plt.axvline(x=alpha_to_EPC(alpha_ref), color='red')
        # Green line: EPC predicted from the noise model
        plt.axvline(x=pred_epc, color='green')
        plt.legend((Bayes_legend, "Higher density interval", Fitter_legend, pred_epc_legend), fontsize=10)
    plt.show()
def GSP_compare_fitter_to_bayes(RB_model, azs):
    """Plot observed GSP vs Clifford length with the Bayesian and LSF curves.

    NOTE(review): reads module-level globals m_gates, Y, shots, mu_AB,
    alpha_ref and nseeds.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend, _ = get_EPC_and_legends(azs)
    # plot ground state population ~ Clifford length
    fig, axes = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
    axes.set_ylabel("Ground State Population")
    axes.set_xlabel("Clifford Length")
    # Mean observed survival frequency per length
    axes.plot(m_gates, np.mean(Y/shots, axis=0), 'r.')
    # Bayesian posterior-mean decay curve
    axes.plot(m_gates, azs['mean']['AB[0]']*azs['mean']['alpha']**m_gates+azs['mean']['AB[1]'], '--')
    # Least-squares-fit decay curve
    axes.plot(m_gates, mu_AB[0]*np.power(alpha_ref, m_gates)+mu_AB[1], ':')
    # Raw per-seed frequencies
    for i_seed in range(nseeds):
        plt.scatter(m_gates, Y[i_seed, :]/shots, label="data", marker="x")
    axes.legend(["Mean Observed Frequencies",
                 "Bayesian Model\n"+Bayes_legend,
                 "Fitter Model\n"+Fitter_legend], fontsize=12)
    #axes.set_title('2 Qubit RB with T1/T2 Noise', fontsize=18)
def get_predicted_EPC(error_source):
    """Predict the 2Q EPC from the known depolarizing noise model
    ("depolarization") or from the coherence limit ("from_T1_T2").

    NOTE(review): any other error_source reaches `return pred_epc` with the
    name unbound and raises UnboundLocalError.  Reads module-level globals
    transpile_list, xdata, basis_gates, rb_opts, rb_pattern and the noise
    parameters (p1Q/p2Q or t1/t2/gate1Q/gate2Q).
    """
    # Count the number of single and 2Q gates in the 2Q Cliffords
    gates_per_cliff = rb.rb_utils.gates_per_clifford(transpile_list, xdata[0], basis_gates, rb_opts['rb_pattern'][0])
    for basis_gate in basis_gates:
        print("Number of %s gates per Clifford: %f " % (
            basis_gate,
            np.mean([gates_per_cliff[rb_pattern[0][0]][basis_gate],
                     gates_per_cliff[rb_pattern[0][1]][basis_gate]])))
    # Calculate the predicted epc
    # from the known depolarizing errors on the simulation
    if error_source == "depolarization":
        # Error per gate from noise model
        epgs_1q = {'u1': 0, 'u2': p1Q/2, 'u3': 2*p1Q/2}
        epg_2q = p2Q*3/4
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 2],
            list_epgs_1q=[epgs_1q, epgs_1q])
    # using the predicted primitive gate errors from the coherence limit
    if error_source == "from_T1_T2":
        # Predicted primitive gate errors from the coherence limit
        u2_error = rb.rb_utils.coherence_limit(1, [t1], [t2], gate1Q)
        u3_error = rb.rb_utils.coherence_limit(1, [t1], [t2], 2*gate1Q)
        epg_2q = rb.rb_utils.coherence_limit(2, [t1, t1], [t2, t2], gate2Q)
        epgs_1q = {'u1': 0, 'u2': u2_error, 'u3': u3_error}
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 1],
            list_epgs_1q=[epgs_1q, epgs_1q])
    return pred_epc
# ---- Simultaneous 2Q+1Q RB design with a depolarizing-noise simulation ----
#Number of qubits
nQ = 3
#There are 3 qubits: Q0,Q1,Q2.
#Number of seeds (random sequences)
nseeds = 8
#Number of Cliffords in the sequence (start, stop, steps)
nCliffs = np.arange(1,200,20)
#2Q RB on Q0,Q2 and 1Q RB on Q1
rb_pattern = [[0,2],[1]]
#Do three times as many 1Q Cliffords
length_multiplier = [1,3]
rb_pattern[0][1]
rb_opts = {}
rb_opts['length_vector'] = nCliffs
rb_opts['nseeds'] = nseeds
rb_opts['rb_pattern'] = rb_pattern
rb_opts['length_multiplier'] = length_multiplier
# Generate the RB circuit sequences and their Clifford-length metadata
rb_circs, xdata = rb.randomized_benchmarking_seq(**rb_opts)
print(rb_circs[0][0])
# Depolarizing noise model: 1Q error on u2/u3, 2Q error on cx
noise_model = NoiseModel()
p1Q = 0.004 # this was doubled with respect to the original example
p2Q = 0.02 # this was doubled with respect to the original example
noise_model.add_all_qubit_quantum_error(depolarizing_error(p1Q, 1), 'u2')
noise_model.add_all_qubit_quantum_error(depolarizing_error(2*p1Q, 1), 'u3')
noise_model.add_all_qubit_quantum_error(depolarizing_error(p2Q, 2), 'cx')
backend = qiskit.Aer.get_backend('qasm_simulator')
basis_gates = ['u1','u2','u3','cx'] # use U,CX for now
shots = 1024
result_list = []
transpile_list = []
import time
# Transpile and simulate each RB seed under the noise model
for rb_seed, rb_circ_seed in enumerate(rb_circs):
    print('Compiling seed %d' % rb_seed)
    rb_circ_transpile = qiskit.transpile(rb_circ_seed,
                                         basis_gates=basis_gates)
    print('Simulating seed %d' % rb_seed)
    job = qiskit.execute(rb_circ_transpile, noise_model=noise_model,
                         shots=shots,
                         backend=backend, max_parallel_experiments=0)
    result_list.append(job.result())
    transpile_list.append(rb_circ_transpile)
print("Finished Simulating")
#Create an RBFitter object (least-squares fit of the decay)
rbfit = rb.fitters.RBFitter(result_list, xdata, rb_opts['rb_pattern'])
m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta =\
    obtain_priors_and_data_from_fitter(printout = True)
# Bayesian inference: pooled vs hierarchical models
pooled = get_bayesian_model("pooled")
pm.model_to_graphviz(pooled)
trace_p = get_trace(pooled)
azp_summary = get_summary(pooled, trace_p)
azp_summary
hierarchical = get_bayesian_model("hierarchical")
pm.model_to_graphviz(hierarchical)
trace_h = get_trace(hierarchical)
azh_summary = get_summary(hierarchical, trace_h)
azh_summary
# Leave-one-out Cross-validation (LOO) comparison
df_comp_loo = az.compare({"hierarchical": trace_h, "pooled": trace_p})
df_comp_loo
az.plot_compare(df_comp_loo, insample_dev=False);
# predict EPC from the noisy model
pred_epc = get_predicted_EPC(error_source = 'depolarization')
print("Predicted 2Q Error per Clifford: %e"%pred_epc)
EPC_compare_fitter_to_bayes(pooled, azp_summary, trace_p)
EPC_compare_fitter_to_bayes(hierarchical, azh_summary, trace_h)
GSP_compare_fitter_to_bayes(pooled, azp_summary)
GSP_compare_fitter_to_bayes(hierarchical, azh_summary)
# ---- 2-qubit RB design with a T1/T2 thermal-relaxation noise simulation ----
#Number of qubits
nQ = 2
#There are 2 qubits: Q0,Q1.
#Number of seeds (random sequences)
nseeds = 10 # more data for the Rev. Mr. Bayes
#Number of Cliffords in the sequence (start, stop, steps)
nCliffs = np.arange(1,200,20)
#2Q RB Q0,Q1
rb_pattern = [[0,1]]
length_multiplier = 1
rb_opts = {}
rb_opts['length_vector'] = nCliffs
rb_opts['nseeds'] = nseeds
rb_opts['rb_pattern'] = rb_pattern
rb_opts['length_multiplier'] = length_multiplier
# Generate the RB circuit sequences and their Clifford-length metadata
rb_circs, xdata = rb.randomized_benchmarking_seq(**rb_opts)
noise_model = NoiseModel()
#Add T1/T2 noise to the simulation
t1 = 100.
t2 = 80.
gate1Q = 0.2 # this was doubled with respect to the original example
gate2Q = 1.0 # this was doubled with respect to the original example
noise_model.add_all_qubit_quantum_error(thermal_relaxation_error(t1,t2,gate1Q), 'u2')
noise_model.add_all_qubit_quantum_error(thermal_relaxation_error(t1,t2,2*gate1Q), 'u3')
noise_model.add_all_qubit_quantum_error(
    thermal_relaxation_error(t1,t2,gate2Q).tensor(thermal_relaxation_error(t1,t2,gate2Q)), 'cx')
backend = qiskit.Aer.get_backend('qasm_simulator')
basis_gates = ['u1','u2','u3','cx'] # use U,CX for now
shots = 1024 # a typical experimental value
result_list = []
transpile_list = []
# Transpile and simulate each RB seed under the T1/T2 noise model
for rb_seed, rb_circ_seed in enumerate(rb_circs):
    print('Compiling seed %d' % rb_seed)
    rb_circ_transpile = qiskit.transpile(rb_circ_seed, basis_gates=basis_gates)
    print('Simulating seed %d' % rb_seed)
    job = qiskit.execute(rb_circ_transpile, noise_model=noise_model, shots=shots,
                         backend=backend, max_parallel_experiments=0)
    result_list.append(job.result())
    transpile_list.append(rb_circ_transpile)
print("Finished Simulating")
print(rb_circs[0][0])
#Create an RBFitter object (least-squares fit of the decay)
rbfit = rb.RBFitter(result_list, xdata, rb_opts['rb_pattern'])
m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta =\
    obtain_priors_and_data_from_fitter(printout = True)
# Bayesian inference: pooled vs hierarchical models
pooled = get_bayesian_model("pooled")
pm.model_to_graphviz(pooled)
trace_p = get_trace(pooled)
azp_summary = get_summary(pooled, trace_p)
azp_summary
hierarchical = get_bayesian_model("hierarchical")
pm.model_to_graphviz(hierarchical)
trace_h = get_trace(hierarchical)
azh_summary = get_summary(hierarchical, trace_h)
azh_summary
# Leave-one-out Cross-validation (LOO) comparison
df_comp_loo = az.compare({"hierarchical": trace_h, "pooled": trace_p})
df_comp_loo
az.plot_compare(df_comp_loo, insample_dev=False);
# predict EPC from the noisy model (coherence limit from T1/T2)
pred_epc = get_predicted_EPC(error_source = 'from_T1_T2')
print("Predicted 2Q Error per Clifford: %e"%pred_epc)
EPC_compare_fitter_to_bayes(pooled, azp_summary, trace_p)
EPC_compare_fitter_to_bayes(hierarchical, azh_summary, trace_h)
GSP_compare_fitter_to_bayes(pooled, azp_summary)
GSP_compare_fitter_to_bayes(hierarchical, azh_summary)
%load_ext watermark
%watermark -n -u -v -iv -w
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
import qiskit_experiments as qe
rb = qe.randomized_benchmarking
from scipy.optimize import curve_fit
# import the bayesian packages
import pymc3 as pm
import arviz as az
import bayesian_fitter as bf
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator (fixed seed for reproducible sampling)
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
# choice of "simulation, "real", "retrieve"
option = "simulation"
# Determine the backend: a mock device (FakeAthens) when simulating,
# otherwise the real IBMQ device.
if option == "simulation":
    from qiskit.test.mock import FakeAthens
    backend = FakeAthens()
    hardware = 'ibmq_athens' # hardware reference
else:
    from qiskit import IBMQ
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    device = provider.get_backend('ibmq_athens')
    backend = device
    hardware = device.name() # hardware used
# RB design: single qubit 0, ten Clifford lengths, five samples per length
q_list = [0]
lengths = [1, 50, 100, 200, 300, 400, 500, 600, 800, 1000]
num_samples = 5
seed = 1010
num_qubits = len(q_list)
# Depolarizing scale factor (d-1)/d used to convert alpha to EPC
scale = (2 ** num_qubits - 1) / (2 ** num_qubits)
shots = 1024
# Run the qiskit-experiments RB experiment unless retrieving an old job
if option != "retrieve":
    exp = rb.RBExperiment(q_list, lengths, num_samples=num_samples, seed=seed)
    eda = exp.run(backend)
# obtain EPC from alpha (used by plot_posterior); uses the module-level
# depolarizing scale factor (d-1)/d computed from the qubit count.
def alpha_to_EPC(alpha):
    """Error per Clifford: EPC = scale * (1 - alpha)."""
    depolarization = 1 - alpha
    return scale * depolarization
# one or two qubits for retrieve and for the plots
list_bitstring = ['0', '00']
# obtain the current count values
Y_list = []
if option == "simulation":
for i_sample in range(num_samples*len(lengths)):
Y_list.append(eda.data[i_sample]['counts']\
[eda.data[i_sample]['metadata']['ylabel']])
else: # specify job (fresh job or run some time ago)
job = backend.retrieve_job('6097aed0a4885edfb19508fa') # athens 01'
for rbseed, result in enumerate(job.result().get_counts()):
total_counts = 0
for key,val in result.items():
if key in list_bitstring:
total_counts += val
Y_list.append(total_counts)
Y = np.array(Y_list).reshape(num_samples, len(lengths))
#get LSF EPC and priors
if option != "retrieve":
popt = eda._analysis_results[0]['popt']
pcov = eda._analysis_results[0]['pcov']
else: # manual entry (here for job ''6097aed0a4885edfb19508fa')
popt = [0.7207075, 0.95899375, 0.2545933 ]
pcov = [[ 2.53455272e-04, -9.10001034e-06, -1.29839515e-05],
[-9.10001034e-06, 9.55193998e-06, -7.07822420e-06],
[-1.29839515e-05, -7.07822420e-06, 2.07452483e-05]]
alpha_ref=popt[1]
mu_AB= [popt[0],popt[2]]
alpha_ref_err = np.sqrt(pcov[1][1])
EPC = scale*(1-alpha_ref)
EPC_err = scale*alpha_ref_err
cov_AB= [0.0001, 0.0001]
alpha_lower=0.8
alpha_upper=0.999
sigma_theta = .004
pooled_model = bf.get_bayesian_model(model_type="pooled",
Y=Y,shots=1024,m_gates=lengths,
alpha_ref = alpha_ref,
mu_AB=mu_AB,cov_AB=cov_AB)
pm.model_to_graphviz(pooled_model)
with pooled_model:
trace_p= pm.sample(draws = 2000, tune= 10000, target_accept=0.97,
return_inferencedata=True)
az.plot_trace(trace_p)
with pooled_model:
azp_summary = az.summary(trace_p, hdi_prob=.94, round_to=6, kind="all")
azp_summary
epc_p =scale*(1 - azp_summary['mean']['alpha'])
epc_p_err = scale* (azp_summary['sd']['alpha'])
with pooled_model:
ax = az.plot_posterior(trace_p, var_names=['alpha'], round_to=4, point_estimate=None,
transform = alpha_to_EPC, textsize = 10.0, color='b')
ax.set_title("RB_process: standard "+str(q_list)+", backend: "+backend.name(),
fontsize=12)
Bayes_legend ="Bayesian Pooled Model:\n EPC {0:1.3e} ± {1:1.3e}".format(epc_p, epc_p_err)
ax.axvline(x=EPC,color='r',ls=":")
Fitter_legend ="Frequentist model:\n EPC {0:1.3e} ± {1:1.3e}".format(EPC, EPC_err)
ax.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
Fitter_legend),fontsize=10 )
hierarchical_model = bf.get_bayesian_model(model_type="hierarchical",
Y=Y,shots=1024,m_gates=lengths,
alpha_ref = alpha_ref,
mu_AB=mu_AB,cov_AB=cov_AB)
pm.model_to_graphviz(hierarchical_model)
with hierarchical_model:
trace_h= pm.sample(draws = 2000, tune= 10000, target_accept=0.99,
return_inferencedata=True)
az.plot_trace(trace_h)
with hierarchical_model:
azh_summary = az.summary(trace_h, hdi_prob=.94, round_to=6,
kind="all", var_names=["~GSP"])
azh_summary
epc_h =scale*(1 - azh_summary['mean']['alpha'])
epc_h_err = scale* (azh_summary['sd']['alpha'])
with hierarchical_model:
ax = az.plot_posterior(trace_h, var_names=['alpha'], round_to=4, point_estimate=None,
transform = alpha_to_EPC, textsize = 10.0, color='b')
ax.set_title("RB_process: standard "+str(q_list)+", backend: "+backend.name(),
fontsize=12)
Bayes_legend ="Bayesian Hierarchical Model:\n EPC {0:1.3e} ± {1:1.3e}".format(epc_h, epc_h_err)
ax.axvline(x=EPC,color='r',ls=":")
Fitter_legend ="Frequentist model:\n EPC {0:1.3e} ± {1:1.3e}".format(EPC, EPC_err)
ax.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
Fitter_legend),fontsize=10 )
# compare LSF and SMC
print("Model: Frequentist Bayesian")
print(" LSF pooled hierarchical")
print("EPC {0:.5f} {1:.5f} {2:.5f}"
.format(EPC, epc_p, epc_h))
print("ERROR ± {0:.5f} ± {1:.5f} ± {2:.5f}"
.format(EPC_err, epc_p_err, epc_h_err))
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
#fig, plt = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
fig, plt = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
plt.set_ylabel("Ground State Population")
plt.set_xlabel("Number of Cliffords")
for i_seed in range(num_samples):
plt.scatter(lengths, Y[i_seed,:]/1024, label = "data",
marker="+",color="purple")
plt.plot(np.array(lengths)-0.5,azp_summary['mean']['AB[0]']\
*azp_summary['mean']['alpha']**lengths+\
azp_summary['mean']['AB[1]']-0.002,'o-',color="c")
plt.plot(np.array(lengths)+0.5,azh_summary['mean']['AB[0]']\
*azh_summary['mean']['alpha']**\
lengths+azh_summary['mean']['AB[1]']+0.002,'o-',color="orange")
plt.legend(("Pooled Model",
"Hierarchical Model"))
plt.set_title("RB_process: standard "+str(q_list)+\
", device: "+hardware+', backend: '+backend.name(),
fontsize=14);
# Leave-one-out Cross-validation (LOO) comparison
df_comp_loo = az.compare({"hierarchical": trace_h, "pooled": trace_p})
df_comp_loo
az.plot_compare(df_comp_loo, insample_dev=False);
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
# --- Flattened notebook cell sequence: 2-qubit standard RB (counts retrieved
# --- from a stored IBMQ job), frequentist vs Bayesian EPC estimation.
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
import qiskit_experiments as qe
rb = qe.randomized_benchmarking
from scipy.optimize import curve_fit
# import the bayesian packages
import pymc3 as pm
import arviz as az
import bayesian_fitter as bf
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
# choice of "simulation", "real", "retrieve"
option = "retrieve"
# Determine the backend
if option == "simulation":
    from qiskit.test.mock import FakeAthens
    backend = FakeAthens()
    hardware = 'ibmq_athens' # hardware reference
else:
    from qiskit import IBMQ
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    device = provider.get_backend('ibmq_athens')
    backend = device
    hardware = device.name() # hardware used
# RB design
q_list = [0,1]
lengths = [1, 20, 40, 60, 80, 100, 150, 200, 250, 300, 350, 400, 450, 500]
num_samples = 5
seed = 1010
num_qubits = len(q_list)
# depolarizing scale factor (2^n - 1)/2^n relating alpha to EPC
scale = (2 ** num_qubits - 1) / (2 ** num_qubits)
shots = 1024
if option != "retrieve":
    exp = rb.RBExperiment(q_list, lengths, num_samples=num_samples, seed=seed)
    eda= exp.run(backend)
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    return scale*(1-alpha)
# one or two qubits for retrieve and for the plots
list_bitstring = ['0', '00']
# obtain the current count values
Y_list = []
if option == "simulation":
    for i_sample in range(num_samples*len(lengths)):
        Y_list.append(eda.data[i_sample]['counts']\
            [eda.data[i_sample]['metadata']['ylabel']])
else: # specify job (fresh job or run some time ago)
    job = backend.retrieve_job('6097aed0a4885edfb19508fa') # athens 01'
    for rbseed, result in enumerate(job.result().get_counts()):
        total_counts = 0
        for key,val in result.items():
            if key in list_bitstring:
                total_counts += val
        Y_list.append(total_counts)
# (seeds x lengths) matrix of ground-state counts
Y = np.array(Y_list).reshape(num_samples, len(lengths))
#get LSF EPC and priors
if option != "retrieve":
    popt = eda._analysis_results[0]['popt']
    pcov = eda._analysis_results[0]['pcov']
else: # manual entry (here for job ''6097aed0a4885edfb19508fa')
    popt = [0.7207075, 0.95899375, 0.2545933 ]
    pcov = [[ 2.53455272e-04, -9.10001034e-06, -1.29839515e-05],
            [-9.10001034e-06, 9.55193998e-06, -7.07822420e-06],
            [-1.29839515e-05, -7.07822420e-06, 2.07452483e-05]]
# popt = (A, alpha, B); index 1 is the depolarizing parameter
alpha_ref=popt[1]
mu_AB= [popt[0],popt[2]]
alpha_ref_err = np.sqrt(pcov[1][1])
EPC = scale*(1-alpha_ref)
EPC_err = scale*alpha_ref_err
cov_AB= [0.0001, 0.0001]
alpha_lower=0.8
alpha_upper=0.999
sigma_theta = .004
pooled_model = bf.get_bayesian_model(model_type="pooled",
                                     Y=Y,shots=1024,m_gates=lengths,
                                     alpha_ref = alpha_ref,
                                     mu_AB=mu_AB,cov_AB=cov_AB)
pm.model_to_graphviz(pooled_model)
with pooled_model:
    trace_p= pm.sample(draws = 2000, tune= 10000, target_accept=0.97,
                       return_inferencedata=True)
az.plot_trace(trace_p)
with pooled_model:
    azp_summary = az.summary(trace_p, hdi_prob=.94, round_to=6, kind="all")
azp_summary  # notebook cell display (no-op outside a notebook)
epc_p =scale*(1 - azp_summary['mean']['alpha'])
epc_p_err = scale* (azp_summary['sd']['alpha'])
with pooled_model:
    ax = az.plot_posterior(trace_p, var_names=['alpha'], round_to=4, point_estimate=None,
                           transform = alpha_to_EPC, textsize = 10.0, color='b')
ax.set_title("RB_process: standard "+str(q_list)+", backend: "+backend.name(),
             fontsize=12)
Bayes_legend ="Bayesian Pooled Model:\n EPC {0:1.3e} ± {1:1.3e}".format(epc_p, epc_p_err)
ax.axvline(x=EPC,color='r',ls=":")
Fitter_legend ="Frequentist model:\n EPC {0:1.3e} ± {1:1.3e}".format(EPC, EPC_err)
ax.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
           Fitter_legend),fontsize=10 )
hierarchical_model = bf.get_bayesian_model(model_type="hierarchical",
                                           Y=Y,shots=1024,m_gates=lengths,
                                           alpha_ref = alpha_ref,
                                           mu_AB=mu_AB,cov_AB=cov_AB)
pm.model_to_graphviz(hierarchical_model)
with hierarchical_model:
    trace_h= pm.sample(draws = 2000, tune= 10000, target_accept=0.97,
                       return_inferencedata=True)
az.plot_trace(trace_h)
with hierarchical_model:
    azh_summary = az.summary(trace_h, hdi_prob=.94, round_to=6,
                             kind="all", var_names=["~GSP"])
azh_summary  # notebook cell display
epc_h =scale*(1 - azh_summary['mean']['alpha'])
epc_h_err = scale* (azh_summary['sd']['alpha'])
with hierarchical_model:
    ax = az.plot_posterior(trace_h, var_names=['alpha'], round_to=4, point_estimate=None,
                           transform = alpha_to_EPC, textsize = 10.0, color='b')
ax.set_title("RB_process: standard "+str(q_list)+", backend: "+backend.name(),
             fontsize=12)
Bayes_legend ="Bayesian Hierarchical Model:\n EPC {0:1.3e} ± {1:1.3e}".format(epc_h, epc_h_err)
ax.axvline(x=EPC,color='r',ls=":")
Fitter_legend ="Frequentist model:\n EPC {0:1.3e} ± {1:1.3e}".format(EPC, EPC_err)
ax.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
           Fitter_legend),fontsize=10 )
# compare LSF and SMC
print("Model:  Frequentist          Bayesian")
print("          LSF          pooled      hierarchical")
print("EPC     {0:.5f}       {1:.5f}      {2:.5f}"
      .format(EPC, epc_p, epc_h))
print("ERROR  ± {0:.5f}     ± {1:.5f}    ± {2:.5f}"
      .format(EPC_err, epc_p_err, epc_h_err))
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
#fig, plt = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
# NOTE(review): this rebinds the name `plt` to an Axes object, shadowing the
# pyplot module — the `plt.set_*` calls below are Axes methods, not pyplot.
fig, plt = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
plt.set_ylabel("Ground State Population")
plt.set_xlabel("Number of Cliffords")
for i_seed in range(num_samples):
    plt.scatter(lengths, Y[i_seed,:]/1024, label = "data",
                marker="+",color="purple")
plt.plot(np.array(lengths)-0.5,azp_summary['mean']['AB[0]']\
    *azp_summary['mean']['alpha']**lengths+\
    azp_summary['mean']['AB[1]']-0.002,'o-',color="c")
plt.plot(np.array(lengths)+0.5,azh_summary['mean']['AB[0]']\
    *azh_summary['mean']['alpha']**\
    lengths+azh_summary['mean']['AB[1]']+0.002,'o-',color="orange")
plt.legend(("Pooled Model",
            "Hierarchical Model"))
plt.set_title("RB_process: standard "+str(q_list)+\
    ", device: "+hardware+', backend: '+backend.name(),
    fontsize=14);
# Leave-one-out Cross-validation (LOO) comparison
df_comp_loo = az.compare({"hierarchical": trace_h, "pooled": trace_p})
df_comp_loo  # notebook cell display
az.plot_compare(df_comp_loo, insample_dev=False);
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
# --- Flattened notebook cell sequence: 2-qubit standard RB on the FakeAthens
# --- simulator, frequentist vs Bayesian EPC estimation.
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
import qiskit_experiments as qe
rb = qe.randomized_benchmarking
from scipy.optimize import curve_fit
# import the bayesian packages
import pymc3 as pm
import arviz as az
import bayesian_fitter as bf
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
# choice of "simulation", "real", "retrieve"
option = "simulation"
# Determine the backend
if option == "simulation":
    from qiskit.test.mock import FakeAthens
    backend = FakeAthens()
    hardware = 'ibmq_athens' # hardware reference
else:
    from qiskit import IBMQ
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    device = provider.get_backend('ibmq_athens')
    backend = device
    hardware = device.name() # hardware used
# RB design
q_list = [0,1]
lengths = [1, 20, 40, 60, 80, 100, 150, 200, 250, 300, 350, 400, 450, 500]
num_samples = 5
seed = 1010
num_qubits = len(q_list)
# depolarizing scale factor (2^n - 1)/2^n relating alpha to EPC
scale = (2 ** num_qubits - 1) / (2 ** num_qubits)
shots = 1024
if option != "retrieve":
    exp = rb.RBExperiment(q_list, lengths, num_samples=num_samples, seed=seed)
    eda= exp.run(backend)
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    return scale*(1-alpha)
# one or two qubits for retrieve and for the plots
list_bitstring = ['0', '00']
# obtain the current count values
Y_list = []
if option == "simulation":
    for i_sample in range(num_samples*len(lengths)):
        Y_list.append(eda.data[i_sample]['counts']\
            [eda.data[i_sample]['metadata']['ylabel']])
else: # specify job (fresh job or run some time ago)
    job = backend.retrieve_job('6097aed0a4885edfb19508fa') # athens 01'
    for rbseed, result in enumerate(job.result().get_counts()):
        total_counts = 0
        for key,val in result.items():
            if key in list_bitstring:
                total_counts += val
        Y_list.append(total_counts)
# (seeds x lengths) matrix of ground-state counts
Y = np.array(Y_list).reshape(num_samples, len(lengths))
#get LSF EPC and priors
if option != "retrieve":
    popt = eda._analysis_results[0]['popt']
    pcov = eda._analysis_results[0]['pcov']
else: # manual entry (here for job ''6097aed0a4885edfb19508fa')
    popt = [0.7207075, 0.95899375, 0.2545933 ]
    pcov = [[ 2.53455272e-04, -9.10001034e-06, -1.29839515e-05],
            [-9.10001034e-06, 9.55193998e-06, -7.07822420e-06],
            [-1.29839515e-05, -7.07822420e-06, 2.07452483e-05]]
# popt = (A, alpha, B); index 1 is the depolarizing parameter
alpha_ref=popt[1]
mu_AB= [popt[0],popt[2]]
alpha_ref_err = np.sqrt(pcov[1][1])
EPC = scale*(1-alpha_ref)
EPC_err = scale*alpha_ref_err
cov_AB= [0.0001, 0.0001]
alpha_lower=0.8
alpha_upper=0.999
sigma_theta = .004
pooled_model = bf.get_bayesian_model(model_type="pooled",
                                     Y=Y,shots=1024,m_gates=lengths,
                                     alpha_ref = alpha_ref,
                                     mu_AB=mu_AB,cov_AB=cov_AB)
pm.model_to_graphviz(pooled_model)
with pooled_model:
    trace_p= pm.sample(draws = 2000, tune= 10000, target_accept=0.97,
                       return_inferencedata=True)
az.plot_trace(trace_p)
with pooled_model:
    azp_summary = az.summary(trace_p, hdi_prob=.94, round_to=6, kind="all")
azp_summary  # notebook cell display (no-op outside a notebook)
epc_p =scale*(1 - azp_summary['mean']['alpha'])
epc_p_err = scale* (azp_summary['sd']['alpha'])
with pooled_model:
    ax = az.plot_posterior(trace_p, var_names=['alpha'], round_to=4, point_estimate=None,
                           transform = alpha_to_EPC, textsize = 10.0, color='b')
ax.set_title("RB_process: standard "+str(q_list)+", backend: "+backend.name(),
             fontsize=12)
Bayes_legend ="Bayesian Pooled Model:\n EPC {0:1.3e} ± {1:1.3e}".format(epc_p, epc_p_err)
ax.axvline(x=EPC,color='r',ls=":")
Fitter_legend ="Frequentist model:\n EPC {0:1.3e} ± {1:1.3e}".format(EPC, EPC_err)
ax.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
           Fitter_legend),fontsize=10 )
hierarchical_model = bf.get_bayesian_model(model_type="hierarchical",
                                           Y=Y,shots=1024,m_gates=lengths,
                                           alpha_ref = alpha_ref,
                                           mu_AB=mu_AB,cov_AB=cov_AB)
pm.model_to_graphviz(hierarchical_model)
with hierarchical_model:
    trace_h= pm.sample(draws = 2000, tune= 10000, target_accept=0.97,
                       return_inferencedata=True)
az.plot_trace(trace_h)
with hierarchical_model:
    azh_summary = az.summary(trace_h, hdi_prob=.94, round_to=6,
                             kind="all", var_names=["~GSP"])
azh_summary  # notebook cell display
epc_h =scale*(1 - azh_summary['mean']['alpha'])
epc_h_err = scale* (azh_summary['sd']['alpha'])
with hierarchical_model:
    ax = az.plot_posterior(trace_h, var_names=['alpha'], round_to=4, point_estimate=None,
                           transform = alpha_to_EPC, textsize = 10.0, color='b')
ax.set_title("RB_process: standard "+str(q_list)+", backend: "+backend.name(),
             fontsize=12)
Bayes_legend ="Bayesian Hierarchical Model:\n EPC {0:1.3e} ± {1:1.3e}".format(epc_h, epc_h_err)
ax.axvline(x=EPC,color='r',ls=":")
Fitter_legend ="Frequentist model:\n EPC {0:1.3e} ± {1:1.3e}".format(EPC, EPC_err)
ax.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
           Fitter_legend),fontsize=10 )
# compare LSF and SMC
print("Model:  Frequentist          Bayesian")
print("          LSF          pooled      hierarchical")
print("EPC     {0:.5f}       {1:.5f}      {2:.5f}"
      .format(EPC, epc_p, epc_h))
print("ERROR  ± {0:.5f}     ± {1:.5f}    ± {2:.5f}"
      .format(EPC_err, epc_p_err, epc_h_err))
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
#fig, plt = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
# NOTE(review): this rebinds the name `plt` to an Axes object, shadowing the
# pyplot module — the `plt.set_*` calls below are Axes methods, not pyplot.
fig, plt = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
plt.set_ylabel("Ground State Population")
plt.set_xlabel("Number of Cliffords")
for i_seed in range(num_samples):
    plt.scatter(lengths, Y[i_seed,:]/1024, label = "data",
                marker="+",color="purple")
plt.plot(np.array(lengths)-0.5,azp_summary['mean']['AB[0]']\
    *azp_summary['mean']['alpha']**lengths+\
    azp_summary['mean']['AB[1]']-0.002,'o-',color="c")
plt.plot(np.array(lengths)+0.5,azh_summary['mean']['AB[0]']\
    *azh_summary['mean']['alpha']**\
    lengths+azh_summary['mean']['AB[1]']+0.002,'o-',color="orange")
plt.legend(("Pooled Model",
            "Hierarchical Model"))
plt.set_title("RB_process: standard "+str(q_list)+\
    ", device: "+hardware+', backend: '+backend.name(),
    fontsize=14);
# Leave-one-out Cross-validation (LOO) comparison
df_comp_loo = az.compare({"hierarchical": trace_h, "pooled": trace_p})
df_comp_loo  # notebook cell display
az.plot_compare(df_comp_loo, insample_dev=False);
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.tools.monitor import job_monitor
from qiskit import Aer
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error, thermal_relaxation_error
from qiskit import QuantumRegister, QuantumCircuit
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
import copy
import time
# import the bayesian packages
import pymc3 as pm
import arviz as az
from scipy.optimize import curve_fit
def obtain_priors_and_data_from_fitter(rbfit, nCliffs, shots, printout = True):
    """Extract the count matrix and Bayesian priors from an RB fitter object.

    Returns the tuple
    (m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta).
    """
    # Clifford-length axis; deep-copied so the caller's sequence is never aliased.
    m_gates = copy.deepcopy(nCliffs)
    # Observed ground-state counts: survival probabilities scaled back to shots.
    # (raw_data[0] is the pattern of interest, e.g. the 2-qubit RB pattern.)
    Y = (np.array(rbfit._raw_data[0]) * shots).astype(int)
    # Least-squares results with params ordered (A, alpha, B).
    fit_params = rbfit._fit[0]['params']
    fit_errs = rbfit._fit[0]['params_err']
    alpha_ref = fit_params[1]
    # Bound alpha to +/-5% around the LSF estimate, capped at 1.0.
    alpha_lower = 0.95 * alpha_ref
    alpha_upper = min(1.05 * alpha_ref, 1.0)
    # Priors for A and B: drop the alpha entry; variances from the LSF errors.
    mu_AB = np.delete(fit_params, 1)
    cov_AB = np.delete(fit_errs, 1) ** 2
    # Prior width for the Beta-distributed GSP (work in progress).
    sigma_theta = 0.004
    if printout:
        print("priors:\nalpha_ref",alpha_ref)
        print("alpha_lower", alpha_lower, "alpha_upper", alpha_upper)
        print("A,B", mu_AB, "\ncov A,B", cov_AB)
        print("sigma_theta", sigma_theta)
    return m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta
# modified for accelerated BM with EPCest as extra parameter
def get_bayesian_model(model_type,Y,shots,m_gates,mu_AB,cov_AB, alpha_ref,
                       alpha_lower=0.5,alpha_upper=0.999,alpha_testval=0.9,
                       p_lower=0.9,p_upper=0.999,p_testval=0.95,
                       RvsI=None,IvsR=None,sigma_theta=0.001,
                       sigma_theta_l=0.0005,sigma_theta_u=0.0015):
    """Build a PyMC3 model for binomially distributed RB survival counts.

    model_type selects the likelihood structure:
      "hierarchical" -- per-length Beta-distributed GSP with fixed sigma_theta;
      "h_sigma"      -- hierarchical, with the Beta sigma a Uniform unknown;
      "tilde"        -- interleaved RB: RvsI/IvsR 0/1 masks pick reference vs
                        interleaved points, alpha*p_tilde models the latter;
      "h_tilde"      -- hierarchical variant of "tilde" (lengths tiled twice);
      anything else  -- the default "pooled" binomial model.

    Y is the (seeds x lengths) count matrix, shots the shots per circuit,
    m_gates the Clifford lengths, mu_AB/cov_AB the prior mean/variance of the
    decay parameters (A, B), and alpha_ref the starting value for alpha.
    Returns the pm.Model (callers sample it with pm.sample).

    NOTE(review): alpha_testval is currently unused -- alpha's testval is
    alpha_ref.
    """
    # Model after Granade et al., New J. Phys. 17 013042 (2015),
    # https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RB_model = pm.Model()
    with RB_model:
        # note: shots can be used in place of total_shots (broadcast)
        # to be checked however for all models
        total_shots = np.full(Y.shape, shots)
        #Priors for unknown model parameters
        alpha = pm.Uniform("alpha",lower=alpha_lower,
                           upper=alpha_upper, testval = alpha_ref)
        # (A, B) prior: multivariate normal truncated at 0 (amplitudes >= 0).
        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)
        AB = BoundedMvNormal("AB", mu=mu_AB,testval = mu_AB,
                             cov= np.diag(cov_AB),
                             shape = (2))
        if model_type == "hierarchical":
            # Deterministic decay curve A*alpha^m + B.
            GSP = AB[0]*alpha**m_gates + AB[1]
            # Per-length latent GSP, Beta-distributed around the curve.
            theta = pm.Beta("GSP",
                            mu=GSP,
                            sigma = sigma_theta,
                            shape = Y.shape[1])
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_h", p=theta, observed=Y,
                            n = total_shots)
        elif model_type == "h_sigma":
            # As "hierarchical" but the Beta width is itself inferred.
            sigma_t = pm.Uniform("sigma_t", testval = sigma_theta,
                                 upper = sigma_theta_u, lower = sigma_theta_l)
            GSP = AB[0]*alpha**m_gates + AB[1]
            theta = pm.Beta("GSP",
                            mu=GSP,
                            sigma = sigma_t,
                            shape = Y.shape[1])
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_h", p=theta, observed=Y,
                            n = total_shots)
        elif model_type == "tilde":
            # Interleaved-gate depolarizing parameter p_tilde.
            p_tilde = pm.Uniform("p_tilde",lower=p_lower,
                                 upper=p_upper, testval = p_testval)
            # Masks select the reference (RvsI) vs interleaved (IvsR) curve.
            GSP = AB[0]*(RvsI*alpha**m_gates + IvsR*(alpha*p_tilde)**m_gates) + AB[1]
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_t", p=GSP, observed=Y,
                            n = total_shots)
        elif model_type == "h_tilde":
            p_tilde = pm.Uniform("p_tilde",lower=p_lower,
                                 upper=p_upper, testval = p_testval)
            sigma_t = pm.Uniform("sigma_t", testval = sigma_theta,
                                 upper = sigma_theta_u, lower = sigma_theta_l)
            # Lengths tiled twice: reference block then interleaved block.
            GSP = AB[0]*(RvsI*alpha**np.tile(m_gates,2) +\
                         IvsR*(alpha*p_tilde)**np.tile(m_gates,2)) \
                  + AB[1]
            theta = pm.Beta("GSP",
                            mu=GSP,
                            sigma = sigma_t,
                            shape = ((2*len(m_gates,))) )
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_t", p= theta, observed = Y, n = shots)
        else: # default model "pooled"
            GSP = AB[0]*alpha**m_gates + AB[1]
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_p", p=GSP, observed=Y,
                            n = total_shots)
    return RB_model
#.reshape( ((Y.shape[1],Y.shape[0])) )
# .reshape( ((len(m_gates),2)) )
# Variant of get_bayesian_model: ALL decay parameters get independent bounded
# Uniform priors centred on the LSF solution (instead of Uniform alpha +
# truncated MvNormal for A, B).
def build_bayesian_model(model_type,Y,shots,m_gates,mu_AB=None,cov_AB=None, alpha_ref=None,
                         alpha_lower=0.5,alpha_upper=0.999,alpha_testval=0.9,
                         p_lower=0.9,p_upper=0.999,p_testval=0.95,
                         popt = None, pcov = None,
                         RvsI=None,IvsR=None,sigma_theta=0.001,
                         sigma_theta_l=0.0005,sigma_theta_u=0.0015):
    """Build a PyMC3 RB model whose parameter vector pi gets a bounded
    Uniform prior of +/-0.1 around the LSF estimate popt (clipped to (0, 1)).

    popt must be a NumPy array: ordered (A, alpha, B) for "pooled"/
    "hierarchical"/"h_sigma", and (A, alpha, p_tilde, B) for the "tilde"
    variants.  mu_AB/cov_AB/alpha_ref are accepted for signature
    compatibility with get_bayesian_model but are not used here.
    Returns the pm.Model.
    """
    # Model after Granade et al., New J. Phys. 17 013042 (2015),
    # https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RB_model = pm.Model()
    with RB_model:
        #Priors for unknown model parameters
        # Uniform box of half-width 0.1 around popt, clipped to the open
        # interval (0, 1) so Beta/Binomial probabilities stay valid.
        BoundedUniform = pm.Bound(pm.Uniform,
                                  lower=np.fmax(popt-0.1, np.full(popt.shape,1.e-9)),
                                  upper=np.fmin(popt+0.1, np.full(popt.shape,1.-1e-9)))
        if model_type == "hierarchical":
            # legacy: sigma_theta is a scalar
            pi = BoundedUniform("A_α_B", testval = popt, shape = popt.shape)
            GSP = pi[0]*pi[1]**m_gates + pi[2]
            theta = pm.Beta('θ',
                            mu=GSP,
                            sigma = sigma_theta,
                            shape = Y.shape[1])
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_h", p=theta, observed=Y,
                            n = shots)
        elif model_type == "h_sigma":
            pi = BoundedUniform("A_α_B", testval = popt, shape = popt.shape)
            # Beta width inferred within [sigma_theta_l, sigma_theta_u].
            sigma_t = pm.Uniform("σ Beta", testval = sigma_theta,
                                 upper = sigma_theta_u, lower = sigma_theta_l)
            GSP = pi[0]*pi[1]**m_gates + pi[2]
            theta = pm.Beta('θ',
                            mu=GSP,
                            sigma = sigma_t,
                            shape = Y.shape[1])
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts", p=theta, observed=Y,
                            n = shots)
        elif model_type == "tilde":
            # Interleaved RB: RvsI/IvsR masks pick reference vs interleaved.
            pi = BoundedUniform("A_α_ƥ_B", testval = popt, shape = popt.shape)
            GSP = pi[0]*(RvsI*pi[1]**m_gates + IvsR*(pi[1]*pi[2])**m_gates) + pi[3]
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_t", p=GSP, observed=Y,
                            n = shots)
        elif model_type == "h_tilde":
            pi = BoundedUniform("A_α_ƥ_B",testval = popt, shape = popt.shape)
            sigma_t = pm.Uniform("σ Beta", testval = sigma_theta,
                                 upper = sigma_theta_u, lower = sigma_theta_l)
            # Lengths tiled twice: reference block then interleaved block.
            GSP = pi[0]*(RvsI*pi[1]**np.tile(m_gates,2) +\
                         IvsR*(pi[1]*pi[2])**np.tile(m_gates,2)) \
                  + pi[3]
            theta = pm.Beta('θ',
                            mu=GSP,
                            sigma = sigma_t,
                            shape = ((2*len(m_gates,))) )
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts", p= theta, observed = Y, n = shots)
        else: # default model "pooled"
            pi = BoundedUniform("A_α_B", testval = popt, shape = popt.shape)
            GSP = pi[0]*pi[1]**m_gates + pi[2]
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts", p=GSP, observed=Y,
                            n = shots)
    return RB_model
#.reshape( ((Y.shape[1],Y.shape[0])) )
# .reshape( ((len(m_gates),2)) )
def get_bayesian_model_hierarchical(model_type,Y): # modified for accelerated BM with EPCest as extra parameter
    # maintained for backward compatibility, deprecated
    """Legacy hierarchical RB model builder.  DEPRECATED.

    NOTE(review): model_type is ignored, and alpha_lower, alpha_upper,
    alpha_ref, mu_AB, cov_AB, m_gates, shots and sigma_theta are all read
    from module-level globals -- they must be defined before calling this.
    Prefer get_bayesian_model("hierarchical", ...).
    """
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RBH_model = pm.Model()
    with RBH_model:
        #Priors for unknown model parameters
        alpha = pm.Uniform("alpha",lower=alpha_lower,
                           upper=alpha_upper, testval = alpha_ref)
        # (A, B) prior truncated at 0.
        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)
        AB = BoundedMvNormal("AB", mu=mu_AB,testval = mu_AB,
                             cov= np.diag(cov_AB),
                             shape = (2))
        # Expected value of outcome
        GSP = AB[0]*alpha**m_gates + AB[1]
        total_shots = np.full(Y.shape, shots)
        theta = pm.Beta("GSP",
                        mu=GSP,
                        sigma = sigma_theta,
                        shape = Y.shape[1])
        # Likelihood (sampling distribution) of observations
        p = pm.Binomial("Counts", p=theta, observed=Y,
                        n = total_shots)
    return RBH_model
def get_trace(RB_model, draws = 2000, tune= 10000, target_accept=0.95, return_inferencedata=True):
    """Sample the posterior of *RB_model* and plot the trace.

    Uses PyMC3's default gradient-based (NUTS) sampler; see
    https://docs.pymc.io/notebooks/sampler-stats.html and
    https://docs.pymc.io/notebooks/api_quickstart.html
    Returns the trace / InferenceData object.
    """
    sampler_kwargs = dict(draws=draws, tune=tune,
                          target_accept=target_accept,
                          return_inferencedata=return_inferencedata)
    with RB_model:
        trace = pm.sample(**sampler_kwargs)
    # Diagnostic trace plot, drawn inside the model context as well.
    with RB_model:
        az.plot_trace(trace)
    return trace
def get_summary(RB_model, trace, round_to=6, hdi_prob=.94, kind='stats'):
    """Return the ArviZ summary table for *trace*, evaluated in the model
    context.  (hdi_prob=.94 is the ArviZ default.)"""
    summary_kwargs = {'round_to': round_to, 'hdi_prob': hdi_prob, 'kind': kind}
    with RB_model:
        return az.summary(trace, **summary_kwargs)
# obtain EPC from alpha (used by plot_posterior) # deprecated, should use scale
#def alpha_to_EPC(alpha):
#return 3*(1-alpha)/4
def get_EPC_and_legends(rbfit,azs):
    """Derive the Bayesian EPC estimate from a summary table and format
    plot-legend strings for the Bayesian, fitter and predicted EPC values.

    NOTE(review): depends on module globals alpha_to_EPC and pred_epc, and
    accesses both rbfit.fit and rbfit._fit (presumably equivalent views of
    the same fit results -- confirm against the fitter class).
    Returns (EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend,
    pred_epc_legend).
    """
    EPC_Bayes = alpha_to_EPC(azs['mean']['alpha'])
    # Error propagated as the EPC shift produced by one posterior sd of alpha.
    EPC_Bayes_err = EPC_Bayes - alpha_to_EPC(azs['mean']['alpha']+azs['sd']['alpha'])
    Bayes_legend ="EPC Bayes {0:.5f} ({1:.5f})".format(EPC_Bayes, EPC_Bayes_err)
    Fitter_legend ="EPC Fitter {0:.5f} ({1:.5f})".format(rbfit.fit[0]['epc']\
        ,rbfit._fit[0]['epc_err'])
    # Only show a prediction legend when a (positive) predicted EPC exists.
    if pred_epc > 0.0:
        pred_epc_legend = "EPC predicted {0:.5f}".format(pred_epc)
    else:
        pred_epc_legend = ''
    return EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend, pred_epc_legend
def EPC_compare_fitter_to_bayes(RB_model, azs, trace,m_name,rbfit):
    """Plot the Bayesian posterior of the EPC with reference lines for the
    frequentist (red) and, when available, predicted (green) EPC.

    NOTE(review): relies on module globals RB_process, hardware, backend,
    alpha_to_EPC, alpha_ref and pred_epc.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend, pred_epc_legend = get_EPC_and_legends(rbfit,azs)
    with RB_model:
        # Posterior of alpha transformed on the fly to EPC units.
        az.plot_posterior(trace, var_names=['alpha'], round_to=4,
                          transform = alpha_to_EPC, point_estimate=None)
    plt.title("Error per Clifford "+RB_process+" device: "+hardware
              +' backend: '+backend.name()+' model:'+m_name,
              fontsize=12)
    # Frequentist (LSF) EPC as a red reference line.
    plt.axvline(x=alpha_to_EPC(alpha_ref),color='red')
    if pred_epc > 0.0:
        # Noise-model-predicted EPC as a green reference line.
        plt.axvline(x=pred_epc,color='green')
        plt.legend((Bayes_legend, "Higher density interval",Fitter_legend, pred_epc_legend), fontsize=10)
    else:
        plt.legend((Bayes_legend, "Higher density interval",Fitter_legend), fontsize=10 )
    plt.show()
def GSP_compare_fitter_to_bayes(RB_model, azs,m_name,rbfit):
    """Plot ground-state population vs Clifford length: raw per-seed data,
    the Bayesian posterior-mean decay curve and the LSF (prior) curve.

    NOTE(review): relies on module globals m_gates, Y, shots, mu_AB,
    alpha_ref, nseeds, RB_process, hardware and backend.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend,_ = get_EPC_and_legends(rbfit,azs)
    # plot ground state population ~ Clifford length
    fig, axes = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
    axes.set_ylabel("Ground State Population")
    axes.set_xlabel("Clifford Length")
    # Mean observed survival frequency per length.
    axes.plot(m_gates, np.mean(Y/shots,axis=0), 'r.')
    # Bayesian posterior-mean curve A*alpha^m + B.
    axes.plot(m_gates,azs['mean']['AB[0]']*azs['mean']['alpha']**m_gates+azs['mean']['AB[1]'],'--')
    #axes.plot(m_gates,azs['mean']['GSP'],'--') # WIP
    #axes.errorbar(m_gates, azs['mean']['GSP'], azs['sd']['GSP'], linestyle='None', marker='^') # WIP
    # LSF (prior) curve, dotted.
    axes.plot(m_gates,mu_AB[0]*np.power(alpha_ref,m_gates)+mu_AB[1],':')
    # Raw per-seed points, slightly offset in x for visibility.
    for i_seed in range(nseeds):
        plt.scatter(m_gates-0.25, Y[i_seed,:]/shots, label = "data", marker="x")
    axes.legend(["Mean Observed Frequencies",
                 "Bayesian Model\n"+Bayes_legend,
                 "Fitter Model\n"+Fitter_legend],fontsize=12)
    axes.set_title(RB_process+" device: "+hardware+' backend: '+backend.name()+' model:'+m_name,
                   fontsize=14) # WIP
def get_predicted_EPC(error_source):
    """Predict the 2-qubit Error Per Clifford from a known noise model.

    error_source: "depolarization" (use the simulation's depolarizing
    parameters p1Q/p2Q) or "from_T1_T2" (coherence limit from t1/t2 and the
    gate times gate1Q/gate2Q).

    NOTE(review): every input other than error_source is a module global
    (transpile_list, xdata, basis_gates, rb_opts, rb_pattern, p1Q, p2Q,
    t1, t2, gate1Q, gate2Q).  An unrecognised error_source raises
    NameError at the final return (pred_epc never bound).  The
    "depolarization" branch uses qubit_pair=[0, 2] while "from_T1_T2"
    uses [0, 1] -- confirm this matches the rb_pattern in use.
    """
    #Count the number of single and 2Q gates in the 2Q Cliffords
    gates_per_cliff = rb.rb_utils.gates_per_clifford(transpile_list,xdata[0],basis_gates,rb_opts['rb_pattern'][0])
    for basis_gate in basis_gates:
        # Average gate count over the two qubits of the RB pattern.
        print("Number of %s gates per Clifford: %f "%(basis_gate ,
              np.mean([gates_per_cliff[rb_pattern[0][0]][basis_gate],
                       gates_per_cliff[rb_pattern[0][1]][basis_gate]])))
    # Calculate the predicted epc
    # from the known depolarizing errors on the simulation
    if error_source == "depolarization":
        # Error per gate from noise model
        epgs_1q = {'u1': 0, 'u2': p1Q/2, 'u3': 2*p1Q/2}
        epg_2q = p2Q*3/4
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 2],
            list_epgs_1q=[epgs_1q, epgs_1q])
    # using the predicted primitive gate errors from the coherence limit
    if error_source == "from_T1_T2":
        # Predicted primitive gate errors from the coherence limit
        u2_error = rb.rb_utils.coherence_limit(1,[t1],[t2],gate1Q)
        u3_error = rb.rb_utils.coherence_limit(1,[t1],[t2],2*gate1Q)
        epg_2q = rb.rb_utils.coherence_limit(2,[t1,t1],[t2,t2],gate2Q)
        epgs_1q = {'u1': 0, 'u2': u2_error, 'u3': u3_error}
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 1],
            list_epgs_1q=[epgs_1q, epgs_1q])
    return pred_epc
def get_and_run_seeds(rb_circs, shots, backend, coupling_map,
                      basis_gates, noise_model, retrieve_list=None):
    """Transpile each RB seed's circuits and execute (or retrieve) them.

    rb_circs : list of per-seed circuit lists produced by the RB generator.
    noise_model : Aer noise model, or None for a hardware run.
    retrieve_list : optional list of job ids; when given, jobs are fetched
        from the backend instead of being executed (one id per seed).

    Returns (result_list, transpile_list).

    Fixes vs. original: mutable default argument replaced by None sentinel;
    `is None` identity test; the two near-identical qiskit.execute calls
    merged; "Runing" typo corrected.
    """
    if retrieve_list is None:
        retrieve_list = []
    result_list = []
    transpile_list = []
    for rb_seed, rb_circ_seed in enumerate(rb_circs):
        print('Compiling seed %d' % rb_seed)
        rb_circ_transpile = qiskit.transpile(rb_circ_seed,
                                             optimization_level=0,
                                             basis_gates=basis_gates)
        print('Running seed %d' % rb_seed)
        if not retrieve_list:
            execute_kwargs = dict(shots=shots,
                                  backend=backend,
                                  coupling_map=coupling_map,
                                  basis_gates=basis_gates)
            # A noise model is only passed for simulator runs; None means
            # the circuits run on real hardware.
            if noise_model is not None:
                execute_kwargs['noise_model'] = noise_model
            job = qiskit.execute(rb_circ_transpile, **execute_kwargs)
            job_monitor(job)
        else:
            # Re-load a previously run job by id instead of executing.
            job = backend.retrieve_job(retrieve_list[rb_seed])
        result_list.append(job.result())
        transpile_list.append(rb_circ_transpile)
    print("Finished Jobs")
    return result_list, transpile_list
def get_count_data(result_list, nCliffs):
    """Build the (seeds x lengths) ground-state count matrix from qiskit
    Result objects.

    Pools 1Q/2Q/3Q interleave processes: every bitstring in
    {'0', '00', '000', '100'} counts as "ground state".
    """
    valid_bitstrings = {'0', '00', '000', '100'}  # all valid bitstrings
    matrix = []
    for result in result_list:
        counts_per_length = result.get_counts()
        row = [
            sum(v for k, v in counts_per_length[idx].items()
                if k in valid_bitstrings)
            for idx in range(len(nCliffs))
        ]
        matrix.append(row)
    return np.array(matrix)
# This section for the LS fit in this model pooling
# data from 2Q and 3Q interleave processes
def func(x, a, b, c):
    """Exponential RB decay model: A * alpha**m + B (here a * b**x + c)."""
    return c + a * (b ** x)
def epc_fitter_when_mixed_2Q_3Q_RB(X,Y1,Y2,shots,check_plot=False):
    """Frequentist interleaved-RB EPC estimate from pooled count matrices.

    X is the Clifford-length axis, Y1/Y2 the (seeds x lengths) reference /
    interleaved count matrices.  Fits `func` to each data set and combines
    the two alpha estimates into the interleaved EPC (2-qubit scale 3/4).
    Returns (epc_est_fitter, epc_est_fitter_err).
    """
    # Repeat the length axis once per seed row so curve_fit sees all points.
    xdata = np.array(list(X) * Y1.shape[0])
    ydata1 = np.ravel(Y1) / shots
    popt, pcov = curve_fit(func, xdata, ydata1)
    perr = np.sqrt(np.diag(pcov))
    ydata2 = np.ravel(Y2) / shots
    popt2, pcov2 = curve_fit(func, xdata, ydata2)
    perr2 = np.sqrt(np.diag(pcov2))
    if check_plot:
        import matplotlib.pyplot as plt
        # Diagnostic plot: raw points and per-length means for both fits.
        for pts, fmt, lbl in ((ydata1, 'bx', 'Reference'),
                              (ydata2, 'r+', 'Interleave')):
            plt.plot(xdata, pts, fmt, label=lbl)
        plt.plot(X, np.mean(Y1, axis=0) / shots, 'b-', label=None)
        plt.plot(X, np.mean(Y2, axis=0) / shots, 'r-', label=None)
        plt.ylabel('Population of |00>')
        plt.xlabel('Number of Cliffords')
        plt.legend()
        plt.show()
        print(popt[1])
        print(perr[1])
        print(popt2[1])
        print(perr2[1])
    # Interleaved-RB formula: EPC = (3/4) * (1 - alpha_int / alpha_ref).
    alpha_ratio = popt2[1] / popt[1]
    epc_est_fitter = 3 * (1 - alpha_ratio) / 4
    epc_est_fitter_err = 3 * alpha_ratio / 4 * (np.sqrt(perr[1]**2 + perr2[1]**2))
    return epc_est_fitter, epc_est_fitter_err
# This section for the demo with qiskit experiment
def retrieve_from_lsf(exp):
    """Extract least-squares-fit results from a qiskit-experiments object.

    Returns (parameter std errors, optimal parameters, EPC estimate,
    EPC error, experiment type string).  Relies on private attributes
    (_analysis_results, _data) of *exp*.
    """
    fit = exp._analysis_results[0]
    popt_fm = fit['popt']
    perr_fm = np.sqrt(np.diag(fit['pcov']))
    epc_est_fm = fit['EPC']
    epc_est_fm_err = fit['EPC_err']
    experiment_type = exp._data[0]['metadata']['experiment_type']
    return perr_fm, popt_fm, epc_est_fm, epc_est_fm_err, experiment_type
def get_GSP_counts(data, x_length, data_range):
    """Collect ground-state counts from raw experiment data.

    data: list of per-circuit dicts each holding a 'counts' mapping.
    x_length: number of Clifford lengths per sample.
    data_range: iterable of sample indices (rows of the output matrix).
    Counts for '0', '00', '000' and '100' are pooled so the same routine
    serves 1Q, 2Q and 3Q (interleaved) processes.
    """
    valid = ('0', '00', '000', '100')  # all valid ground-state bitstrings
    matrix = []
    for sample in data_range:
        base = sample * x_length
        matrix.append([
            sum(v for k, v in data[base + col]['counts'].items() if k in valid)
            for col in range(x_length)
        ])
    return np.array(matrix)
def RB_bayesian_results(resmodel, trace, lengths,
                epc_est_fm, epc_est_fm_err, experiment_type, scale, num_samples, Y, shots, physical_qubits,
                interleaved_gate, backend,
                EPG_dic = None, epc_calib = np.nan,
                Y1 = None, Y2= None, show_plot = True,
                routine = 'get_bayesian_model'):
    """Report and plot Bayesian (SMC) RB results next to the frequentist ones.

    resmodel/trace: the PyMC3 model and its sampled trace.
    lengths: Clifford lengths; Y (and Y1/Y2 for interleaved RB) are count
    matrices (num_samples x len(lengths)); shots: shots per circuit.
    epc_est_fm / epc_est_fm_err: frequentist (LSF) EPC estimate and error.
    experiment_type: "StandardRB" or "InterleavedRB"; routine selects the
    posterior variable naming of 'get_bayesian_model' vs
    'build_bayesian_model'.  epc_calib is the backend's calibrated gate
    error (NaN when unknown); EPG_dic maps gate names to LSF error-per-gate.
    Prints a comparison table and, when show_plot, draws the decay curve(s)
    and (for routine 'get_bayesian_model') the posterior EPC distribution.
    """
    # obtain EPC from alpha (used by az.plot_posterior)
    def alpha_to_EPC(alpha):
        return scale*(1-alpha)
    azt_summary = get_summary(resmodel, trace, kind = 'stats')
    print(azt_summary,'\n')
    if experiment_type == "StandardRB":
        # pick up posterior statistics under the variable names used by the
        # chosen model-building routine
        if routine == 'get_bayesian_model':
            v_name = 'alpha'
            alpha = azt_summary['mean']['alpha']
            sd_alpha = azt_summary['sd']['alpha']
            A = azt_summary['mean']['AB[0]']
            B = azt_summary['mean']['AB[1]']
        elif routine == 'build_bayesian_model':
            v_name = 'A_α_B'
            alpha = azt_summary['mean']['A_α_B[1]']
            sd_alpha = azt_summary['sd']['A_α_B[1]']
            A = azt_summary['mean']['A_α_B[0]']
            B = azt_summary['mean']['A_α_B[2]']
        epc_est_a = scale*(1 - alpha)
        epc_est_a_err = scale * sd_alpha
        # compare LSF and SMC
        print("Model: Frequentist Bayesian")
        print("_______________________________________")
        print("EPC {0:1.3e} {1:1.3e} "
              .format(epc_est_fm,epc_est_a))
        print("± sigma ± {0:1.3e} ± {1:1.3e} "
              .format(epc_est_fm_err, epc_est_a_err))
        if EPG_dic:  # EPG_dic defaults to None -- guard before iterating
            for gate, EPG in EPG_dic.items():
                # rescale each LSF error-per-gate by the Bayesian/LSF EPC ratio
                print("{0:<12}{1:1.3e} {2:1.3e}"
                      .format("EPG "+gate,EPG,EPG*epc_est_a/epc_est_fm))
        if show_plot == False:
            return
        import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
        fig, ax = plt.subplots(1, 1)  # renamed from 'plt': don't shadow the module
        ax.set_ylabel("P(0)")
        ax.set_xlabel("Cliffords Length")
        ax.plot(lengths,A*alpha**lengths + B,'-',color="r")
        for i_seed in range(num_samples):
            ax.scatter(lengths, Y[i_seed,:]/shots, label = "data", marker="x",color="grey")
        ax.set_title(experiment_type +', ' + "qubit: " + str(physical_qubits)\
                     +', backend: '+backend.name(),
                     fontsize=14);
    elif experiment_type == "InterleavedRB":
        if routine == 'get_bayesian_model':
            v_name = 'p_tilde'
            alpha = azt_summary['mean']['alpha']
            p = azt_summary['mean']['p_tilde']
            # BUG FIX: was "sd_p = p = ...", which clobbered p with its own
            # standard deviation before it was used below
            sd_p = azt_summary['sd']['p_tilde']
            A = azt_summary['mean']['AB[0]']
            B = azt_summary['mean']['AB[1]']
        elif routine == 'build_bayesian_model':
            v_name = 'A_α_ƥ_B'
            alpha = azt_summary['mean']['A_α_ƥ_B[1]']
            p = azt_summary['mean']['A_α_ƥ_B[2]']
            sd_p = azt_summary['sd']['A_α_ƥ_B[2]']
            A = azt_summary['mean']['A_α_ƥ_B[0]']
            B = azt_summary['mean']['A_α_ƥ_B[3]']
        epc_est_a = scale*(1 - p)
        epc_est_a_err = scale * sd_p
        # compare LSF and SMC
        print("Model: Frequentist Bayesian Calibration")
        print("__________________________________________________________")
        print("EPC {0:1.3e} {1:1.3e} {2:1.3e}"
              .format(epc_est_fm,epc_est_a,epc_calib ))
        print("± sigma ± {0:1.3e} ± {1:1.3e} "
              .format(epc_est_fm_err, epc_est_a_err))
        if show_plot ==False:
            return
        import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
        fig, ax = plt.subplots(1, 1)  # renamed from 'plt': don't shadow the module
        ax.set_ylabel("P(0)")
        ax.set_xlabel("Cliffords Length")
        for i_seed in range(num_samples):
            ax.scatter(lengths, Y1[i_seed,:]/shots, label = "data", marker="x",color="r")
            ax.scatter(lengths, Y2[i_seed,:]/shots, label = "data", marker="+",color="orange")
        ax.plot(lengths,A*alpha**lengths + B,'--',color="r")
        ax.plot(lengths,A*(alpha*p)**lengths + B,'--',color="orange")
        ax.legend(("Standard, SMC model",
                   "Interleaved, SMC model"))
        ax.set_title(experiment_type +', ' + interleaved_gate + str(physical_qubits)\
                     +', backend: '+backend.name(),
                     fontsize=14);
    import matplotlib.pyplot as plt # if not yet imported
    #plt.rcParams["figure.figsize"] = plt.rcParamsDefault["figure.figsize"]
    # to reset to default
    plt.rcParams["figure.figsize"] = (8,5)
    if routine == 'build_bayesian_model': # WIP
        return
    with resmodel:
        ax = az.plot_posterior(trace, var_names=[v_name],
                               round_to=4, point_estimate=None,
                               transform = alpha_to_EPC)
    ax.set_xlim(epc_est_a - 6*epc_est_a_err, epc_est_a + 6*epc_est_a_err)
    plt.axvline(x=epc_est_fm,color='cyan',ls="-")
    # BUG FIX: "epc_calib != np.nan" is always True (NaN compares unequal to
    # everything, including itself); use np.isnan to detect a missing value
    if not np.isnan(epc_calib):
        plt.axvline(x=epc_calib,color='r',ls=":")
    plt.axvline(x=epc_est_a,color='blue',ls=":")
    plt.title(experiment_type +', ' + interleaved_gate + " qubit(s):" + str(physical_qubits)\
              +', backend: '+backend.name(),
              fontsize=14)
    Bayes_legend = "EPC SMC: {0:1.3e} ± {1:1.3e}".format(epc_est_a, epc_est_a_err)
    LSF_legend = "EPC LSF: {0:1.3e} ± {1:1.3e}".format(epc_est_fm, epc_est_fm_err)
    Cal_legend = "EPC Calibration: {0:1.3e}".format(epc_calib)
    if epc_calib > 0.0:  # NaN compares False, so the calibration entry is skipped
        plt.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
                    LSF_legend,
                    Cal_legend), fontsize=12 )
    else:
        plt.legend((Bayes_legend, "$Highest\; density\; interval$ HDI",
                    LSF_legend), fontsize=12 )
# obtain EPC from alpha and scale(used by az.plot_posterior)
def alpha_to_EPC_from_scale(alpha, scale):
    """Convert the RB decay parameter ``alpha`` to EPC for a given scale."""
    depolarization = 1 - alpha
    return scale * depolarization
# guess number of shots
def guess_shots(Y):
    """Guess the shot count: smallest power of two >= max(Y), minimum 2."""
    peak = np.max(Y)
    shots = 2
    while peak > shots:
        shots *= 2
    return shots
def bayesian_standard_RB_model():
    """Build the pooled Bayesian model for standard RB.

    Reads module-level globals: Y, shots, lengths, and the LSF results
    popt_fm/perr_fm (alpha prior bounded at +-6 sigma, capped below 1).
    """
    # construct model
    RB_model = get_bayesian_model(model_type="pooled",Y=Y,shots=shots,m_gates=lengths,
                         mu_AB=[popt_fm[0],popt_fm[2]],cov_AB=[perr_fm[0],perr_fm[2]],
                         alpha_ref=popt_fm[1],
                         alpha_lower=popt_fm[1]-6*perr_fm[1],
                         alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]),
                         RvsI=None,IvsR=None)
    return RB_model
def bayesian_interleaved_RB_model():
    """Build the "tilde" Bayesian model for interleaved RB.

    Reads module-level globals: Y, shots, lengths, RvsI/IvsR selector
    matrices, and the LSF results popt_fm/perr_fm (alpha and p_tilde
    priors bounded at +-6 sigma, capped below 1).
    """
    # construct model
    RB_model = get_bayesian_model("tilde",Y=Y,shots=shots, m_gates=lengths,
                          alpha_ref=popt_fm[1], p_testval= popt_fm[2],
                          alpha_lower=popt_fm[1]-6*perr_fm[1],
                          alpha_upper=min(1.-1.E-6,popt_fm[1]+6*perr_fm[1]),
                          p_lower=popt_fm[2]-6*perr_fm[2],
                          p_upper=min(1.-1.E-6,popt_fm[2]+6*perr_fm[2]),
                          mu_AB=[popt_fm[0],popt_fm[3]],cov_AB=[perr_fm[0],perr_fm[3]],
                          RvsI=RvsI,IvsR=IvsR)
    return RB_model
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
import numpy as np
import copy
from qiskit_experiments.library import StandardRB, InterleavedRB
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library.randomized_benchmarking import RBUtils
import qiskit.circuit.library as circuits
# for retrieving gate calibration
from datetime import datetime
import qiskit.providers.aer.noise.device as dv
# import the bayesian packages
import pymc3 as pm
import arviz as az
import unif_bayesian_fitter as bf
simulation = True # make your choice here
if simulation:
from qiskit.providers.aer import AerSimulator
from qiskit.test.mock import FakeParis
backend = AerSimulator.from_backend(FakeParis())
else:
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_lima') # type here hardware backend
# for WIP
import importlib
importlib.reload(bf)
lengths = np.arange(1, 2500, 250)
num_samples = 10
seed = 1010
qubits = [0]
# Run an RB experiment on qubit 0
exp1 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
expdata1 = exp1.run(backend).block_for_results()
results1 = expdata1.analysis_results()
# View result data
display(expdata1.figure(0))
for result in results1:
print(result)
popt = expdata1.analysis_results()[0].value.value
pcov = expdata1.analysis_results()[0].extra['covariance_mat']
epc_est_fm = expdata1.analysis_results()[2].value.value
epc_est_fm_err = expdata1.analysis_results()[2].value.stderr
EPG_dic = {}
for i in range(3,6):
EPG_key = expdata1.analysis_results()[i].name
EPG_dic[EPG_key] = expdata1.analysis_results()[i].value.value
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate =''
# get count data
Y = bf.get_GSP_counts(expdata1._data, len(lengths),range(num_samples))
expdata1._data[1]
experiment_type = expdata1._data[0]['metadata']['experiment_type']
physical_qubits = expdata1._data[0]['metadata']['physical_qubits']
shots = expdata1._data[0]['shots']
#build model
pooled_model = bf.build_bayesian_model(model_type="pooled",Y=Y,
shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov)
pm.model_to_graphviz(pooled_model)
trace_p = bf.get_trace(pooled_model, target_accept = 0.95)
# backend's recorded EPG
print(RBUtils.get_error_dict_from_backend(backend, qubits))
bf.RB_bayesian_results(pooled_model, trace_p, lengths,
epc_est_fm,
epc_est_fm_err,
experiment_type,
scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic,
routine = 'build_bayesian_model')
#build model
hierarchical_model = bf.build_bayesian_model(model_type="h_sigma",Y=Y,
shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov,
sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015)
pm.model_to_graphviz(hierarchical_model)
trace_h = bf.get_trace(hierarchical_model, target_accept = 0.99)
# backend's recorded EPG
print(RBUtils.get_error_dict_from_backend(backend, qubits))
bf.RB_bayesian_results(hierarchical_model, trace_h, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic, routine = 'build_bayesian_model')
# describe RB experiment
interleaved_gate =''
physical_qubits = qubits = (1,4)
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ # defined for the 2-qubit run
lengths = np.arange(1, 200, 30)
lengths_1_qubit = np.arange(1, 2500, 250)
num_samples = 10
seed = 1010
# Run a 1-qubit RB expriment on each qubit to determine the error-per-gate of 1-qubit gates
expdata_1q = {}
epg_1q = []
for qubit in qubits:
exp = StandardRB([qubit], lengths_1_qubit, num_samples=num_samples, seed=seed)
expdata = exp.run(backend).block_for_results()
expdata_1q[qubit] = expdata
epg_1q += expdata.analysis_results()
# Run an RB experiment on qubits 1, 4
exp2 = StandardRB(qubits, lengths, num_samples=num_samples, seed=seed)
# Use the EPG data of the 1-qubit runs to ensure correct 2-qubit EPG computation
exp2.set_analysis_options(epg_1_qubit=epg_1q)
# Run the 2-qubit experiment
expdata2 = exp2.run(backend).block_for_results()
# View result data
results2 = expdata2.analysis_results()
# View result data
display(expdata2.figure(0))
for result in results2:
print(result)
# Compare the computed EPG of the cx gate with the backend's recorded cx gate error:
expected_epg = RBUtils.get_error_dict_from_backend(backend, qubits)[(qubits, 'cx')]
exp2_epg = expdata2.analysis_results("EPG_cx").value
print("Backend's reported EPG of the cx gate:", expected_epg)
print("Experiment computed EPG of the cx gate:", exp2_epg)
popt = expdata2.analysis_results()[0].value.value
pcov = expdata2.analysis_results()[0].extra['covariance_mat']
epc_est_fm = expdata2.analysis_results()[2].value.value
epc_est_fm_err = expdata2.analysis_results()[2].value.stderr
EPG_dic = {}
EPG_key = 'cx' #expdata2.analysis_results()[3].name
EPG_dic[EPG_key] = expdata2.analysis_results()[3].value.value
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
desired_gate ='cx'
t = None # enter t in datetime format if necessary
e_list = dv.gate_error_values(backend.properties()) # use properties(datetime=t) if t is defined
epc_calib = np.nan
for tuple_e in e_list:
if tuple_e[0] == 'cx' and tuple_e[1] == physical_qubits:
epc_calib = tuple_e[2]
print('EPC calibration: {0:.6f}'.format(epc_calib))
# get count data
Y = bf.get_GSP_counts(expdata2._data, len(lengths),range(num_samples))
experiment_type = expdata2._data[0]['metadata']['experiment_type']
physical_qubits = expdata2._data[0]['metadata']['physical_qubits']
shots = expdata2._data[0]['shots']
#build model
S2QBp_model = bf.build_bayesian_model(model_type="pooled",Y=Y,
shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov)
pm.model_to_graphviz(S2QBp_model)
trace_p2 = bf.get_trace(S2QBp_model, target_accept = 0.95)
bf.RB_bayesian_results(S2QBp_model, trace_p2, lengths,
epc_est_fm,
epc_est_fm_err,
experiment_type,
scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic,
routine = 'build_bayesian_model')
#build model
S2QBh_model = bf.build_bayesian_model(model_type="h_sigma",Y=Y,shots=shots,m_gates=lengths,
popt = popt,
pcov = pcov,
sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015)
pm.model_to_graphviz(S2QBh_model)
trace_h2 = bf.get_trace(S2QBh_model)
bf.RB_bayesian_results(S2QBh_model, trace_h2, lengths,
epc_est_fm,
epc_est_fm_err,
experiment_type,
scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
EPG_dic = EPG_dic,
routine = 'build_bayesian_model')
# describe RB experiment
interleaved_gate = "x"
qubits = [0]
interleaved_circuit = circuits.XGate()
lengths = np.arange(1, 2500, 250)
num_samples = 10
seed = 1010
# Run an interleaved RB experiment
int_exp1 = InterleavedRB(interleaved_circuit, qubits,
lengths, num_samples=num_samples, seed=seed)
# Run
int_expdata1 = int_exp1.run(backend).block_for_results()
int_results1 = int_expdata1.analysis_results()
# View result data
display(int_expdata1.figure(0))
for result in int_results1:
print(result)
popt = int_expdata1.analysis_results()[0].value.value
pcov = int_expdata1.analysis_results()[0].extra['covariance_mat']
popt[2] = popt[1]/popt[2] # replace alpha_C by p_tilde
# WIP rigorously the covariance matrix could be modified too if used
epc_est_fm = int_expdata1.analysis_results()[3].value.value
epc_est_fm_err = int_expdata1.analysis_results()[3].value.stderr
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate ='x'
# get count data
Y1 = bf.get_GSP_counts(int_expdata1._data, len(lengths),
range(0,2*num_samples-1,2))
Y2 = bf.get_GSP_counts(int_expdata1._data, len(lengths),
range(1,2*num_samples,2))
int_expdata1._data[1]
experiment_type = int_expdata1._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata1._data[0]['metadata']['physical_qubits']
shots = int_expdata1._data[0]['shots']
Y=np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde1 = bf.build_bayesian_model("tilde",Y=Y,shots=shots,
m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI=RvsI,IvsR=IvsR)
pm.model_to_graphviz(tilde1)
trace_t = bf.get_trace(tilde1)
# Look up the backend's calibrated error for the interleaved gate.
t = None # enter t in datetime format if necessary
e_list = dv.gate_error_values(backend.properties()) # use properties(datetime=t) if t is defined
epc_calib = np.nan  # stays NaN when no calibration entry matches
for tuple_e in e_list:
    # each entry is (gate_name, qubit_list, error_value)
    if tuple_e[0] == interleaved_gate and tuple_e[1] == qubits:
        # BUG FIX: the chained assignment "epc_calib = np.nan = tuple_e[2]"
        # also rebound numpy's nan module attribute for the whole session.
        epc_calib = tuple_e[2]
print('EPC calibration: {0:.6f}'.format(epc_calib))
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
# epc_calib = 2.307E-4 + (23.6-7)*(2.193E-4 - 2.307E-4)/24
bf.RB_bayesian_results(tilde1, trace_t, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
import importlib
importlib.reload(bf)
Y=np.hstack((Y1,Y2))
RvsI_h = np.ravel(np.vstack((np.ones_like(lengths),np.zeros_like(lengths))))
IvsR_h = np.ravel(np.vstack((np.zeros_like(lengths),np.ones_like(lengths))))
tilde2 = bf.build_bayesian_model("h_tilde",Y=Y,shots=shots, m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI = RvsI_h, IvsR = IvsR_h,
sigma_theta=0.001,sigma_theta_l=0.0005,sigma_theta_u=0.0015)
pm.model_to_graphviz(tilde2)
trace_t3 = bf.get_trace(tilde2, target_accept = .95)
bf.RB_bayesian_results(tilde2, trace_t3, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
# describe RB experiment
interleaved_gate = "cx"
physical_qubits = qubits = [1,4]
interleaved_circuit = circuits.CXGate()
lengths = np.arange(1, 200, 30)
num_samples = 10
seed = 1010
# Look up the backend's calibrated error for the interleaved cx gate.
t = None # enter t in datetime format if necessary
e_list = dv.gate_error_values(backend.properties()) # use properties(datetime=t) if t is defined
epc_calib = np.nan  # stays NaN when no calibration entry matches
for tuple_e in e_list:
    # each entry is (gate_name, qubit_list, error_value)
    if tuple_e[0] == interleaved_gate and tuple_e[1] == physical_qubits:
        # BUG FIX: the chained assignment "epc_calib = np.nan = tuple_e[2]"
        # also rebound numpy's nan module attribute for the whole session.
        epc_calib = tuple_e[2]
print('EPC calibration: {0:.6f}'.format(epc_calib))
# Run an interleaved RB experiment
int_exp2 = InterleavedRB(interleaved_circuit, qubits,
lengths, num_samples=num_samples, seed=seed)
# Run
int_expdata2 = int_exp2.run(backend).block_for_results()
int_results2 = int_expdata2.analysis_results()
# View result data
display(int_expdata2.figure(0))
for result in int_results2:
print(result)
popt = int_expdata2.analysis_results()[0].value.value
pcov = int_expdata2.analysis_results()[0].extra['covariance_mat']
popt[2] = popt[1]/popt[2] # replace alpha_C by p_tilde
# WIP rigorously the covariance matrix could be modified too if used
epc_est_fm = int_expdata2.analysis_results()[3].value.value
epc_est_fm_err = int_expdata2.analysis_results()[3].value.stderr
nQ = len(qubits)
scale = (2 ** nQ - 1) / 2 ** nQ
interleaved_gate ='cx'
# get count data
Y1 = bf.get_GSP_counts(int_expdata2._data, len(lengths),
range(0,2*num_samples-1,2))
Y2 = bf.get_GSP_counts(int_expdata2._data, len(lengths),
range(1,2*num_samples,2))
int_expdata2._data[1]
experiment_type = int_expdata2._data[0]['metadata']['experiment_type']
physical_qubits = int_expdata2._data[0]['metadata']['physical_qubits']
shots = int_expdata2._data[0]['shots']
# example of interpolated EPC_cal for hardware experiments
# EPC0 + (t_exp - tO) * (EPC1 - EPC0) / (t1 - t0)
# code here:
# epc_calib = 2.307E-4 + (23.6-7)*(2.193E-4 - 2.307E-4)/24
Y = np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde3 = bf.build_bayesian_model("tilde",Y=Y,shots=shots, m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI=RvsI,IvsR=IvsR)
pm.model_to_graphviz(tilde3)
trace_t3 = bf.get_trace(tilde3)
bf.RB_bayesian_results(tilde3, trace_t3, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
import importlib
importlib.reload(bf)
# use 2m length array
RvsI_h = np.ravel(np.vstack((np.ones_like(lengths),np.zeros_like(lengths))))
IvsR_h = np.ravel(np.vstack((np.zeros_like(lengths),np.ones_like(lengths))))
tilde4 = bf.build_bayesian_model("h_tilde",Y=np.hstack((Y1,Y2)),
shots=shots, m_gates=lengths,
popt = popt,
pcov = pcov,
RvsI = RvsI_h, IvsR = IvsR_h,
sigma_theta=0.005,sigma_theta_l=0.001,sigma_theta_u=0.05)
pm.model_to_graphviz(tilde4)
trace_t4 = bf.get_trace(tilde4, target_accept = .99)
bf.RB_bayesian_results(tilde4, trace_t4, lengths,
epc_est_fm, epc_est_fm_err, experiment_type, scale,
num_samples, Y, shots, physical_qubits, interleaved_gate, backend,
epc_calib = epc_calib, Y1 = Y1, Y2 = Y2,
routine = 'build_bayesian_model')
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error, thermal_relaxation_error
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
import copy
# import the bayesian packages
import pymc3 as pm
import arviz as az
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
from qiskit.tools.monitor import job_monitor
device = provider.get_backend('ibmq_lima')
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
def obtain_priors_and_data_from_fitter(printout = True):
    """Derive Bayesian priors and the count matrix from the ignis RBFitter.

    Reads module-level globals: nCliffs, shots, rbfit (private attrs).
    Returns (m_gates, Y, alpha_ref, alpha_lower, alpha_upper,
    mu_AB, cov_AB, sigma_theta).
    """
    m_gates = copy.deepcopy(nCliffs)
    # We choose the count matrix corresponding to 2 Qubit RB
    Y = (np.array(rbfit._raw_data[0])*shots).astype(int)
    # alpha prior and bounds (+-2 sigma around the LSF value)
    alpha_ref = rbfit._fit[0]['params'][1]
    alpha_lower = alpha_ref - 2*rbfit._fit[0]['params_err'][1] # modified for real
    alpha_upper = alpha_ref + 2*rbfit._fit[0]['params_err'][1] # modified for real
    # priors for A and B (fit params with alpha removed; variances from errors)
    mu_AB = np.delete(rbfit._fit[0]['params'],1)
    cov_AB=np.delete(rbfit._fit[0]['params_err'],1)**2
    # prior for sigma_theta (spread of the hierarchical Beta):
    sigma_theta = 0.004
    if printout:
        print("priors:\nalpha_ref",alpha_ref)
        print("alpha_lower", alpha_lower, "alpha_upper", alpha_upper)
        print("A,B", mu_AB, "\ncov A,B", cov_AB)
        print("sigma_theta", sigma_theta)
    return m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta
def get_bayesian_model(model_type):
    """Build a PyMC3 model of RB ground-state survival counts.

    model_type: "pooled" (binomial likelihood directly on the decay
    curve) or "hierarchical" (per-length Beta-distributed survival
    probability centred on the curve).  Reads module-level globals:
    m_gates, Y, shots, alpha_ref, alpha_lower, alpha_upper, mu_AB,
    cov_AB, sigma_theta.
    """
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RB_model = pm.Model()
    with RB_model:
        #Priors for unknown model parameters
        alpha = pm.Uniform("alpha",lower=alpha_lower,
                           upper=alpha_upper, testval = alpha_ref)
        # A and B are positive: bound the multivariate normal at 0
        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)
        AB = BoundedMvNormal("AB", mu=mu_AB,testval = mu_AB,
                             cov= np.diag(cov_AB),
                             shape = (2))
        # Expected value of outcome (the RB decay curve)
        GSP = AB[0]*alpha**m_gates + AB[1]
        if model_type == "pooled":
            total_shots = np.full(Y.shape, shots)
            theta = GSP
        elif model_type == "hierarchical":
            total_shots = np.full(Y.shape, shots)
            # one latent survival probability per Clifford length,
            # centred on the decay curve with spread sigma_theta
            theta = pm.Beta("GSP",
                            mu=GSP,
                            sigma = sigma_theta,
                            shape = Y.shape[1])
        # Likelihood (sampling distribution) of observations
        p = pm.Binomial("Counts", p=theta, observed=Y,
                        n = total_shots)
    return RB_model
def get_trace(RB_model):
    """Sample the posterior of *RB_model* and show the ArviZ trace plot.

    Gradient-based (NUTS) sampling: 2000 draws after 10000 tuning steps,
    target_accept=0.9, returned as an InferenceData object.
    See https://docs.pymc.io/notebooks/sampler-stats.html and
    https://docs.pymc.io/notebooks/api_quickstart.html
    """
    with RB_model:
        idata = pm.sample(draws = 2000, tune= 10000, target_accept=0.9, return_inferencedata=True)
    with RB_model:
        az.plot_trace(idata);
    return idata
def get_summary(RB_model, trace, hdi_prob=.94, kind='all'):
    """Return the ArviZ summary table for *trace*, rounded to 4 decimals.

    hdi_prob=.94 is the ArviZ default; kind selects 'stats', 'diagnostics'
    or 'all'.
    """
    with RB_model:
        return az.summary(trace, round_to=4, hdi_prob=hdi_prob, kind=kind)
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    """Map the RB decay parameter to error-per-Clifford (2-qubit scale 3/4)."""
    depolarization = 1 - alpha
    return 3*depolarization/4
def get_EPC_and_legends(azs):
    """Compute Bayesian EPC (+error) from an ArviZ summary, plus legends.

    azs: az.summary result with 'mean'/'sd' entries for 'alpha'.
    Reads globals rbfit and pred_epc for the fitter/predicted legends.
    """
    EPC_Bayes = alpha_to_EPC(azs['mean']['alpha'])
    # error taken as the EPC shift produced by a +1 sigma move of alpha
    EPC_Bayes_err = EPC_Bayes - alpha_to_EPC(azs['mean']['alpha']+azs['sd']['alpha'])
    Bayes_legend ="EPC Bayes {0:.5f} ({1:.5f})".format(EPC_Bayes, EPC_Bayes_err)
    # NOTE(review): mixes public rbfit.fit with private rbfit._fit -- same
    # underlying data, but the access style should probably be unified.
    Fitter_legend ="EPC Fitter {0:.5f} ({1:.5f})".format(rbfit.fit[0]['epc']\
                                                ,rbfit._fit[0]['epc_err'])
    pred_epc_legend = "EPC predicted {0:.5f}".format(pred_epc)
    return EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend, pred_epc_legend
def EPC_compare_fitter_to_bayes(RB_model, azs, trace):
    """Plot the posterior EPC distribution against the frequentist estimate.

    Draws the alpha posterior transformed to EPC and marks the fitter EPC
    with a red vertical line.  Reads global alpha_ref.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend, pred_epc_legend = get_EPC_and_legends(azs)
    with RB_model:
        az.plot_posterior(trace, var_names=['alpha'], round_to=4,
                          transform = alpha_to_EPC, point_estimate=None)
    plt.title("Error per Clifford")
    plt.axvline(x=alpha_to_EPC(alpha_ref),color='red')
    #plt.axvline(x=pred_epc,color='green') # WIP
    #plt.legend((Bayes_legend, "Higher density interval",Fitter_legend, pred_epc_legend), fontsize=10 )# WIP
    plt.legend((Bayes_legend, "Higher density interval",Fitter_legend), fontsize=10 )
    plt.show()
def GSP_compare_fitter_to_bayes(RB_model, azs):
    """Plot ground-state population vs Clifford length for both models.

    Shows mean observed frequencies, the Bayesian curve (posterior means),
    the fitter curve (priors), and per-seed data points (x markers shifted
    by -0.25 for visibility).  Reads globals m_gates, Y, shots, mu_AB,
    alpha_ref, nseeds.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend,_ = get_EPC_and_legends(azs)
    # plot ground state population ~ Clifford length
    fig, axes = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
    axes.set_ylabel("Ground State Population")
    axes.set_xlabel("Clifford Length")
    axes.plot(m_gates, np.mean(Y/shots,axis=0), 'r.')
    axes.plot(m_gates,azs['mean']['AB[0]']*azs['mean']['alpha']**m_gates+azs['mean']['AB[1]'],'--')
    #axes.plot(m_gates,azs['mean']['GSP'],'--') # WIP
    #axes.errorbar(m_gates, azs['mean']['GSP'], azs['sd']['GSP'], linestyle='None', marker='^') # WIP
    axes.plot(m_gates,mu_AB[0]*np.power(alpha_ref,m_gates)+mu_AB[1],':')
    for i_seed in range(nseeds):
        plt.scatter(m_gates-0.25, Y[i_seed,:]/shots, label = "data", marker="x")
    axes.legend(["Mean Observed Frequencies",
                 "Bayesian Model\n"+Bayes_legend,
                 "Fitter Model\n"+Fitter_legend],fontsize=12)
    #axes.set_title('2 Qubit RB with T1/T2 Noise', fontsize=18) # WIP
def get_predicted_EPC(error_source):
    """Predict the 2-qubit EPC from the noise model or from T1/T2 limits.

    error_source: "depolarization" (known depolarizing errors of the
    simulated noise model) or "from_T1_T2" (coherence-limited gate
    errors).  Reads globals transpile_list, xdata, basis_gates, rb_opts,
    rb_pattern, p1Q, p2Q, t1, t2, gate1Q, gate2Q.
    NOTE(review): any other error_source leaves pred_epc unbound and
    raises UnboundLocalError at the return -- confirm whether an explicit
    ValueError is wanted.
    """
    #Count the number of single and 2Q gates in the 2Q Cliffords
    gates_per_cliff = rb.rb_utils.gates_per_clifford(transpile_list,xdata[0],basis_gates,rb_opts['rb_pattern'][0])
    for basis_gate in basis_gates:
        print("Number of %s gates per Clifford: %f "%(basis_gate ,
                                                      np.mean([gates_per_cliff[rb_pattern[0][0]][basis_gate],
                                                               gates_per_cliff[rb_pattern[0][1]][basis_gate]])))
    # Calculate the predicted epc
    # from the known depolarizing errors on the simulation
    if error_source == "depolarization":
        # Error per gate from noise model
        epgs_1q = {'u1': 0, 'u2': p1Q/2, 'u3': 2*p1Q/2}
        epg_2q = p2Q*3/4
        # NOTE(review): qubit_pair is [0, 2] here but [0, 1] in the
        # from_T1_T2 branch below -- verify which pair is intended.
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 2],
            list_epgs_1q=[epgs_1q, epgs_1q])
    # using the predicted primitive gate errors from the coherence limit
    if error_source == "from_T1_T2":
        # Predicted primitive gate errors from the coherence limit
        u2_error = rb.rb_utils.coherence_limit(1,[t1],[t2],gate1Q)
        u3_error = rb.rb_utils.coherence_limit(1,[t1],[t2],2*gate1Q)
        epg_2q = rb.rb_utils.coherence_limit(2,[t1,t1],[t2,t2],gate2Q)
        epgs_1q = {'u1': 0, 'u2': u2_error, 'u3': u3_error}
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 1],
            list_epgs_1q=[epgs_1q, epgs_1q])
    return pred_epc
def get_count_data(result_list):
    """Observed ground-state counts per seed and Clifford length.

    Reads globals nQ (qubit count) and nCliffs.  For 3 qubits both '000'
    and '100' are summed because q2 is measured into c1.
    """
    matrix = []
    for result in result_list:
        row = []
        for c_index in range(len(nCliffs)):
            if nQ == 2:
                targets = ['00']
            elif nQ == 3:
                targets = ['000', '100'] # because q2 measured in c1
            counts = result.get_counts()[c_index]
            row.append(sum(counts[b] for b in targets))
        matrix.append(row)
    return np.array(matrix)
#Number of qubits
nQ = 2
#There are 2 qubits: Q0,Q1.
#Number of seeds (random sequences)
nseeds = 10 # more data for the Rev. Mr. Bayes
#Number of Cliffords in the sequence (start, stop, steps)
nCliffs = np.arange(1,200,20)
#2Q RB Q0,Q1
rb_pattern = [[0,1]]
length_multiplier = 1
rb_opts = {}
rb_opts ['length_vector'] = nCliffs
rb_opts ['nseeds'] = nseeds
rb_opts ['rb_pattern'] = rb_pattern
rb_opts ['length_multiplier'] = length_multiplier
rb_circs , xdata = rb.randomized_benchmarking_seq(**rb_opts )
backend = device
basis_gates = ['u1','u2','u3','cx'] # use U,CX for now
shots = 1024
result_list = []
transpile_list = []
import time
for rb_seed,rb_circ_seed in enumerate(rb_circs):
print('Compiling seed %d'%rb_seed)
rb_circ_transpile = qiskit.transpile(rb_circ_seed,
optimization_level=0,
basis_gates=basis_gates)
print('Runing seed %d'%rb_seed)
job = qiskit.execute(rb_circ_transpile,
shots=shots,
backend=backend)
job_monitor(job)
result_list.append(job.result())
transpile_list.append(rb_circ_transpile)
print("Finished Real Jobs")
print(rb_circs[0][0])
#Create an RBFitter object
rbfit = rb.RBFitter(result_list, xdata, rb_opts['rb_pattern'])
m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta =\
obtain_priors_and_data_from_fitter(printout = True)
### a check of the count matrix
np.sum((Y == (get_count_data(result_list)))*1) == Y.size
pooled = get_bayesian_model("pooled")
pm.model_to_graphviz(pooled)
trace_p = get_trace(pooled)
azp_summary = get_summary(pooled, trace_p)
azp_summary
hierarchical = get_bayesian_model("hierarchical")
pm.model_to_graphviz(hierarchical)
trace_h = get_trace(hierarchical)
azh_summary = get_summary(hierarchical, trace_h)
azh_summary
# Leave-one-out Cross-validation (LOO) comparison
df_comp_loo = az.compare({"hierarchical": trace_h, "pooled": trace_p})
df_comp_loo
az.plot_compare(df_comp_loo, insample_dev=False);
# predict EPC from the noisy model
#pred_epc = get_predicted_EPC(error_source = 'from_T1_T2') # this was for a noise model
pred_epc = 0.0165 # will not appear on graphs for real device but at this point functions need value (WIP)
print("Fake 2Q Error per Clifford: %e"%pred_epc)
EPC_compare_fitter_to_bayes(pooled, azp_summary, trace_p)
EPC_compare_fitter_to_bayes(hierarchical, azh_summary, trace_h)
GSP_compare_fitter_to_bayes(pooled, azp_summary)
GSP_compare_fitter_to_bayes(hierarchical, azh_summary)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
%load_ext watermark
%watermark -n -u -v -iv -w
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error, thermal_relaxation_error
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
import copy
# import the bayesian packages
import pymc3 as pm
import arviz as az
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
def obtain_priors_and_data_from_fitter(printout = True):
    """Derive Bayesian priors and the count matrix from the ignis RBFitter.

    Reads module-level globals: nCliffs, shots, rbfit (private attrs).
    Returns (m_gates, Y, alpha_ref, alpha_lower, alpha_upper,
    mu_AB, cov_AB, sigma_theta).
    """
    m_gates = copy.deepcopy(nCliffs)
    # We choose the count matrix corresponding to 2 Qubit RB
    Y = (np.array(rbfit._raw_data[0])*shots).astype(int)
    # alpha prior and bounds (+-5 sigma around the LSF value)
    alpha_ref = rbfit._fit[0]['params'][1]
    alpha_lower = alpha_ref - 5*rbfit._fit[0]['params_err'][1]
    alpha_upper = alpha_ref + 5*rbfit._fit[0]['params_err'][1]
    # priors for A and B (fit params with alpha removed; variances from errors)
    mu_AB = np.delete(rbfit._fit[0]['params'],1)
    cov_AB=np.delete(rbfit._fit[0]['params_err'],1)**2
    # prior for sigma_theta (spread of the hierarchical Beta):
    sigma_theta = 0.004
    if printout:
        print("priors:\nalpha_ref",alpha_ref)
        print("alpha_lower", alpha_lower, "alpha_upper", alpha_upper)
        print("A,B", mu_AB, "\ncov A,B", cov_AB)
        print("sigma_theta", sigma_theta)
    return m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta
def get_bayesian_model(model_type):
    """Build a PyMC3 model of RB ground-state survival counts.

    model_type: "pooled" (binomial likelihood directly on the decay
    curve) or "hierarchical" (per-length Beta-distributed survival
    probability centred on the curve).  Reads module-level globals:
    m_gates, Y, shots, alpha_ref, alpha_lower, alpha_upper, mu_AB,
    cov_AB, sigma_theta.
    """
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html
    RB_model = pm.Model()
    with RB_model:
        #Priors for unknown model parameters
        alpha = pm.Uniform("alpha",lower=alpha_lower,
                           upper=alpha_upper, testval = alpha_ref)
        # A and B are positive: bound the multivariate normal at 0
        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)
        AB = BoundedMvNormal("AB", mu=mu_AB,testval = mu_AB,
                             cov= np.diag(cov_AB),
                             shape = (2))
        # Expected value of outcome (the RB decay curve)
        GSP = AB[0]*alpha**m_gates + AB[1]
        if model_type == "pooled":
            total_shots = np.full(Y.shape, shots)
            theta = GSP
        elif model_type == "hierarchical":
            total_shots = np.full(Y.shape, shots)
            # one latent survival probability per Clifford length,
            # centred on the decay curve with spread sigma_theta
            theta = pm.Beta("GSP",
                            mu=GSP,
                            sigma = sigma_theta,
                            shape = Y.shape[1])
        # Likelihood (sampling distribution) of observations
        p = pm.Binomial("Counts", p=theta, observed=Y,
                        n = total_shots)
    return RB_model
def get_trace(RB_model):
    """Sample the posterior of *RB_model* and show the ArviZ trace plot.

    Gradient-based (NUTS) sampling: 2000 draws after 10000 tuning steps,
    target_accept=0.9, returned as an InferenceData object.
    See https://docs.pymc.io/notebooks/sampler-stats.html and
    https://docs.pymc.io/notebooks/api_quickstart.html
    """
    with RB_model:
        idata = pm.sample(draws = 2000, tune= 10000, target_accept=0.9, return_inferencedata=True)
    with RB_model:
        az.plot_trace(idata);
    return idata
def get_summary(RB_model, trace, hdi_prob=.94, kind='all'):
    """Return an ArviZ statistics summary for a sampled trace.

    Evaluated inside the model context. (hdi_prob=.94 is the ArviZ default.)
    """
    with RB_model:
        return az.summary(trace, round_to=4, hdi_prob=hdi_prob, kind=kind)
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
    """Convert an RB decay parameter alpha into a two-qubit error per Clifford."""
    depolarizing_strength = 1 - alpha
    return 3 * depolarizing_strength / 4
def get_EPC_and_legends(azs):
    """Compute the Bayesian EPC estimate and the plot legend strings.

    Args:
        azs: ArviZ summary table with 'mean' and 'sd' columns (rows indexed
            by variable name, e.g. 'alpha').

    Returns:
        Tuple ``(EPC_Bayes, EPC_Bayes_err, Bayes_legend, Fitter_legend,
        pred_epc_legend)``. Reads the module globals ``rbfit`` and ``pred_epc``.
    """
    EPC_Bayes = alpha_to_EPC(azs['mean']['alpha'])
    # One-sigma error propagated through the (linear) alpha -> EPC map.
    EPC_Bayes_err = EPC_Bayes - alpha_to_EPC(azs['mean']['alpha']+azs['sd']['alpha'])
    Bayes_legend ="EPC Bayes {0:.5f} ({1:.5f})".format(EPC_Bayes, EPC_Bayes_err)
    # Consistency fix: the original mixed the public `rbfit.fit` property and
    # the private `rbfit._fit` attribute in the same expression; use `_fit`
    # both times, matching the rest of this script (in qiskit-ignis the `fit`
    # property returns `_fit`, so behavior is unchanged).
    Fitter_legend ="EPC Fitter {0:.5f} ({1:.5f})".format(rbfit._fit[0]['epc']\
        ,rbfit._fit[0]['epc_err'])
    pred_epc_legend = "EPC predicted {0:.5f}".format(pred_epc)
    return EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend, pred_epc_legend
def EPC_compare_fitter_to_bayes(RB_model, azs, trace):
    """Plot the posterior EPC distribution with fitter/predicted reference lines.

    Reads the module globals ``alpha_ref`` and ``pred_epc``.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend, pred_epc_legend = get_EPC_and_legends(azs)
    with RB_model:
        # Posterior of alpha, transformed on the fly into EPC units.
        az.plot_posterior(trace, var_names=['alpha'], round_to=4,
                          transform = alpha_to_EPC, point_estimate=None)
    plt.title("Error per Clifford")
    # Reference lines: least-squares fitter EPC (red), predicted EPC (green).
    plt.axvline(x=alpha_to_EPC(alpha_ref),color='red')
    plt.axvline(x=pred_epc,color='green')
    plt.legend((Bayes_legend, "Higher density interval",Fitter_legend, pred_epc_legend), fontsize=10 )
    plt.show()
def GSP_compare_fitter_to_bayes(RB_model, azs):
    """Plot ground-state population vs Clifford length: data, Bayes and fitter curves.

    Reads the module globals ``m_gates``, ``Y``, ``shots``, ``nseeds``,
    ``mu_AB`` and ``alpha_ref``.
    """
    EPC_Bayes, EPC_Bayes_err, Bayes_legend,Fitter_legend,_ = get_EPC_and_legends(azs)
    # plot ground state population ~ Clifford length
    fig, axes = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
    axes.set_ylabel("Ground State Population")
    axes.set_xlabel("Clifford Length")
    # Mean observed survival frequency over seeds.
    axes.plot(m_gates, np.mean(Y/shots,axis=0), 'r.')
    # Bayesian posterior-mean decay curve A*alpha^m + B.
    axes.plot(m_gates,azs['mean']['AB[0]']*azs['mean']['alpha']**m_gates+azs['mean']['AB[1]'],'--')
    # Least-squares fitter decay curve.
    axes.plot(m_gates,mu_AB[0]*np.power(alpha_ref,m_gates)+mu_AB[1],':')
    # Raw per-seed data points.
    for i_seed in range(nseeds):
        plt.scatter(m_gates, Y[i_seed,:]/shots, label = "data", marker="x")
    axes.legend(["Mean Observed Frequencies",
                 "Bayesian Model\n"+Bayes_legend,
                 "Fitter Model\n"+Fitter_legend],fontsize=12)
    #axes.set_title('2 Qubit RB with T1/T2 Noise', fontsize=18)
def get_predicted_EPC(error_source):
    """Predict the 2-qubit error per Clifford from the known noise model.

    Args:
        error_source: ``"depolarization"`` (use the known depolarizing gate
            errors) or ``"from_T1_T2"`` (use the coherence-limited gate errors).

    Returns:
        The predicted 2-qubit EPC.

    Raises:
        ValueError: for an unknown ``error_source`` (previously this fell
        through and crashed with an UnboundLocalError on ``pred_epc``).

    Reads the module globals ``transpile_list``, ``xdata``, ``basis_gates``,
    ``rb_opts``, ``rb_pattern`` and the noise parameters.
    """
    #Count the number of single and 2Q gates in the 2Q Cliffords
    gates_per_cliff = rb.rb_utils.gates_per_clifford(transpile_list,xdata[0],basis_gates,rb_opts['rb_pattern'][0])
    for basis_gate in basis_gates:
        print("Number of %s gates per Clifford: %f "%(basis_gate ,
              np.mean([gates_per_cliff[rb_pattern[0][0]][basis_gate],
                       gates_per_cliff[rb_pattern[0][1]][basis_gate]])))
    # Calculate the predicted epc
    # from the known depolarizing errors on the simulation
    if error_source == "depolarization":
        # Error per gate from noise model
        epgs_1q = {'u1': 0, 'u2': p1Q/2, 'u3': 2*p1Q/2}
        epg_2q = p2Q*3/4
        # NOTE(review): qubit_pair=[0, 2] matches the 3-qubit experiment's
        # rb_pattern [[0,2],[1]]; the T1/T2 branch uses [0, 1] for the
        # 2-qubit experiment — confirm the pairs if the patterns change.
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 2],
            list_epgs_1q=[epgs_1q, epgs_1q])
    # using the predicted primitive gate errors from the coherence limit
    elif error_source == "from_T1_T2":
        # Predicted primitive gate errors from the coherence limit
        u2_error = rb.rb_utils.coherence_limit(1,[t1],[t2],gate1Q)
        u3_error = rb.rb_utils.coherence_limit(1,[t1],[t2],2*gate1Q)
        epg_2q = rb.rb_utils.coherence_limit(2,[t1,t1],[t2,t2],gate2Q)
        epgs_1q = {'u1': 0, 'u2': u2_error, 'u3': u3_error}
        pred_epc = rb.rb_utils.calculate_2q_epc(
            gate_per_cliff=gates_per_cliff,
            epg_2q=epg_2q,
            qubit_pair=[0, 1],
            list_epgs_1q=[epgs_1q, epgs_1q])
    else:
        # Fail fast instead of crashing later with an UnboundLocalError.
        raise ValueError(f"unknown error_source: {error_source!r}")
    return pred_epc
# ---- 3-qubit experiment: simultaneous 2Q RB on (Q0, Q2) and 1Q RB on Q1,
# ---- with depolarizing noise. The least-squares fitter results seed the
# ---- priors of the Bayesian models defined above.

#Number of qubits
nQ = 3
#There are 3 qubits: Q0,Q1,Q2.
#Number of seeds (random sequences)
nseeds = 8
#Number of Cliffords in the sequence (start, stop, steps)
nCliffs = np.arange(1,200,20)
#2Q RB on Q0,Q2 and 1Q RB on Q1
rb_pattern = [[0,2],[1]]
#Do three times as many 1Q Cliffords
length_multiplier = [1,3]
# NOTE(review): bare expression with no effect — likely leftover notebook-cell output.
rb_pattern[0][1]
rb_opts = {}
rb_opts['length_vector'] = nCliffs
rb_opts['nseeds'] = nseeds
rb_opts['rb_pattern'] = rb_pattern
rb_opts['length_multiplier'] = length_multiplier
rb_circs, xdata = rb.randomized_benchmarking_seq(**rb_opts)
print(rb_circs[0][0])
# Depolarizing noise on the u2/u3/cx basis gates.
noise_model = NoiseModel()
p1Q = 0.004 # this was doubled with respect to the original example
p2Q = 0.02 # this was doubled with respect to the original example
noise_model.add_all_qubit_quantum_error(depolarizing_error(p1Q, 1), 'u2')
noise_model.add_all_qubit_quantum_error(depolarizing_error(2*p1Q, 1), 'u3')
noise_model.add_all_qubit_quantum_error(depolarizing_error(p2Q, 2), 'cx')
backend = qiskit.Aer.get_backend('qasm_simulator')
basis_gates = ['u1','u2','u3','cx'] # use U,CX for now
shots = 1024
result_list = []
transpile_list = []
import time
# Transpile and simulate each random seed's circuit batch.
for rb_seed,rb_circ_seed in enumerate(rb_circs):
    print('Compiling seed %d'%rb_seed)
    rb_circ_transpile = qiskit.transpile(rb_circ_seed,
                                         basis_gates=basis_gates)
    print('Simulating seed %d'%rb_seed)
    job = qiskit.execute(rb_circ_transpile, noise_model=noise_model,
                         shots=shots,
                         backend=backend, max_parallel_experiments=0)
    result_list.append(job.result())
    transpile_list.append(rb_circ_transpile)
print("Finished Simulating")
#Create an RBFitter object
rbfit = rb.fitters.RBFitter(result_list, xdata, rb_opts['rb_pattern'])
m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta =\
    obtain_priors_and_data_from_fitter(printout = True)
# Pooled model: build, sample, summarize.
pooled = get_bayesian_model("pooled")
pm.model_to_graphviz(pooled)
trace_p = get_trace(pooled)
azp_summary = get_summary(pooled, trace_p)
azp_summary
# Hierarchical model: build, sample, summarize.
hierarchical = get_bayesian_model("hierarchical")
pm.model_to_graphviz(hierarchical)
trace_h = get_trace(hierarchical)
azh_summary = get_summary(hierarchical, trace_h)
azh_summary
# Leave-one-out Cross-validation (LOO) comparison
df_comp_loo = az.compare({"hierarchical": trace_h, "pooled": trace_p})
df_comp_loo
az.plot_compare(df_comp_loo, insample_dev=False);
# predict EPC from the noisy model
pred_epc = get_predicted_EPC(error_source = 'depolarization')
print("Predicted 2Q Error per Clifford: %e"%pred_epc)
EPC_compare_fitter_to_bayes(pooled, azp_summary, trace_p)
EPC_compare_fitter_to_bayes(hierarchical, azh_summary, trace_h)
GSP_compare_fitter_to_bayes(pooled, azp_summary)
GSP_compare_fitter_to_bayes(hierarchical, azh_summary)
# ---- 2-qubit experiment: 2Q RB on (Q0, Q1) with T1/T2 thermal-relaxation
# ---- noise; reuses the model-building helpers defined above.

#Number of qubits
nQ = 2
#There are 2 qubits: Q0,Q1.
#Number of seeds (random sequences)
nseeds = 10 # more data for the Rev. Mr. Bayes
#Number of Cliffords in the sequence (start, stop, steps)
nCliffs = np.arange(1,200,20)
#2Q RB Q0,Q1
rb_pattern = [[0,1]]
length_multiplier = 1
rb_opts = {}
rb_opts ['length_vector'] = nCliffs
rb_opts ['nseeds'] = nseeds
rb_opts ['rb_pattern'] = rb_pattern
rb_opts ['length_multiplier'] = length_multiplier
rb_circs , xdata = rb.randomized_benchmarking_seq(**rb_opts )
noise_model = NoiseModel()
#Add T1/T2 noise to the simulation
t1 = 100.
t2 = 80.
gate1Q = 0.2 # this was doubled with respect to the original example
gate2Q = 1.0 # this was doubled with respect to the original example
noise_model.add_all_qubit_quantum_error(thermal_relaxation_error(t1,t2,gate1Q), 'u2')
noise_model.add_all_qubit_quantum_error(thermal_relaxation_error(t1,t2,2*gate1Q), 'u3')
# Two-qubit cx noise: tensor product of single-qubit relaxation channels.
noise_model.add_all_qubit_quantum_error(
    thermal_relaxation_error(t1,t2,gate2Q).tensor(thermal_relaxation_error(t1,t2,gate2Q)), 'cx')
backend = qiskit.Aer.get_backend('qasm_simulator')
basis_gates = ['u1','u2','u3','cx'] # use U,CX for now
shots = 1024 # a typical experimental value
result_list = []
transpile_list = []
# Transpile and simulate each random seed's circuit batch.
for rb_seed,rb_circ_seed in enumerate(rb_circs):
    print('Compiling seed %d'%rb_seed)
    rb_circ_transpile = qiskit.transpile(rb_circ_seed, basis_gates=basis_gates)
    print('Simulating seed %d'%rb_seed)
    job = qiskit.execute(rb_circ_transpile, noise_model=noise_model, shots=shots,
                         backend=backend, max_parallel_experiments=0)
    result_list.append(job.result())
    transpile_list.append(rb_circ_transpile)
print("Finished Simulating")
print(rb_circs[0][0])
#Create an RBFitter object
rbfit = rb.RBFitter(result_list, xdata, rb_opts['rb_pattern'])
m_gates, Y, alpha_ref, alpha_lower, alpha_upper, mu_AB, cov_AB, sigma_theta =\
    obtain_priors_and_data_from_fitter(printout = True)
# Pooled model: build, sample, summarize.
pooled = get_bayesian_model("pooled")
pm.model_to_graphviz(pooled)
trace_p = get_trace(pooled)
azp_summary = get_summary(pooled, trace_p)
azp_summary
# Hierarchical model: build, sample, summarize.
hierarchical = get_bayesian_model("hierarchical")
pm.model_to_graphviz(hierarchical)
trace_h = get_trace(hierarchical)
azh_summary = get_summary(hierarchical, trace_h)
azh_summary
# Leave-one-out Cross-validation (LOO) comparison
df_comp_loo = az.compare({"hierarchical": trace_h, "pooled": trace_p})
df_comp_loo
az.plot_compare(df_comp_loo, insample_dev=False);
# predict EPC from the noisy model
pred_epc = get_predicted_EPC(error_source = 'from_T1_T2')
print("Predicted 2Q Error per Clifford: %e"%pred_epc)
EPC_compare_fitter_to_bayes(pooled, azp_summary, trace_p)
EPC_compare_fitter_to_bayes(hierarchical, azh_summary, trace_h)
GSP_compare_fitter_to_bayes(pooled, azp_summary)
GSP_compare_fitter_to_bayes(hierarchical, azh_summary)
# IPython notebook magics (not plain Python): report environment versions.
%load_ext watermark
%watermark -n -u -v -iv -w
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# Minimal Bell-state example: build a 2-qubit entangled circuit, simulate it
# on Aer's qasm simulator with 1000 shots, and print the measurement counts
# (ideally ~50/50 between '00' and '11').
import numpy as np
from qiskit import(
    QuantumCircuit,
    execute,
    Aer)
from qiskit.visualization import plot_histogram
# Use Aer's qasm_simulator
simulator = Aer.get_backend('qasm_simulator')
# Create a Quantum Circuit acting on the q register
circuit = QuantumCircuit(2, 2)
# Add a H gate on qubit 0
circuit.h(0)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1
circuit.cx(0, 1)
# Map the quantum measurement to the classical bits
circuit.measure([0,1], [0,1])
# Execute the circuit on the qasm simulator
job = execute(circuit, simulator, shots=1000)
# Grab results from the job
result = job.result()
# Returns counts
counts = result.get_counts(circuit)
print(counts)
# Draw the circuit
circuit.draw()
print(circuit)
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Copyright Alpine Quantum Technologies GmbH 2023
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Basic example with the Qiskit AQT provider and the noisy offline simulator.
Creates a 2-qubit GHZ state.
"""
import qiskit
from qiskit import QuantumCircuit
from qiskit_aqt_provider.aqt_provider import AQTProvider
if __name__ == "__main__":
    # Ways to specify an access token (in precedence order):
    # - as argument to the AQTProvider initializer
    # - in the AQT_TOKEN environment variable
    # - if none of the above exists, default to an empty string, which restricts access
    #   to the default workspace only.
    provider = AQTProvider("token")
    # The backends() method lists all available computing backends. Printing it
    # renders it as a table that shows each backend's containing workspace.
    print(provider.backends())
    # Retrieve a backend by providing search criteria. The search must have a single
    # match. For example:
    backend = provider.get_backend("offline_simulator_noise", workspace="default")
    # Create a 2-qubit GHZ state
    qc = QuantumCircuit(2)
    qc.h(0)
    qc.cx(0, 1)
    qc.measure_all()
    # Run on the noisy offline simulator and block until the result is ready.
    result = qiskit.execute(qc, backend, shots=200).result()
    if result.success:
        # due to the noise, also the states '01' and '10' may be populated!
        print(result.get_counts())
    else: # pragma: no cover
        print(result.to_dict()["error"])
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, Alpine Quantum Technologies GmbH 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import uuid
from collections import Counter, defaultdict, namedtuple
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Any,
ClassVar,
DefaultDict,
Dict,
List,
NoReturn,
Optional,
Set,
Union,
)
import numpy as np
from qiskit import QuantumCircuit
from qiskit.providers import JobV1
from qiskit.providers.jobstatus import JobStatus
from qiskit.result.result import Result
from qiskit.utils.lazy_tester import contextlib
from tqdm import tqdm
from typing_extensions import Self, TypeAlias, assert_never
from qiskit_aqt_provider import api_models_generated
from qiskit_aqt_provider.aqt_options import AQTOptions
if TYPE_CHECKING: # pragma: no cover
from qiskit_aqt_provider.aqt_resource import AQTResource
# Tags for the status of AQT API jobs
@dataclass
class JobFinished:
    """The job finished successfully."""

    status: ClassVar = JobStatus.DONE
    # Per-circuit samples: circuit index -> shots, each shot a list of qubit states.
    results: Dict[int, List[List[int]]]
@dataclass
class JobFailed:
    """An error occurred during the job execution."""

    status: ClassVar = JobStatus.ERROR
    # Error message reported by the API.
    error: str
class JobQueued:
    """The job is queued."""

    status: ClassVar = JobStatus.QUEUED
@dataclass
class JobOngoing:
    """The job is running."""

    status: ClassVar = JobStatus.RUNNING
    # Number of circuits already evaluated.
    finished_count: int
class JobCancelled:
    """The job was cancelled."""

    # Fixed: was `status = ClassVar = JobStatus.CANCELLED`, a chained
    # assignment that rebound the *name* `ClassVar` in class scope instead of
    # annotating `status`. Now a proper ClassVar annotation, matching the
    # sibling status classes above.
    status: ClassVar = JobStatus.CANCELLED
JobStatusPayload: TypeAlias = Union[JobQueued, JobOngoing, JobFinished, JobFailed, JobCancelled]
@dataclass(frozen=True)
class Progress:
    """Progress information of a job."""

    finished_count: int
    """Number of completed circuits."""
    total_count: int
    """Total number of circuits in the job."""
@dataclass
class _MockProgressBar:
"""Minimal tqdm-compatible progress bar mock."""
total: int
"""Total number of items in the job."""
n: int = 0
"""Number of processed items."""
def update(self, n: int = 1) -> None:
"""Update the number of processed items by `n`."""
self.n += n
def __enter__(self) -> Self:
return self
def __exit__(*args) -> None:
...
class AQTJob(JobV1):
    """Qiskit job handle for circuits submitted to an AQT API resource.

    The remote state is mirrored in ``status_payload``, a tagged union of
    JobQueued / JobOngoing / JobFinished / JobFailed / JobCancelled.
    """

    # Narrow the backend type for this provider.
    _backend: "AQTResource"

    def __init__(
        self,
        backend: "AQTResource",
        circuits: List[QuantumCircuit],
        options: AQTOptions,
    ):
        """Initialize a job instance.

        Args:
            backend: backend to run the job on
            circuits: list of circuits to execute
            options: overridden resource options for this job.
        """
        # The job id starts empty; it is assigned by submit().
        super().__init__(backend, "")
        self.circuits = circuits
        self.options = options
        self.status_payload: JobStatusPayload = JobQueued()

    def submit(self) -> None:
        """Submit this job for execution.

        Raises:
            RuntimeError: this job was already submitted.
        """
        if self.job_id():
            raise RuntimeError(f"Job already submitted (ID: {self.job_id()})")
        job_id = self._backend.submit(self.circuits, self.options.shots)
        self._job_id = str(job_id)

    def status(self) -> JobStatus:
        """Query the job's status.

        Returns:
            JobStatus: aggregated job status for all the circuits in this job.
        """
        payload = self._backend.result(uuid.UUID(self.job_id()))
        # Map the API response variants onto the local tagged status union.
        if isinstance(payload, api_models_generated.JobResponseRRQueued):
            self.status_payload = JobQueued()
        elif isinstance(payload, api_models_generated.JobResponseRROngoing):
            self.status_payload = JobOngoing(finished_count=payload.response.finished_count)
        elif isinstance(payload, api_models_generated.JobResponseRRFinished):
            self.status_payload = JobFinished(
                results={
                    int(circuit_index): [[sample.__root__ for sample in shot] for shot in shots]
                    for circuit_index, shots in payload.response.result.items()
                }
            )
        elif isinstance(payload, api_models_generated.JobResponseRRError):
            self.status_payload = JobFailed(error=payload.response.message)
        elif isinstance(payload, api_models_generated.JobResponseRRCancelled):
            self.status_payload = JobCancelled()
        else:  # pragma: no cover
            assert_never(payload)
        return self.status_payload.status

    def progress(self) -> Progress:
        """Progress information for this job."""
        num_circuits = len(self.circuits)
        if isinstance(self.status_payload, JobQueued):
            return Progress(finished_count=0, total_count=num_circuits)
        if isinstance(self.status_payload, JobOngoing):
            return Progress(
                finished_count=self.status_payload.finished_count, total_count=num_circuits
            )
        # if the circuit is finished, failed, or cancelled, it is completed
        return Progress(finished_count=num_circuits, total_count=num_circuits)

    @property
    def error_message(self) -> Optional[str]:
        """Error message for this job (if any)."""
        if isinstance(self.status_payload, JobFailed):
            return self.status_payload.error
        return None

    def result(self) -> Result:
        """Block until all circuits have been evaluated and return the combined result.

        Success or error is signalled by the `success` field in the returned Result instance.

        Returns:
            The combined result of all circuit evaluations.
        """
        # Use a real tqdm bar only when requested; otherwise a no-op stand-in.
        if self.options.with_progress_bar:
            context: Union[tqdm[NoReturn], _MockProgressBar] = tqdm(total=len(self.circuits))
        else:
            context = _MockProgressBar(total=len(self.circuits))
        with context as progress_bar:
            def callback(
                job_id: str,  # noqa: ARG001
                status: JobStatus,  # noqa: ARG001
                job: AQTJob,
            ) -> None:
                # Advance the bar by the delta since the last poll.
                progress = job.progress()
                progress_bar.update(progress.finished_count - progress_bar.n)
            # one of DONE, CANCELLED, ERROR
            self.wait_for_final_state(
                timeout=self.options.query_timeout_seconds,
                wait=self.options.query_period_seconds,
                callback=callback,
            )
            # make sure the progress bar completes
            progress_bar.update(self.progress().finished_count - progress_bar.n)
        results = []
        if isinstance(self.status_payload, JobFinished):
            for circuit_index, circuit in enumerate(self.circuits):
                samples = self.status_payload.results[circuit_index]
                # Translate raw per-shot qubit states into Qiskit counts,
                # honoring the circuit's measurement mapping.
                meas_map = _build_memory_mapping(circuit)
                data: Dict[str, Any] = {
                    "counts": _format_counts(samples, meas_map),
                }
                if self.options.memory:
                    data["memory"] = [
                        "".join(str(x) for x in reversed(states)) for states in samples
                    ]
                results.append(
                    {
                        "shots": self.options.shots,
                        "success": True,
                        "status": JobStatus.DONE,
                        "data": data,
                        "header": {
                            "memory_slots": circuit.num_clbits,
                            "creg_sizes": [[reg.name, reg.size] for reg in circuit.cregs],
                            "qreg_sizes": [[reg.name, reg.size] for reg in circuit.qregs],
                            "name": circuit.name,
                            "metadata": circuit.metadata or {},
                        },
                    }
                )
        return Result.from_dict(
            {
                "backend_name": self._backend.name,
                "backend_version": self._backend.version,
                "qobj_id": id(self.circuits),
                "job_id": self.job_id(),
                "success": self.status_payload.status is JobStatus.DONE,
                "results": results,
                # Pass error message as metadata
                "error": self.error_message,
            }
        )
def _build_memory_mapping(circuit: QuantumCircuit) -> Dict[int, Set[int]]:
    """Collect the qubit-to-classical-bit mappings of a circuit's measurements.

    Scans the circuit for `measure` instructions. A qubit may be mapped to
    several classical bits, possibly across different classical registers.
    Only qubits that appear in a `measure` operation are present in the
    returned map.

    Parameters:
        circuit: the `QuantumCircuit` to analyze.

    Returns:
        the translation map for all measurement operations in the circuit.

    Examples:
        >>> qc = QuantumCircuit(2)
        >>> qc.measure_all()
        >>> _build_memory_mapping(qc)
        {0: {0}, 1: {1}}

        >>> qc = QuantumCircuit(2, 2)
        >>> _ = qc.measure([0, 1], [1, 0])
        >>> _build_memory_mapping(qc)
        {0: {1}, 1: {0}}

        >>> qc = QuantumCircuit(4, 6)
        >>> _ = qc.measure([0, 1, 2, 3], [2, 3, 4, 5])
        >>> _build_memory_mapping(qc)
        {0: {2}, 1: {3}, 2: {4}, 3: {5}}

        Multiple mapping of a qubit:

        >>> qc = QuantumCircuit(3, 3)
        >>> _ = qc.measure([0, 1], [0, 1])
        >>> _ = qc.measure([0], [2])
        >>> _build_memory_mapping(qc)
        {0: {0, 2}, 1: {1}}
    """
    def register_offsets(registers):
        # Map each register to the absolute offset of its first bit.
        table = {}
        base = 0
        for reg in registers:
            table[reg] = base
            base += reg.size
        return table

    qubit_offsets = register_offsets(circuit.qregs)
    clbit_offsets = register_offsets(circuit.cregs)

    mapping: DefaultDict[int, Set[int]] = defaultdict(set)
    for instruction in circuit.data:
        if instruction.operation.name != "measure":
            continue
        for qubit, clbit in zip(instruction.qubits, instruction.clbits):
            source = qubit_offsets[qubit.register] + qubit.index
            destination = clbit_offsets[clbit.register] + clbit.index
            mapping[source].add(destination)

    return dict(mapping)
def _shot_to_int(
fluorescence_states: List[int], qubit_to_bit: Optional[Dict[int, Set[int]]] = None
) -> int:
"""Format the detected fluorescence states from a single shot as an integer.
This follows the Qiskit ordering convention, where bit 0 in the classical register is mapped
to bit 0 in the returned integer. The first classical register in the original circuit
represents the least-significant bits in the interger representation.
An optional translation map from the quantum to the classical register can be applied.
If given, only the qubits registered in the translation map are present in the return value,
at the index given by the translation map.
Parameters:
fluorescence_states: detected fluorescence states for this shot
qubit_to_bit: optional translation map from quantum register to classical register positions
Returns:
integral representation of the shot result, with the translation map applied.
Examples:
Without a translation map, the natural mapping is used (n -> n):
>>> _shot_to_int([1])
1
>>> _shot_to_int([0, 0, 1])
4
>>> _shot_to_int([0, 1, 1])
6
Swap qubits 1 and 2 in the classical register:
>>> _shot_to_int([1, 0, 1], {0: {0}, 1: {2}, 2: {1}})
3
If the map is partial, only the mapped qubits are present in the output:
>>> _shot_to_int([1, 0, 1], {1: {2}, 2: {1}})
2
One can translate into a classical register larger than the
qubit register.
Warning: the classical register is always initialized to 0.
>>> _shot_to_int([1], {0: {1}})
2
>>> _shot_to_int([0, 1, 1], {0: {3}, 1: {4}, 2: {5}}) == (0b110 << 3)
True
or with a map larger than the qubit space:
>>> _shot_to_int([1], {0: {0}, 1: {1}})
1
Consider the typical example of two quantum registers (the second one contains
ancilla qubits) and one classical register:
>>> from qiskit import QuantumRegister, ClassicalRegister
>>> qr_meas = QuantumRegister(2)
>>> qr_ancilla = QuantumRegister(3)
>>> cr = ClassicalRegister(2)
>>> qc = QuantumCircuit(qr_meas, qr_ancilla, cr)
>>> _ = qc.measure(qr_meas, cr)
>>> tr_map = _build_memory_mapping(qc)
We assume that a single shot gave the result:
>>> ancillas = [1, 1, 0]
>>> meas = [1, 0]
Then the corresponding output is 0b01 (measurement qubits mapped straight
to the classical register of length 2):
>>> _shot_to_int(meas + ancillas, tr_map) == 0b01
True
One can overwrite qr_meas[1] with qr_ancilla[0]:
>>> _ = qc.measure(qr_ancilla[0], cr[1])
>>> tr_map = _build_memory_mapping(qc)
>>> _shot_to_int(meas + ancillas, tr_map) == 0b11
True
"""
tr_map = qubit_to_bit or {}
if tr_map:
# allocate a zero-initialized classical register
# TODO: support pre-initialized classical registers
clbits = max(max(d) for d in tr_map.values()) + 1
creg = [0] * clbits
for src_index, dest_indices in tr_map.items():
# the translation map could map more than just the measured qubits
with contextlib.suppress(IndexError):
for dest_index in dest_indices:
creg[dest_index] = fluorescence_states[src_index]
else:
creg = fluorescence_states.copy()
return int((np.left_shift(1, np.arange(len(creg))) * creg).sum())
def _format_counts(
    samples: List[List[int]], qubit_to_bit: Optional[Dict[int, Set[int]]] = None
) -> Dict[str, int]:
    """Aggregate the shots of one circuit evaluation into a counts dictionary.

    The returned dictionary is compatible with Qiskit's `ExperimentResultData`
    `counts` field: keys are hexadecimal string representations of the detected
    states (with the optional `QuantumRegister` to `ClassicalRegister`
    translation map applied), values are the occurrences of the keys.

    Parameters:
        samples: detected qubit fluorescence states for all shots
        qubit_to_bit: optional quantum to classical register translation map

    Returns:
        collected counts, for `ExperimentResultData`.

    Examples:
        >>> _format_counts([[1, 0, 0], [0, 1, 0], [1, 0, 0]])
        {'0x1': 2, '0x2': 1}

        >>> _format_counts([[1, 0, 0], [0, 1, 0], [1, 0, 0]], {0: {2}, 1: {1}, 2: {0}})
        {'0x4': 2, '0x2': 1}
    """
    counts: Dict[str, int] = {}
    for shot in samples:
        key = hex(_shot_to_int(shot, qubit_to_bit))
        counts[key] = counts.get(key, 0) + 1
    return counts
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, Alpine Quantum Technologies GmbH 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import warnings
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Literal,
Optional,
Type,
TypeVar,
Union,
)
from uuid import UUID
from qiskit import QuantumCircuit
from qiskit.circuit.library import RXGate, RXXGate, RZGate
from qiskit.circuit.measure import Measure
from qiskit.circuit.parameter import Parameter
from qiskit.providers import BackendV2 as Backend
from qiskit.providers import Options as QiskitOptions
from qiskit.providers.models import BackendConfiguration
from qiskit.transpiler import Target
from qiskit_aer import AerJob, AerSimulator, noise
from qiskit_aqt_provider import api_models
from qiskit_aqt_provider.aqt_job import AQTJob
from qiskit_aqt_provider.aqt_options import AQTOptions
from qiskit_aqt_provider.circuit_to_aqt import circuits_to_aqt_job
if TYPE_CHECKING: # pragma: no cover
from qiskit_aqt_provider.aqt_provider import AQTProvider
TargetT = TypeVar("TargetT", bound=Target)
def make_transpiler_target(target_cls: Type[TargetT], num_qubits: int) -> TargetT:
    """Build a Qiskit transpilation target for an AQT resource.

    Args:
        target_cls: base class to use for the returned instance
        num_qubits: maximum number of qubits supported by the resource.

    Returns:
        A Qiskit transpilation target for an AQT resource.
    """
    transpiler_target: TargetT = target_cls(num_qubits=num_qubits)

    rotation_angle = Parameter("θ")
    phase_angle = Parameter("λ")

    # The transpiler is configured to use the RX/RZ/RXX basis; a custom
    # scheduling pass later rewrites RX to R to comply with the Arnica API format.
    transpiler_target.add_instruction(RZGate(phase_angle))
    transpiler_target.add_instruction(RXGate(rotation_angle))
    transpiler_target.add_instruction(RXXGate(rotation_angle))
    transpiler_target.add_instruction(Measure())

    return transpiler_target
class AQTResource(Backend):
    """Qiskit ``BackendV2`` adapter for a remote AQT computing resource."""

    def __init__(
        self,
        provider: "AQTProvider",
        *,
        workspace_id: str,
        resource_id: str,
        resource_name: str,
        resource_type: Literal["device", "simulator", "offline_simulator"],
    ):
        """Initialize the resource.

        Args:
            provider: AQT provider that owns this resource
            workspace_id: workspace containing the resource
            resource_id: unique resource identifier (used as backend name)
            resource_name: display name of the resource
            resource_type: kind of resource behind this backend.
        """
        super().__init__(name=resource_id, provider=provider)
        self.resource_id = resource_id
        self.resource_name = resource_name
        self.resource_type = resource_type
        self.workspace_id = workspace_id
        # Share the provider's HTTP client (carries base URL and credentials).
        self._http_client = provider._http_client
        num_qubits = 20
        # Legacy BackendV1-style configuration, kept for configuration().
        self._configuration = BackendConfiguration.from_dict(
            {
                "backend_name": resource_name,
                "backend_version": 2,
                "url": provider.portal_url,
                "simulator": True,
                "local": False,
                "coupling_map": None,
                "description": "AQT trapped-ion device simulator",
                "basis_gates": ["r", "rz", "rxx"],  # the actual basis gates
                "memory": True,
                "n_qubits": num_qubits,
                "conditional": False,
                "max_shots": 200,
                "max_experiments": 1,
                "open_pulse": False,
                "gates": [
                    {"name": "rz", "parameters": ["theta"], "qasm_def": "TODO"},
                    {"name": "r", "parameters": ["theta", "phi"], "qasm_def": "TODO"},
                    {"name": "rxx", "parameters": ["theta"], "qasm_def": "TODO"},
                ],
            }
        )
        self._target = make_transpiler_target(Target, num_qubits)
        self._options = AQTOptions()

    def submit(self, circuits: List[QuantumCircuit], shots: int) -> UUID:
        """Submit a quantum circuits job to the AQT backend.

        Args:
            circuits: circuits to execute
            shots: number of repetitions per circuit.

        Returns:
            The unique identifier of the submitted job.
        """
        payload = circuits_to_aqt_job(circuits, shots)
        resp = self._http_client.post(
            f"/submit/{self.workspace_id}/{self.resource_id}", json=payload.dict()
        )
        resp.raise_for_status()
        return api_models.Response.parse_obj(resp.json()).job.job_id

    def result(self, job_id: UUID) -> api_models.JobResponse:
        """Query the result for a specific job.

        Parameters:
            job_id: The unique identifier for the target job.

        Returns:
            Full returned payload.
        """
        resp = self._http_client.get(f"/result/{job_id}")
        resp.raise_for_status()
        return api_models.Response.parse_obj(resp.json())

    def configuration(self) -> BackendConfiguration:
        # Deprecated BackendV1 accessor, kept for backward compatibility.
        warnings.warn(
            "The configuration() method is deprecated and will be removed in a "
            "future release. Instead you should access these attributes directly "
            "off the object or via the .target attribute. You can refer to qiskit "
            "backend interface transition guide for the exact changes: "
            "https://qiskit.org/documentation/apidoc/providers.html#backendv1-backendv2",
            DeprecationWarning,
        )
        return self._configuration

    def properties(self) -> None:
        # Deprecated BackendV1 accessor; this backend exposes no properties.
        warnings.warn(  # pragma: no cover
            "The properties() method is deprecated and will be removed in a "
            "future release. Instead you should access these attributes directly "
            "off the object or via the .target attribute. You can refer to qiskit "
            "backend interface transition guide for the exact changes: "
            "https://qiskit.org/documentation/apidoc/providers.html#backendv1-backendv2",
            DeprecationWarning,
        )

    @property
    def max_circuits(self) -> int:
        # Maximum number of circuits accepted per run() call.
        return 2000

    @property
    def target(self) -> Target:
        # Transpilation target (RZ/RX/RXX + measure).
        return self._target

    @classmethod
    def _default_options(cls) -> QiskitOptions:
        return QiskitOptions()

    @property
    def options(self) -> AQTOptions:
        # Resource-level options; run() may override them per job.
        return self._options

    def get_scheduling_stage_plugin(self) -> str:
        # Name of the custom transpiler scheduling-stage plugin.
        return "aqt"

    def get_translation_stage_plugin(self) -> str:
        # Name of the custom transpiler translation-stage plugin.
        return "aqt"

    def run(self, circuits: Union[QuantumCircuit, List[QuantumCircuit]], **options: Any) -> AQTJob:
        """Submit circuits for execution on this resource.

        Additional keyword arguments are treated as overrides for this resource's options.
        Keywords that are not valid options for this resource are ignored with a warning.

        Args:
            circuits: circuits to execute
            options: overrides for this resource's options.

        Returns:
            A job handle.
        """
        if not isinstance(circuits, list):
            circuits = [circuits]
        # Split overrides into known options and unknown keywords.
        valid_options = {key: value for key, value in options.items() if key in self.options}
        unknown_options = set(options) - set(valid_options)
        if unknown_options:
            for unknown_option in unknown_options:
                warnings.warn(
                    f"Option {unknown_option} is not used by this backend",
                    UserWarning,
                    stacklevel=2,
                )
        # Apply overrides to a copy so the resource-level options stay untouched.
        options_copy = self.options.copy()
        options_copy.update_options(**valid_options)
        job = AQTJob(
            self,
            circuits,
            options_copy,
        )
        job.submit()
        return job
def qubit_states_from_int(state: int, num_qubits: int) -> List[int]:
    """Convert the Qiskit state representation to the AQT states samples one.

    The AQT representation lists one 0/1 entry per qubit, least-significant
    bit first.

    Args:
        state: Qiskit quantum register state representation
        num_qubits: number of qubits in the register.

    Returns:
        AQT qubit states representation.

    Raises:
        ValueError: the passed state is too large for the passed register size.

    Examples:
        >>> qubit_states_from_int(0, 3)
        [0, 0, 0]

        >>> qubit_states_from_int(0b11, 3)
        [1, 1, 0]

        >>> qubit_states_from_int(123, 7)
        [1, 1, 0, 1, 1, 1, 1]

        >>> qubit_states_from_int(123, 3)  # doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        ValueError: Cannot represent state=123 on num_qubits=3.
    """
    if state.bit_length() > num_qubits:
        raise ValueError(f"Cannot represent {state=} on {num_qubits=}.")

    bits = []
    remaining = state
    for _ in range(num_qubits):
        bits.append(remaining & 1)
        remaining >>= 1
    return bits
@dataclass(frozen=True)
class SimulatorJob:
    """Handle for a job submitted to the local Qiskit-Aer simulator."""

    job: AerJob  # underlying Aer job handle
    circuits: List[QuantumCircuit]  # circuits submitted with this job
    shots: int  # number of repetitions per circuit

    @property
    def job_id(self) -> UUID:
        """Unique identifier of this job (Aer's hex job id parsed as a UUID)."""
        return UUID(hex=self.job.job_id())
class OfflineSimulatorResource(AQTResource):
    """AQT-compatible offline simulator resource that uses the Qiskit-Aer backend."""

    def __init__(
        self,
        provider: "AQTProvider",
        *,
        workspace_id: str,
        resource_id: str,
        resource_name: str,
        noisy: bool,
    ) -> None:
        """Initialize the offline simulator resource.

        Args:
            provider: AQT provider that owns this resource.
            workspace_id: identifier of the workspace the resource belongs to.
            resource_id: unique identifier of the resource.
            resource_name: human-readable name of the resource.
            noisy: whether to attach a depolarizing noise model to the simulator.
        """
        super().__init__(
            provider,
            workspace_id=workspace_id,
            resource_id=resource_id,
            resource_name=resource_name,
            resource_type="offline_simulator",
        )

        # last submitted job; this resource tracks a single job at a time
        self.job: Optional[SimulatorJob] = None

        if not noisy:
            noise_model = None
        else:
            # the transpiler lowers all operations to the gate set supported by the AQT API,
            # not to the resource target's one.
            noise_model = noise.NoiseModel(basis_gates=["r", "rz", "rxx"])
            noise_model.add_all_qubit_quantum_error(noise.depolarizing_error(0.003, 1), ["r"])
            noise_model.add_all_qubit_quantum_error(noise.depolarizing_error(0.01, 2), ["rxx"])

        self.simulator = AerSimulator(method="statevector", noise_model=noise_model)

    @property
    def noisy(self) -> bool:
        """Whether the underlying simulator is configured with a noise model."""
        return self.simulator.options.noise_model is not None

    def submit(self, circuits: List[QuantumCircuit], shots: int) -> UUID:
        """Submit circuits for execution on the simulator.

        Args:
            circuits: circuits to execute
            shots: number of repetitions per circuit.

        Returns:
            Unique identifier of the simulator job.
        """
        self.job = SimulatorJob(
            job=self.simulator.run(circuits, shots=shots),
            circuits=circuits,
            shots=shots,
        )
        return self.job.job_id

    def result(self, job_id: UUID) -> api_models.JobResponse:
        """Query results for a simulator job.

        Blocks until the underlying Aer job has finished, then repackages the
        counts as per-shot qubit-state samples in the AQT API format.

        Args:
            job_id: identifier of the job to retrieve results for.

        Returns:
            AQT API payload with the job results.

        Raises:
            UnknownJobError: the passed identifier doesn't correspond to a simulator job
                on this resource.
        """
        if self.job is None or job_id != self.job.job_id:
            raise api_models.UnknownJobError(str(job_id))

        qiskit_result = self.job.job.result()

        results: Dict[str, List[List[int]]] = {}
        for circuit_index, circuit in enumerate(self.job.circuits):
            samples: List[List[int]] = []

            # Use data()["counts"] instead of get_counts() to access the raw counts
            # instead of the classical memory-mapped ones.
            counts: Dict[str, int] = qiskit_result.data(circuit_index)["counts"]
            for hex_state, occurences in counts.items():
                # expand each hex-keyed count into one sample per shot
                samples.extend(
                    [
                        qubit_states_from_int(int(hex_state, 16), circuit.num_qubits)
                        for _ in range(occurences)
                    ]
                )

            results[str(circuit_index)] = samples

        return api_models.Response.finished(
            job_id=job_id,
            workspace_id=self.workspace_id,
            resource_id=self.resource_id,
            results=results,
        )
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, Alpine Quantum Technologies GmbH 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from typing import List
from numpy import pi
from qiskit import QuantumCircuit
from qiskit_aqt_provider import api_models
def _qiskit_to_aqt_circuit(circuit: QuantumCircuit) -> api_models.Circuit:
    """Convert a Qiskit `QuantumCircuit` into a payload for AQT's quantum_circuit job type.

    Only the {rz, r, rxx} basis gates plus `measure` and `barrier` are accepted,
    and measurements must come last. All angles are emitted in units of π.

    Args:
        circuit: Qiskit circuit to convert.

    Returns:
        AQT API circuit payload.
    """
    qubit_map = {bit: index for index, bit in enumerate(circuit.qubits)}

    ops: List[api_models.OperationModel] = []
    num_measurements = 0

    for instruction in circuit.data:
        operation = instruction[0]
        targets = [qubit_map[bit] for bit in instruction[1]]

        # once a measurement is seen, only further measurements are allowed
        if operation.name != "measure" and num_measurements > 0:
            raise ValueError(
                "Measurement operations can only be located at the end of the circuit."
            )

        if operation.name == "measure":
            num_measurements += 1
        elif operation.name == "barrier":
            continue
        elif operation.name == "rz":
            ops.append(
                api_models.Operation.rz(
                    phi=float(operation.params[0]) / pi,
                    qubit=targets[0],
                )
            )
        elif operation.name == "r":
            ops.append(
                api_models.Operation.r(
                    phi=float(operation.params[1]) / pi,
                    theta=float(operation.params[0]) / pi,
                    qubit=targets[0],
                )
            )
        elif operation.name == "rxx":
            ops.append(
                api_models.Operation.rxx(
                    theta=float(operation.params[0]) / pi,
                    qubits=targets[:2],
                )
            )
        else:
            raise ValueError(f"Operation '{operation.name}' not in basis gate set: {{rz, r, rxx}}")

    if not num_measurements:
        raise ValueError("Circuit must have at least one measurement operation.")

    # all measurements collapse into a single trailing API measure operation
    ops.append(api_models.Operation.measure())

    return api_models.Circuit(__root__=ops)
def circuits_to_aqt_job(circuits: List[QuantumCircuit], shots: int) -> api_models.JobSubmission:
    """Convert a list of circuits to the corresponding AQT API job request payload.

    Args:
        circuits: circuits to execute
        shots: number of repetitions per circuit.

    Returns:
        JobSubmission: AQT API payload for submitting the quantum circuits job.
    """
    payload_circuits = []
    for circuit in circuits:
        payload_circuits.append(
            api_models.QuantumCircuit(
                repetitions=shots,
                quantum_circuit=_qiskit_to_aqt_circuit(circuit),
                number_of_qubits=circuit.num_qubits,
            )
        )

    return api_models.JobSubmission(
        job_type="quantum_circuit",
        label="qiskit",
        payload=api_models.QuantumCircuits(circuits=payload_circuits),
    )
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Copyright Alpine Quantum Technologies GmbH 2023
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import math
from dataclasses import dataclass
from typing import Final, List, Optional, Tuple
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Gate, Instruction
from qiskit.circuit.library import RGate, RXGate, RXXGate, RZGate
from qiskit.circuit.tools import pi_check
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler import Target
from qiskit.transpiler.basepasses import BasePass, TransformationPass
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.transpiler.passes import Decompose, Optimize1qGatesDecomposition
from qiskit.transpiler.passmanager import PassManager
from qiskit.transpiler.passmanager_config import PassManagerConfig
from qiskit.transpiler.preset_passmanagers import common
from qiskit.transpiler.preset_passmanagers.plugin import PassManagerStagePlugin
from qiskit_aqt_provider.utils import map_exceptions
class UnboundParametersTarget(Target):
    """Marker `Target` subclass: signals (via `isinstance` checks in the stage
    plugins) that passes requiring bound parameters must be disabled."""
def bound_pass_manager(target: Target) -> PassManager:
    """Transpilation passes to apply on circuits after the parameters are bound.

    This assumes that a preset pass manager was applied to the unbound circuits
    (by setting the target to an instance of `UnboundParametersTarget`).

    Args:
        target: transpilation target.

    Returns:
        Pass manager that wraps the Rxx/Rx angles into the ranges accepted by
        the AQT API and re-synthesizes single-qubit runs.
    """
    return PassManager(
        [
            # wrap the Rxx angles
            WrapRxxAngles(),
            # decompose the substituted Rxx gates
            Decompose([f"{WrapRxxAngles.SUBSTITUTE_GATE_NAME}*"]),
            # collapse the single qubit runs as ZXZ
            Optimize1qGatesDecomposition(target=target),
            # wrap the Rx angles, rewrite as R
            RewriteRxAsR(),
        ]
    )
def rewrite_rx_as_r(theta: float) -> Instruction:
    """Instruction equivalent to Rx(θ) as R(θ, φ) with θ ∈ [0, π] and φ ∈ [0, 2π]."""
    # wrap the angle to (-π, π], then fold a negative rotation into a π phase
    wrapped = math.atan2(math.sin(theta), math.cos(theta))
    phase = math.pi if wrapped < 0.0 else 0.0
    return RGate(abs(wrapped), phase)
class RewriteRxAsR(TransformationPass):
    """Rewrite Rx(θ) as R(θ, φ) with θ ∈ [0, π] and φ ∈ [0, 2π]."""

    @map_exceptions(TranspilerError)
    def run(self, dag: DAGCircuit) -> DAGCircuit:
        """Substitute every `rx` node in *dag* in place; other gates are untouched."""
        for node in dag.gate_nodes():
            if node.name == "rx":
                (theta,) = node.op.params
                dag.substitute_node(node, rewrite_rx_as_r(float(theta)))
        return dag
class AQTSchedulingPlugin(PassManagerStagePlugin):
    """Scheduling stage for AQT backends (the "aqt" scheduling plugin)."""

    def pass_manager(
        self,
        pass_manager_config: PassManagerConfig,
        optimization_level: Optional[int] = None,  # noqa: ARG002
    ) -> PassManager:
        """Build the scheduling-stage pass manager.

        For an `UnboundParametersTarget`, no passes are run: the RX→R rewrite
        needs concrete angles and is deferred to the bound pass manager.
        """
        if isinstance(pass_manager_config.target, UnboundParametersTarget):
            return PassManager([])

        passes: List[BasePass] = [
            # The Qiskit Target declares RX/RZ as basis gates.
            # This allows decomposing any run of rotations into the ZXZ form, taking
            # advantage of the free Z rotations.
            # Since the API expects R/RZ as single-qubit operations,
            # we rewrite all RX gates as R gates after optimizations have been performed.
            RewriteRxAsR(),
        ]

        return PassManager(passes)
@dataclass(frozen=True)
class CircuitInstruction:
    """Substitute for `qiskit.circuit.CircuitInstruction`.

    Contrary to its Qiskit counterpart, this type allows
    passing the qubits as integers.
    """

    gate: Gate  # gate to apply
    qubits: Tuple[int, ...]  # indices of the qubits the gate acts on
def _rxx_positive_angle(theta: float) -> List[CircuitInstruction]:
    """List of instructions equivalent to RXX(θ) with θ >= 0."""
    rxx_abs = CircuitInstruction(RXXGate(abs(theta)), qubits=(0, 1))

    if theta >= 0:
        return [rxx_abs]

    # negative angle: conjugate RXX(|θ|) with Rz(π) on qubit 0 to flip the sign
    return [
        CircuitInstruction(RZGate(math.pi), (0,)),
        rxx_abs,
        CircuitInstruction(RZGate(math.pi), (0,)),
    ]
def _emit_rxx_instruction(theta: float, instructions: List[CircuitInstruction]) -> Instruction:
    """Collect the passed instructions into a single one labeled 'Rxx(θ)'."""
    label = f"{WrapRxxAngles.SUBSTITUTE_GATE_NAME}({pi_check(theta)})"
    container = QuantumCircuit(2, name=label)
    for entry in instructions:
        container.append(entry.gate, entry.qubits)
    return container.to_instruction()
def wrap_rxx_angle(theta: float) -> Instruction:
    """Instruction equivalent to RXX(θ) with θ ∈ [0, π/2].

    Returns a composite instruction labeled 'Rxx(θ)': an RXX with wrapped angle,
    plus Rz(π) sign-flip and Rx(π) compensation pulses as needed.
    """
    # fast path if -π/2 <= θ <= π/2
    if abs(theta) <= math.pi / 2:
        operations = _rxx_positive_angle(theta)
        return _emit_rxx_instruction(theta, operations)

    # exploit 2-pi periodicity of Rxx
    # NOTE: Python's % with a positive modulus yields a value in [0, 2π),
    # so the abs()/np.sign() calls below see a non-negative angle.
    theta %= 2 * math.pi

    if abs(theta) <= math.pi / 2:
        operations = _rxx_positive_angle(theta)
    elif abs(theta) <= 3 * math.pi / 2:
        # shift the angle by π into range and prepend Rx(π) on both qubits to compensate
        corrected_angle = theta - np.sign(theta) * math.pi
        operations = [
            CircuitInstruction(RXGate(math.pi), (0,)),
            CircuitInstruction(RXGate(math.pi), (1,)),
        ]
        operations.extend(_rxx_positive_angle(corrected_angle))
    else:
        # θ ∈ (3π/2, 2π): wrap down by a full period into (-π/2, 0)
        corrected_angle = theta - np.sign(theta) * 2 * math.pi
        operations = _rxx_positive_angle(corrected_angle)

    return _emit_rxx_instruction(theta, operations)
class WrapRxxAngles(TransformationPass):
    """Wrap Rxx angles to [-π/2, π/2]."""

    # label prefix used for the substituted composite instructions
    SUBSTITUTE_GATE_NAME: Final = "Rxx"

    @map_exceptions(TranspilerError)
    def run(self, dag: DAGCircuit) -> DAGCircuit:
        """Replace every out-of-range `rxx` node with an equivalent wrapped instruction."""
        for node in dag.gate_nodes():
            if node.name == "rxx":
                (theta,) = node.op.params

                # angle already in the accepted range: leave the node untouched
                if 0 <= float(theta) <= math.pi / 2:
                    continue

                rxx = wrap_rxx_angle(float(theta))
                dag.substitute_node(node, rxx)

        return dag
class AQTTranslationPlugin(PassManagerStagePlugin):
    """Translation stage for AQT backends (the "aqt" translation plugin)."""

    def pass_manager(
        self,
        pass_manager_config: PassManagerConfig,
        optimization_level: Optional[int] = None,  # noqa: ARG002
    ) -> PassManager:
        """Build the translation-stage pass manager.

        Uses Qiskit's default translation stage, followed by Rxx angle wrapping
        unless the target is an `UnboundParametersTarget` (angles unknown yet).
        """
        translation_pm = common.generate_translation_passmanager(
            target=pass_manager_config.target,
            basis_gates=pass_manager_config.basis_gates,
            approximation_degree=pass_manager_config.approximation_degree,
            coupling_map=pass_manager_config.coupling_map,
            backend_props=pass_manager_config.backend_properties,
            unitary_synthesis_method=pass_manager_config.unitary_synthesis_method,
            unitary_synthesis_plugin_config=pass_manager_config.unitary_synthesis_plugin_config,
            hls_config=pass_manager_config.hls_config,
        )

        if isinstance(pass_manager_config.target, UnboundParametersTarget):
            return translation_pm

        passes: List[BasePass] = [WrapRxxAngles()]

        return translation_pm + PassManager(passes)
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
from qiskit import *
from oracle_generation import generate_oracle
get_bin = lambda x, n: format(x, 'b').zfill(n)
def gen_circuits(min, max, size):
    """Generate oracle circuits and their secrets for the inclusive range [min, max].

    Each value in the range is formatted as a (size - 1)-bit string and fed to
    `generate_oracle`; the resulting circuits and secrets are returned as a
    pair of parallel lists.
    """
    circuits = []
    secrets = []

    for value in range(min, max + 1):
        bitstring = get_bin(value, size - 1)
        circuit, secret = generate_oracle(size, False, 3, bitstring)
        circuits.append(circuit)
        secrets.append(secret)

    return (circuits, secrets)
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Alpine Quantum Technologies GmbH 2023
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Dummy resources for testing purposes."""
import enum
import random
import time
import uuid
from dataclasses import dataclass, field
from typing import Dict, List, Optional
from qiskit import QuantumCircuit
from typing_extensions import assert_never
from qiskit_aqt_provider import api_models
from qiskit_aqt_provider.aqt_provider import AQTProvider
from qiskit_aqt_provider.aqt_resource import AQTResource
class JobStatus(enum.Enum):
    """AQT job lifecycle labels."""

    QUEUED = enum.auto()  # accepted, waiting for execution
    ONGOING = enum.auto()  # currently executing
    FINISHED = enum.auto()  # completed successfully
    ERROR = enum.auto()  # terminated with an error
    CANCELLED = enum.auto()  # cancelled before completion
@dataclass
class TestJob:  # pylint: disable=too-many-instance-attributes
    """Job state holder for the TestResource."""

    circuits: List[QuantumCircuit]  # submitted circuits
    shots: int  # repetitions per circuit
    status: JobStatus = JobStatus.QUEUED  # current lifecycle state
    job_id: uuid.UUID = field(default_factory=lambda: uuid.uuid4())
    time_queued: float = field(default_factory=time.time)  # wall-clock creation time
    time_submitted: float = 0.0  # wall-clock time of QUEUED -> ONGOING
    time_finished: float = 0.0  # wall-clock time of reaching a terminal state
    error_message: str = "error"  # message reported when status is ERROR
    results: Dict[str, List[List[int]]] = field(init=False)  # per-circuit random samples
    workspace: str = field(default="test-workspace", init=False)
    resource: str = field(default="test-resource", init=False)

    def __post_init__(self) -> None:
        """Calculate derived quantities."""
        # random bitstrings as fake samples: one num_clbits-long list per shot
        self.results = {
            str(circuit_index): [
                random.choices([0, 1], k=circuit.num_clbits) for _ in range(self.shots)
            ]
            for circuit_index, circuit in enumerate(self.circuits)
        }

    def submit(self) -> None:
        """Submit the job for execution."""
        self.time_submitted = time.time()
        self.status = JobStatus.ONGOING

    def finish(self) -> None:
        """The job execution finished successfully."""
        self.time_finished = time.time()
        self.status = JobStatus.FINISHED

    def error(self) -> None:
        """The job execution triggered an error."""
        self.time_finished = time.time()
        self.status = JobStatus.ERROR

    def cancel(self) -> None:
        """The job execution was cancelled."""
        self.time_finished = time.time()
        self.status = JobStatus.CANCELLED

    def response_payload(self) -> api_models.JobResponse:
        """AQT API-compatible response for the current job status."""
        if self.status is JobStatus.QUEUED:
            return api_models.Response.queued(
                job_id=self.job_id,
                workspace_id=self.workspace,
                resource_id=self.resource,
            )

        if self.status is JobStatus.ONGOING:
            return api_models.Response.ongoing(
                job_id=self.job_id,
                workspace_id=self.workspace,
                resource_id=self.resource,
                finished_count=1,
            )

        if self.status is JobStatus.FINISHED:
            return api_models.Response.finished(
                job_id=self.job_id,
                workspace_id=self.workspace,
                resource_id=self.resource,
                results=self.results,
            )

        if self.status is JobStatus.ERROR:
            return api_models.Response.error(
                job_id=self.job_id,
                workspace_id=self.workspace,
                resource_id=self.resource,
                message=self.error_message,
            )

        if self.status is JobStatus.CANCELLED:
            return api_models.Response.cancelled(
                job_id=self.job_id, workspace_id=self.workspace, resource_id=self.resource
            )

        # exhaustiveness check: all JobStatus variants are handled above
        assert_never(self.status)  # pragma: no cover
class TestResource(AQTResource):  # pylint: disable=too-many-instance-attributes
    """AQT computing resource with hooks for triggering different execution scenarios."""

    __test__ = False  # disable pytest collection

    def __init__(
        self,
        *,
        min_queued_duration: float = 0.0,
        min_running_duration: float = 0.0,
        always_cancel: bool = False,
        always_error: bool = False,
        error_message: str = "",
    ) -> None:
        """Initialize the testing resource.

        Args:
            min_queued_duration: minimum time in seconds spent by all jobs in the QUEUED state
            min_running_duration: minimum time in seconds spent by all jobs in the ONGOING state
            always_cancel: always cancel the jobs directly after submission
            always_error: always finish execution with an error
            error_message: the error message returned by failed jobs. Implies `always_error`.
        """
        super().__init__(
            AQTProvider(""),
            workspace_id="test-workspace",
            resource_id="test",
            resource_name="test-resource",
            resource_type="simulator",
        )

        self.job: Optional[TestJob] = None  # single tracked job
        self.min_queued_duration = min_queued_duration
        self.min_running_duration = min_running_duration
        self.always_cancel = always_cancel
        # Fix: coerce to bool. `always_error or error_message` previously stored
        # the error-message *string* here whenever a non-empty message was passed,
        # breaking the attribute's bool contract (truthiness happened to work).
        self.always_error = bool(always_error or error_message)
        self.error_message = error_message or str(uuid.uuid4())

    def submit(self, circuits: List[QuantumCircuit], shots: int) -> uuid.UUID:
        """Accept a job submission, optionally cancelling it immediately.

        Args:
            circuits: circuits to execute
            shots: number of repetitions per circuit.

        Returns:
            Unique identifier of the accepted job.
        """
        job = TestJob(circuits, shots, error_message=self.error_message)

        if self.always_cancel:
            job.cancel()

        self.job = job
        return job.job_id

    def result(self, job_id: uuid.UUID) -> api_models.JobResponse:
        """Return the job's current status payload, advancing its lifecycle
        once the configured minimum state durations have elapsed.

        Raises:
            UnknownJobError: `job_id` does not match the tracked job.
        """
        if self.job is None or self.job.job_id != job_id:  # pragma: no cover
            raise api_models.UnknownJobError(str(job_id))

        now = time.time()

        # QUEUED -> ONGOING after min_queued_duration
        if (
            self.job.status is JobStatus.QUEUED
            and (now - self.job.time_queued) > self.min_queued_duration
        ):
            self.job.submit()

        # ONGOING -> FINISHED or ERROR after min_running_duration
        if (
            self.job.status is JobStatus.ONGOING
            and (now - self.job.time_submitted) > self.min_running_duration
        ):
            if self.always_error:
                self.job.error()
            else:
                self.job.finish()

        return self.job.response_payload()
class DummyResource(AQTResource):
    """A non-functional resource, for testing purposes.

    Only carries identification data (all fields set to "dummy"); execution
    behavior is entirely inherited from `AQTResource`.
    """

    def __init__(self, token: str) -> None:
        """Initialize with *token* as the AQT API access token."""
        super().__init__(
            AQTProvider(token),
            workspace_id="dummy",
            resource_id="dummy",
            resource_name="dummy",
            resource_type="simulator",
        )
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, Alpine Quantum Technologies 2023
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from math import pi
import pytest
from pydantic import ValidationError
from qiskit import QuantumCircuit
from qiskit_aqt_provider import api_models
from qiskit_aqt_provider.circuit_to_aqt import circuits_to_aqt_job
def test_no_circuit() -> None:
    """Cannot convert an empty list of circuits to an AQT job request."""
    # the pydantic payload model rejects an empty circuits list at validation time
    with pytest.raises(ValidationError):
        circuits_to_aqt_job([], shots=1)
def test_empty_circuit() -> None:
    """Circuits need at least one measurement operation."""
    qc = QuantumCircuit(1)  # no gates, no measurement

    with pytest.raises(ValueError):
        circuits_to_aqt_job([qc], shots=1)
def test_just_measure_circuit() -> None:
    """Circuits with only measurement operations are valid."""
    shots = 100
    qc = QuantumCircuit(1)
    qc.measure_all()

    # all measurements collapse into one trailing API `measure` operation
    expected = api_models.JobSubmission(
        job_type="quantum_circuit",
        label="qiskit",
        payload=api_models.QuantumCircuits(
            circuits=[
                api_models.QuantumCircuit(
                    repetitions=shots,
                    number_of_qubits=1,
                    quantum_circuit=api_models.Circuit(__root__=[api_models.Operation.measure()]),
                ),
            ]
        ),
    )

    result = circuits_to_aqt_job([qc], shots=shots)

    assert result == expected
def test_valid_circuit() -> None:
    """A valid circuit with all supported basis gates."""
    qc = QuantumCircuit(2)
    qc.r(pi / 2, 0, 0)
    qc.rz(pi / 5, 1)
    qc.rxx(pi / 2, 0, 1)
    qc.measure_all()

    result = circuits_to_aqt_job([qc], shots=1)

    # payload angles are in units of π: π/2 -> 0.5, π/5 -> 0.2
    expected = api_models.JobSubmission(
        job_type="quantum_circuit",
        label="qiskit",
        payload=api_models.QuantumCircuits(
            circuits=[
                api_models.QuantumCircuit(
                    number_of_qubits=2,
                    repetitions=1,
                    quantum_circuit=api_models.Circuit(
                        __root__=[
                            api_models.Operation.r(theta=0.5, phi=0.0, qubit=0),
                            api_models.Operation.rz(phi=0.2, qubit=1),
                            api_models.Operation.rxx(theta=0.5, qubits=[0, 1]),
                            api_models.Operation.measure(),
                        ]
                    ),
                ),
            ]
        ),
    )

    assert result == expected
def test_invalid_gates_in_circuit() -> None:
    """Circuits must already be in the target basis when they are converted
    to the AQT wire format.
    """
    qc = QuantumCircuit(1)
    qc.h(0)  # not an AQT-resource basis gate
    qc.measure_all()

    with pytest.raises(ValueError, match="not in basis gate set"):
        circuits_to_aqt_job([qc], shots=1)
def test_invalid_measurements() -> None:
    """Measurement operations can only be located at the end of the circuit."""
    # a gate following a measurement must be rejected
    qc_invalid = QuantumCircuit(2, 2)
    qc_invalid.r(pi / 2, 0.0, 0)
    qc_invalid.measure([0], [0])
    qc_invalid.r(pi / 2, 0.0, 1)
    qc_invalid.measure([1], [1])

    with pytest.raises(ValueError, match="at the end of the circuit"):
        circuits_to_aqt_job([qc_invalid], shots=1)

    # same circuit as above, but with the measurements at the end is valid
    qc = QuantumCircuit(2, 2)
    qc.r(pi / 2, 0.0, 0)
    qc.r(pi / 2, 0.0, 1)
    qc.measure([0], [0])
    qc.measure([1], [1])

    result = circuits_to_aqt_job([qc], shots=1)

    expected = api_models.JobSubmission(
        job_type="quantum_circuit",
        label="qiskit",
        payload=api_models.QuantumCircuits(
            circuits=[
                api_models.QuantumCircuit(
                    number_of_qubits=2,
                    repetitions=1,
                    quantum_circuit=api_models.Circuit(
                        __root__=[
                            api_models.Operation.r(theta=0.5, phi=0.0, qubit=0),
                            api_models.Operation.r(theta=0.5, phi=0.0, qubit=1),
                            api_models.Operation.measure(),
                        ]
                    ),
                ),
            ]
        ),
    )

    assert result == expected
def test_convert_multiple_circuits() -> None:
    """Convert multiple circuits. Check that the order is conserved."""
    qc0 = QuantumCircuit(2)
    qc0.r(pi / 2, 0.0, 0)
    qc0.rxx(pi / 2, 0, 1)
    qc0.measure_all()

    qc1 = QuantumCircuit(1)
    qc1.r(pi / 4, 0.0, 0)
    qc1.measure_all()

    result = circuits_to_aqt_job([qc0, qc1], shots=1)

    # payload circuits must appear in submission order: qc0 first, then qc1
    expected = api_models.JobSubmission(
        job_type="quantum_circuit",
        label="qiskit",
        payload=api_models.QuantumCircuits(
            circuits=[
                api_models.QuantumCircuit(
                    number_of_qubits=2,
                    repetitions=1,
                    quantum_circuit=api_models.Circuit(
                        __root__=[
                            api_models.Operation.r(theta=0.5, phi=0.0, qubit=0),
                            api_models.Operation.rxx(theta=0.5, qubits=[0, 1]),
                            api_models.Operation.measure(),
                        ]
                    ),
                ),
                api_models.QuantumCircuit(
                    number_of_qubits=1,
                    repetitions=1,
                    quantum_circuit=api_models.Circuit(
                        __root__=[
                            api_models.Operation.r(theta=0.25, phi=0.0, qubit=0),
                            api_models.Operation.measure(),
                        ]
                    ),
                ),
            ],
        ),
    )

    assert result == expected
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Alpine Quantum Technologies GmbH 2023
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Run various circuits on an offline simulator controlled by an AQTResource.
This tests whether the circuit pre-conditioning and results formatting works as
expected.
"""
import re
import typing
from collections import Counter
from fractions import Fraction
from math import pi
from typing import List
import numpy as np
import pytest
import qiskit
from qiskit import ClassicalRegister, QiskitError, QuantumCircuit, QuantumRegister
from qiskit.providers.jobstatus import JobStatus
from qiskit.result import Counts
from qiskit_aer import AerSimulator
from qiskit_experiments.library import QuantumVolume
from qiskit_aqt_provider.aqt_resource import AQTResource
from qiskit_aqt_provider.test.circuits import qft_circuit
from qiskit_aqt_provider.test.fixtures import MockSimulator
from qiskit_aqt_provider.test.resources import TestResource
from qiskit_aqt_provider.test.timeout import timeout
@pytest.mark.parametrize("shots", [200])
def test_empty_circuit(shots: int, offline_simulator_no_noise: AQTResource) -> None:
    """Run an empty circuit."""
    qc = QuantumCircuit(1)
    qc.measure_all()

    job = qiskit.execute(qc, offline_simulator_no_noise, shots=shots)
    # with no gates applied, every shot measures the |0> state
    assert job.result().get_counts() == {"0": shots}
def test_circuit_success_lifecycle() -> None:
    """Go through the lifecycle of a successful single-circuit job.

    Check that the job status visits the states QUEUED, RUNNING, and DONE.
    """
    # jobs spend at least 0.5 s in each state; poll every 0.1 s
    backend = TestResource(min_queued_duration=0.5, min_running_duration=0.5)
    backend.options.update_options(query_period_seconds=0.1)

    qc = QuantumCircuit(1)
    qc.measure_all()

    job = qiskit.execute(qc, backend)
    assert job.status() is JobStatus.QUEUED

    # busy-wait for each transition, bounded by a 2 s timeout
    with timeout(2.0):
        while job.status() is JobStatus.QUEUED:
            continue

    assert job.status() is JobStatus.RUNNING

    with timeout(2.0):
        while job.status() is JobStatus.RUNNING:
            continue

    assert job.status() is JobStatus.DONE
def test_error_circuit() -> None:
    """Check that errors in circuits are reported in the `errors` field of the Qiskit
    result metadata, where the keys are the circuit job ids.
    """
    backend = TestResource(always_error=True)
    backend.options.update_options(query_period_seconds=0.1)

    qc = QuantumCircuit(1)
    qc.measure_all()

    result = qiskit.execute(qc, backend).result()

    assert result.success is False
    # NOTE(review): reaches into Result._metadata (private attribute) — there is
    # apparently no public accessor for the stored error message; verify on upgrade.
    assert backend.error_message == result._metadata["error"]
def test_cancelled_circuit() -> None:
    """Check that cancelled jobs return success = false."""
    # this resource cancels every job right after submission
    backend = TestResource(always_cancel=True)

    qc = QuantumCircuit(1)
    qc.measure_all()

    result = qiskit.execute(qc, backend).result()
    assert result.success is False
@pytest.mark.parametrize("shots", [1, 100, 200])
def test_simple_backend_run(shots: int, offline_simulator_no_noise: AQTResource) -> None:
    """Run a simple circuit with `backend.run`."""
    qc = QuantumCircuit(1)
    qc.rx(pi, 0)  # flips the qubit: all shots should read '1'
    qc.measure_all()

    # backend.run requires a pre-transpiled circuit (unlike qiskit.execute)
    trans_qc = qiskit.transpile(qc, offline_simulator_no_noise)
    job = offline_simulator_no_noise.run(trans_qc, shots=shots)

    assert job.result().get_counts() == {"1": shots}
@pytest.mark.parametrize("shots", [1, 100])
def test_simple_backend_execute(shots: int, offline_simulator_no_noise: AQTResource) -> None:
    """Run two simple circuits with `qiskit.execute()`."""
    qc0 = QuantumCircuit(2)
    qc0.rx(pi, 0)
    qc0.measure_all()

    qc1 = QuantumCircuit(2)
    qc1.rx(pi, 1)
    qc1.measure_all()

    # qiskit.execute calls the transpiler automatically
    job = qiskit.execute([qc0, qc1], backend=offline_simulator_no_noise, shots=shots)

    # bitstrings are little-endian: qubit 0 is the rightmost character
    assert job.result().get_counts() == [{"01": shots}, {"10": shots}]
# NOTE(review): the parametrize arguments instantiate both simulators at pytest
# collection time; fine for a lightweight mock, but worth confirming.
@pytest.mark.parametrize("backend", [MockSimulator(noisy=False), MockSimulator(noisy=True)])
def test_simple_backend_execute_noisy(backend: MockSimulator) -> None:
    """Execute a simple circuit on a noisy and noiseless backend. Check that the noisy backend
    is indeed noisy.
    """
    qc = QuantumCircuit(1)
    qc.rx(pi, 0)
    qc.measure_all()

    # the single qubit error is around 0.1% so to see at least one error, we need to do more than
    # 1000 shots.
    total_shots = 4000  # take some margin
    shots = 200  # maximum shots per submission
    assert total_shots % shots == 0

    # accumulate counts over multiple submissions to reach total_shots
    counts: typing.Counter[str] = Counter()
    for _ in range(total_shots // shots):
        job = qiskit.execute(qc, backend=backend, shots=shots)
        counts += Counter(job.result().get_counts())

    assert sum(counts.values()) == total_shots

    if backend.noisy:
        # noise must flip at least one shot back to '0', but only a small fraction
        assert set(counts.keys()) == {"0", "1"}
        assert counts["0"] < 0.1 * counts["1"]  # very crude
    else:
        assert set(counts.keys()) == {"1"}
@pytest.mark.parametrize("shots", [100])
def test_ancilla_qubits_mapping(shots: int, offline_simulator_no_noise: AQTResource) -> None:
    """Run a circuit with two quantum registers, with only one mapped to the classical memory."""
    qr = QuantumRegister(2)
    qr_aux = QuantumRegister(3)  # auxiliary qubits, never measured
    memory = ClassicalRegister(2)
    qc = QuantumCircuit(qr, qr_aux, memory)

    qc.rx(pi, qr[0])
    qc.ry(pi, qr[1])
    qc.rxx(pi / 2, qr_aux[0], qr_aux[1])
    qc.measure(qr, memory)

    trans_qc = qiskit.transpile(qc, offline_simulator_no_noise)
    job = offline_simulator_no_noise.run(trans_qc, shots=shots)

    # only two bits in the counts dict because memory has two bits width
    assert job.result().get_counts() == {"11": shots}
@pytest.mark.parametrize("shots", [100])
def test_multiple_classical_registers(shots: int, offline_simulator_no_noise: AQTResource) -> None:
    """Run a circuit with the final state mapped to multiple classical registers."""
    qr = QuantumRegister(5)
    memory_a = ClassicalRegister(2)
    memory_b = ClassicalRegister(3)
    qc = QuantumCircuit(qr, memory_a, memory_b)

    # flip qubits 0 (mapped to memory_a) and 3 (mapped to memory_b)
    qc.rx(pi, qr[0])
    qc.rx(pi, qr[3])
    qc.measure(qr[:2], memory_a)
    qc.measure(qr[2:], memory_b)

    trans_qc = qiskit.transpile(qc, offline_simulator_no_noise)
    job = offline_simulator_no_noise.run(trans_qc, shots=shots)

    # counts are returned as "memory_b memory_a", msb first
    assert job.result().get_counts() == {"010 01": shots}
@pytest.mark.parametrize("shots", [123])
@pytest.mark.parametrize("memory_opt", [True, False])
def test_get_memory_simple(
    shots: int, memory_opt: bool, offline_simulator_no_noise: AQTResource
) -> None:
    """Check that the raw bitstrings can be accessed for each shot via the
    get_memory() method in Qiskit's Result.

    The memory is only accessible if the `memory` option is set.
    """
    # Bell state: each shot reads either "00" or "11"
    qc = QuantumCircuit(2)
    qc.h(0)
    qc.cx(0, 1)
    qc.measure_all()

    result = qiskit.execute(qc, offline_simulator_no_noise, shots=shots, memory=memory_opt).result()

    if memory_opt:
        memory = result.get_memory()
        assert set(memory) == {"11", "00"}
        assert len(memory) == shots
    else:
        # without the memory option, get_memory() raises
        with pytest.raises(QiskitError, match=re.compile("no memory", re.IGNORECASE)):
            result.get_memory()
@pytest.mark.parametrize("shots", [123])
def test_get_memory_ancilla_qubits(shots: int, offline_simulator_no_noise: AQTResource) -> None:
    """Check that the raw bistrings returned by get_memory() in Qiskit's Result only
    contain the mapped classical bits.
    """
    qr = QuantumRegister(2)
    qr_aux = QuantumRegister(3)  # auxiliary qubits, not mapped to classical memory
    memory = ClassicalRegister(2)
    qc = QuantumCircuit(qr, qr_aux, memory)

    qc.rx(pi, qr[0])
    qc.ry(pi, qr[1])
    qc.rxx(pi / 2, qr_aux[0], qr_aux[1])
    qc.measure(qr, memory)

    job = qiskit.execute(qc, offline_simulator_no_noise, shots=shots, memory=True)
    # NOTE: rebinds the name `memory` from the ClassicalRegister to the samples list
    memory = job.result().get_memory()
    assert set(memory) == {"11"}
    assert len(memory) == shots
@pytest.mark.parametrize("shots", [123])
def test_get_memory_bit_ordering(shots: int, offline_simulator_no_noise: AQTResource) -> None:
    """Check that the bitstrings returned by the results produced by AQT jobs have the same
    bit order as the Qiskit Aer simulators.
    """
    sim = AerSimulator(method="statevector")

    # flip qubits 0 and 1 out of 3: the bitstring is deliberately asymmetric
    qc = QuantumCircuit(3)
    qc.rx(pi, 0)
    qc.rx(pi, 1)
    qc.measure_all()

    aqt_memory = (
        qiskit.execute(qc, offline_simulator_no_noise, shots=shots, memory=True)
        .result()
        .get_memory()
    )
    sim_memory = qiskit.execute(qc, sim, shots=shots, memory=True).result().get_memory()

    assert set(sim_memory) == set(aqt_memory)

    # sanity check: bitstrings are no palindromes
    assert not any(bitstring == bitstring[::-1] for bitstring in sim_memory)
@pytest.mark.parametrize(("shots", "qubits"), [(100, 5), (100, 8)])
def test_bell_states(shots: int, qubits: int, offline_simulator_no_noise: AQTResource) -> None:
    """Create a N qubits Bell state."""
    qc = QuantumCircuit(qubits)
    qc.h(0)
    # entangle every other qubit with qubit 0
    for qubit in range(1, qubits):
        qc.cx(0, qubit)
    qc.measure_all()

    job = qiskit.execute(qc, offline_simulator_no_noise, shots=shots)
    counts = job.result().get_counts()

    # only the all-zeros and all-ones outcomes may occur
    assert set(counts.keys()) == {"0" * qubits, "1" * qubits}
    assert sum(counts.values()) == shots
@pytest.mark.parametrize(("shots", "qubits"), [(100, 3)])
def test_simulator_quantum_volume(
    shots: int, qubits: int, offline_simulator_no_noise: AQTResource
) -> None:
    """Run a qiskit_experiments.library.QuantumVolume job. Check that the noiseless simulator
    has at least quantum volume 2**qubits.
    """
    experiment = QuantumVolume(list(range(qubits)), offline_simulator_no_noise, trials=100)
    # optimization_level=0 keeps the generated model circuits untouched
    experiment.set_transpile_options(optimization_level=0)
    experiment.set_run_options(shots=shots)

    job = experiment.run()
    result = job.analysis_results("quantum_volume")

    assert result.value == (1 << qubits)
    assert result.extra["success"]
def test_period_finding_circuit(offline_simulator_no_noise: AQTResource) -> None:
    """Run a period-finding circuit for the function 13**x mod 15 on the offline simulator.

    Do 20 evaluations of the 2-shot procedure and collect results. Check that the correct
    period (4) is found often enough.
    """

    # The function to find the period of
    def f(x: int) -> int:
        return pow(13, x, 15)

    def f_circuit(num_qubits: int) -> QuantumCircuit:
        """Quantum circuit for f(x) = 13^x mod 15."""
        qr_x = QuantumRegister(num_qubits, "x")
        qr_fx = QuantumRegister(4, "f(x)")  # 4 bits are enough to store any modulo 15 value
        qc = QuantumCircuit(qr_x, qr_fx)
        # NOTE(review): hard-coded oracle; appears to condition only on the two
        # lowest bits of x (consistent with the period 4) — confirm.
        qc.x(qr_fx[0])
        qc.x(qr_fx[2])
        qc.x(qr_x[0])
        qc.ccx(qr_x[0], qr_x[1], qr_fx[0])
        qc.x(qr_x[0])
        qc.ccx(qr_x[0], qr_x[1], qr_fx[1])
        qc.x(qr_x[0])
        qc.x(qr_x[1])
        qc.ccx(qr_x[0], qr_x[1], qr_fx[2])
        qc.x(qr_x[0])
        qc.ccx(qr_x[0], qr_x[1], qr_fx[3])
        qc.x(qr_x[1])
        return qc

    # Period finding circuit
    num_qubits = 8
    qr_x = QuantumRegister(num_qubits, "x")
    qr_fx = QuantumRegister(4, "f(x)")
    cr_x = ClassicalRegister(num_qubits, "c_x")
    qc = QuantumCircuit(qr_x, qr_fx, cr_x)

    # Hadamard gates for x register: uniform superposition over all x values.
    for qubit in range(num_qubits):
        qc.h(qr_x[qubit])

    # Create f(x) and QFT subcircuits, and add them to qc
    qc_f = f_circuit(num_qubits)
    qc_qft = qft_circuit(num_qubits)
    gate_f = qc_f.to_gate(label="f(x)")
    gate_qft = qc_qft.to_gate(label="QFT")
    qc.append(gate_f, range(num_qubits + 4))
    qc.append(gate_qft, range(num_qubits))

    # Measure qubits in x register
    qc.measure(qr_x, cr_x)

    def iteration() -> Counts:
        # Two shots per attempt: the classical post-processing below needs
        # two distinct measurement outcomes.
        result = qiskit.execute(qc, offline_simulator_no_noise, shots=2).result()
        return result.get_counts()

    n_attempts = 20
    results: List[bool] = []

    # run the circuits (2 shots) n_attempts times
    # and do the classical post-processing to extract the period of the function f.
    for _ in range(n_attempts):
        try:
            x1, x2 = iteration().int_outcomes().keys()
        except ValueError:  # identical results, skip
            continue

        # Continued-fraction step: approximate each outcome over 2^n by a
        # fraction with a small denominator, then combine via the LCM.
        m = num_qubits // 2
        k1 = Fraction(x1, 2**num_qubits).limit_denominator(2**m - 1)
        k2 = Fraction(x2, 2**num_qubits).limit_denominator(2**m - 1)
        b1 = k1.denominator
        b2 = k2.denominator
        r = int(np.lcm(b1, b2))
        # r is a correct period iff f repeats after r steps.
        results.append(f(r) == f(0))

    # more than 50% of the attempts were successful
    assert len(results) > n_attempts * 0.5
    # got the right result more than 50% of the successful attempts
    # this is quite loose, but doing more iterations would be annoyingly long on CI
    assert np.count_nonzero(results) > len(results) * 0.5
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Alpine Quantum Technologies GmbH 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import importlib.metadata
from math import isclose, pi
from typing import Callable
import pytest
import qiskit
from qiskit.circuit import Parameter, QuantumCircuit
from qiskit.primitives import BackendSampler, BaseSampler, Sampler
from qiskit.providers import Backend
from qiskit.quantum_info import SparsePauliOp
from qiskit.transpiler.exceptions import TranspilerError
from qiskit_aqt_provider.aqt_resource import AQTResource
from qiskit_aqt_provider.primitives import AQTSampler
from qiskit_aqt_provider.primitives.estimator import AQTEstimator
from qiskit_aqt_provider.test.circuits import assert_circuits_equal
from qiskit_aqt_provider.test.fixtures import MockSimulator
# Compare Terra versions numerically: plain comparison of version *strings*
# is lexicographic and would e.g. rank "0.9.0" above "0.24.0".
_TERRA_VERSION = tuple(
    int(part) for part in importlib.metadata.version("qiskit-terra").split(".")[:2]
)


@pytest.mark.skipif(
    _TERRA_VERSION >= (0, 24),
    reason="qiskit.opflow is deprecated in qiskit-terra>=0.24",
)
def test_circuit_sampling_opflow(
    offline_simulator_no_noise: AQTResource,
) -> None:  # pragma: no cover
    """Check that an `AQTResource` can be used as backend for the legacy
    `opflow.CircuitSampler` with parametric circuits.
    """
    # Import locally: the module is deprecated and only needed by this test.
    from qiskit.opflow import CircuitSampler, StateFn

    theta = Parameter("θ")
    qc = QuantumCircuit(2)
    qc.rx(theta, 0)
    qc.ry(theta, 0)
    qc.rz(theta, 0)
    qc.rxx(theta, 0, 1)
    assert qc.num_parameters > 0

    sampler = CircuitSampler(offline_simulator_no_noise)
    # Bind θ=π and sample: the resulting state is |11> (amplitude on index 3).
    sampled = sampler.convert(StateFn(qc), params={theta: pi}).eval()
    assert sampled.to_matrix().tolist() == [[0.0, 0.0, 0.0, 1.0]]
@pytest.mark.parametrize(
    "get_sampler",
    [
        # Reference implementation
        lambda _: Sampler(),
        # The AQT transpilation plugin doesn't support transpiling unbound parametric circuits
        # and the BackendSampler doesn't fallback to transpiling the bound circuit if
        # transpiling the unbound circuit failed (like the opflow sampler does).
        # Sampling a parametric circuit with the generic BackendSampler is therefore not supported.
        pytest.param(
            lambda backend: BackendSampler(backend), marks=pytest.mark.xfail(raises=TranspilerError)
        ),
        # The specialized implementation of the Sampler primitive for AQT backends delays the
        # transpilation passes that require bound parameters.
        lambda backend: AQTSampler(backend),
    ],
)
def test_circuit_sampling_primitive(
    get_sampler: Callable[[Backend], BaseSampler],
    offline_simulator_no_noise: AQTResource,
) -> None:
    """Check that a `Sampler` primitive using an AQT backend can sample parametric circuits."""
    theta = Parameter("θ")
    qc = QuantumCircuit(2)
    qc.rx(theta, 0)
    qc.ry(theta, 0)
    qc.rz(theta, 0)
    qc.rxx(theta, 0, 1)
    qc.measure_all()
    assert qc.num_parameters > 0

    sampler = get_sampler(offline_simulator_no_noise)
    # Bind θ=π at sampling time; all probability must land on outcome 3 (|11>).
    sampled = sampler.run(qc, [pi]).result().quasi_dists
    assert sampled == [{3: 1.0}]
@pytest.mark.parametrize("theta", [0.0, pi])
def test_operator_estimator_primitive_trivial_pauli_x(
theta: float, offline_simulator_no_noise: MockSimulator
) -> None:
"""Use the Estimator primitive to verify that <0|X|0> = <1|X|1> = 0.
Define the parametrized circuit that consists of the single gate Rx(θ) with
θ=0,π. Applied to |0>, this creates the states |0>,|1>. The Estimator primitive
is then used to evaluate the expectation value of the Pauli X operator on the
state produced by the circuit.
"""
offline_simulator_no_noise.simulator.options.seed_simulator = 0
estimator = AQTEstimator(offline_simulator_no_noise, options={"shots": 200})
qc = QuantumCircuit(1)
qc.rx(theta, 0)
op = SparsePauliOp("X")
result = estimator.run(qc, op).result()
assert abs(result.values[0]) < 0.1
def test_operator_estimator_primitive_trivial_pauli_z(
    offline_simulator_no_noise: MockSimulator,
) -> None:
    """Use the Estimator primitive to verify that:

    <0|Z|0> = 1
    <1|Z|1> = -1
    <ψ|Z|ψ> = 0 with |ψ> = (|0> + |1>)/√2.

    The sampled circuit is always Rx(θ) with θ=0,π,π/2 respectively.

    The θ values are passed into a single call to the estimator, thus also checking
    that the AQTEstimator can deal with parametrized circuits.
    """
    # Fixed seed: deterministic sampling noise.
    offline_simulator_no_noise.simulator.options.seed_simulator = 0
    estimator = AQTEstimator(offline_simulator_no_noise, options={"shots": 200})

    theta = Parameter("θ")
    qc = QuantumCircuit(1)
    qc.rx(theta, 0)
    op = SparsePauliOp("Z")

    # Single estimator call with three parameter bindings.
    result = estimator.run([qc] * 3, [op] * 3, [[0], [pi], [pi / 2]]).result()
    z0, z1, z01 = result.values

    assert isclose(z0, 1.0)  # <0|Z|0>
    assert isclose(z1, -1.0)  # <1|Z|1>
    assert abs(z01) < 0.1  # <ψ|Z|ψ>, |ψ> = (|0> + |1>)/√2, finite-shot estimate
@pytest.mark.parametrize(
    "theta",
    [
        pi / 3,
        -pi / 3,
        pi / 2,
        -pi / 2,
        3 * pi / 4,
        -3 * pi / 4,
        15 * pi / 8,
        -15 * pi / 8,
        33 * pi / 16,
        -33 * pi / 16,
    ],
)
def test_aqt_sampler_transpilation(theta: float, offline_simulator_no_noise: MockSimulator) -> None:
    """Check that the AQTSampler passes the same circuit to the backend as a call to
    `qiskit.execute` with the same transpiler call on the bound circuit would.
    """
    theta_param = Parameter("θ")

    # define a circuit with unbound parameters
    qc = QuantumCircuit(2)
    qc.rx(pi / 3, 0)
    qc.rxx(theta_param, 0, 1)
    qc.measure_all()
    assert qc.num_parameters > 0

    # sample the circuit, passing parameter assignments
    sampler = AQTSampler(offline_simulator_no_noise)
    sampler.run(qc, [theta]).result()

    # the sampler was only called once
    assert len(offline_simulator_no_noise.submitted_circuits) == 1

    # get the circuit passed to the backend (single job, single circuit)
    ((transpiled_circuit,),) = offline_simulator_no_noise.submitted_circuits

    # compare to the circuit obtained by binding the parameters and transpiling at once
    expected = qc.assign_parameters({theta_param: theta})
    tr_expected = qiskit.transpile(expected, offline_simulator_no_noise)
    assert_circuits_equal(transpiled_circuit, tr_expected)
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, Alpine Quantum Technologies GmbH 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import json
import math
import uuid
from unittest import mock
import httpx
import pydantic as pdt
import pytest
from polyfactory.factories.pydantic_factory import ModelFactory
from pytest_httpx import HTTPXMock
from qiskit import QuantumCircuit
from qiskit.providers.exceptions import JobTimeoutError
from qiskit_aqt_provider import api_models
from qiskit_aqt_provider.aqt_job import AQTJob
from qiskit_aqt_provider.aqt_options import AQTOptions
from qiskit_aqt_provider.aqt_resource import AQTResource
from qiskit_aqt_provider.test.circuits import assert_circuits_equal, empty_circuit
from qiskit_aqt_provider.test.fixtures import MockSimulator
from qiskit_aqt_provider.test.resources import DummyResource, TestResource
from qiskit_aqt_provider.versions import USER_AGENT
class OptionsFactory(ModelFactory[AQTOptions]):
    """Build randomized `AQTOptions` instances for tests."""

    __model__ = AQTOptions
    # NOTE(review): pinned so generated options keep a short, finite polling
    # timeout — confirm polyfactory treats class attributes as fixed values.
    query_timeout_seconds = 10.0
def test_options_set_query_timeout(offline_simulator_no_noise: AQTResource) -> None:
    """Set the query timeout for job status queries with different values."""
    backend = offline_simulator_no_noise

    # Strings are rejected.
    with pytest.raises(pdt.ValidationError):
        backend.options.update_options(query_timeout_seconds="abc")

    # Integer and float values are accepted as-is.
    for accepted in (123, 123.45):
        backend.options.update_options(query_timeout_seconds=accepted)
        assert backend.options.query_timeout_seconds == accepted

    # None disables the timeout entirely.
    backend.options.update_options(query_timeout_seconds=None)
    assert backend.options.query_timeout_seconds is None
def test_options_set_query_period(offline_simulator_no_noise: AQTResource) -> None:
    """Set the query period for job status queries with different values."""
    backend = offline_simulator_no_noise

    # Integer and float values are accepted as-is.
    for accepted in (123, 123.45):
        backend.options.update_options(query_period_seconds=accepted)
        assert backend.options.query_period_seconds == accepted

    # Unlike the timeout, the period cannot be disabled with None ...
    with pytest.raises(pdt.ValidationError):
        backend.options.update_options(query_period_seconds=None)

    # ... and strings are rejected as well.
    with pytest.raises(pdt.ValidationError):
        backend.options.update_options(query_period_seconds="abc")
def test_query_timeout_propagation() -> None:
    """Check that the query timeout is properly propagated from the backend options to
    the job result polling loop.

    Acquire a resource with 10s processing time, but set the job result timeout to 1s.
    Check that calling `result()` on the job handle fails with a timeout error.
    """
    response_delay = 10.0
    timeout = 1.0
    assert timeout < response_delay  # sanity: the polling loop must give up first

    backend = TestResource(min_running_duration=response_delay)
    backend.options.update_options(query_timeout_seconds=timeout, query_period_seconds=0.5)

    qc = QuantumCircuit(1)
    qc.rx(3.14, 0)

    job = backend.run(qc)

    with pytest.raises(JobTimeoutError):
        job.result()
def test_query_period_propagation() -> None:
    """Check that the query wait duration is properly propagated from the backend options
    to the job result polling loop.

    Set the polling period (much) shorter than the backend's processing time. Check that
    the backend is polled the calculated number of times.
    """
    response_delay = 2.0
    period_seconds = 0.5
    timeout_seconds = 3.0
    assert timeout_seconds > response_delay  # won't time out

    backend = TestResource(min_running_duration=response_delay)
    backend.options.update_options(
        query_timeout_seconds=timeout_seconds, query_period_seconds=period_seconds
    )

    qc = QuantumCircuit(1)
    qc.rx(3.14, 0)
    qc.measure_all()

    job = backend.run(qc)

    # Count the status queries issued by the polling loop.
    with mock.patch.object(AQTJob, "status", wraps=job.status) as mocked_status:
        job.result()

    # Expect roughly response_delay / period polls; allow one extra for the
    # final query that observes the completed job.
    lower_bound = math.floor(response_delay / period_seconds)
    upper_bound = math.ceil(response_delay / period_seconds) + 1
    assert lower_bound <= mocked_status.call_count <= upper_bound
def test_run_options_propagation(offline_simulator_no_noise: MockSimulator) -> None:
    """Check that options passed to AQTResource.run are propagated to the corresponding job."""
    default = offline_simulator_no_noise.options.copy()

    # Draw random option sets until one differs from the defaults, so the
    # propagation check below cannot pass vacuously.
    while True:
        overrides = OptionsFactory.build()
        if overrides != default:
            break

    qc = QuantumCircuit(1)
    qc.measure_all()

    # don't submit the circuit to the simulator
    with mock.patch.object(AQTJob, "submit") as mocked_submit:
        job = offline_simulator_no_noise.run(qc, **overrides.dict())

    assert job.options == overrides
    mocked_submit.assert_called_once()
def test_run_options_unknown(offline_simulator_no_noise: MockSimulator) -> None:
    """Check that AQTResource.run accepts but warns about unknown options."""
    default = offline_simulator_no_noise.options.copy()
    overrides = {"shots": 123, "unknown_option": True}
    # Sanity: "unknown_option" is indeed not a valid option name.
    assert set(overrides) - set(default) == {"unknown_option"}

    qc = QuantumCircuit(1)
    qc.measure_all()

    # Patch out the submission: only option handling is under test here.
    with mock.patch.object(AQTJob, "submit") as mocked_submit:
        with pytest.warns(UserWarning, match="not used"):
            job = offline_simulator_no_noise.run(qc, **overrides)

    # The known option was applied; the unknown one only triggered the warning.
    assert job.options.shots == 123
    mocked_submit.assert_called_once()
def test_run_options_invalid(offline_simulator_no_noise: MockSimulator) -> None:
    """Check that AQTResource.run reject valid option names with invalid values."""
    circuit = QuantumCircuit(1)
    circuit.measure_all()

    # "shots" is a known option name, but a negative count is invalid.
    with pytest.raises(pdt.ValidationError, match="shots"):
        offline_simulator_no_noise.run(circuit, shots=-123)
def test_double_job_submission(offline_simulator_no_noise: MockSimulator) -> None:
    """Check that attempting to re-submit a job raises a RuntimeError."""
    qc = QuantumCircuit(1)
    qc.r(3.14, 0.0, 0)
    qc.measure_all()

    # AQTResource.run submits the job
    job = offline_simulator_no_noise.run(qc)

    # A second submission must fail, and the error message must name the job.
    with pytest.raises(RuntimeError, match=f"{job.job_id()}"):
        job.submit()

    # Check that the job was actually submitted (exactly one recorded payload).
    ((submitted_circuit,),) = offline_simulator_no_noise.submitted_circuits
    assert_circuits_equal(submitted_circuit, qc)
def test_offline_simulator_invalid_job_id(offline_simulator_no_noise: MockSimulator) -> None:
    """Check that the offline simulator raises UnknownJobError if the job id passed
    to `result()` is invalid.
    """
    qc = QuantumCircuit(1)
    qc.measure_all()
    job = offline_simulator_no_noise.run([qc], shots=1)
    job_id = uuid.UUID(hex=job.job_id())

    # A fresh random UUID is (with overwhelming probability) unknown.
    invalid_job_id = uuid.uuid4()
    assert invalid_job_id != job_id

    with pytest.raises(api_models.UnknownJobError, match=str(invalid_job_id)):
        offline_simulator_no_noise.result(invalid_job_id)

    # querying the actual job is successful
    result = offline_simulator_no_noise.result(job_id)
    assert result.job.job_id == job_id
def test_submit_valid_response(httpx_mock: HTTPXMock) -> None:
    """Check that AQTResource.submit passes the authorization token and
    extracts the correct job_id when the response payload is valid.
    """
    token = str(uuid.uuid4())
    backend = DummyResource(token)
    expected_job_id = uuid.uuid4()

    def handle_submit(request: httpx.Request) -> httpx.Response:
        # The resource must identify itself and carry the bearer token.
        assert request.headers["user-agent"] == USER_AGENT
        assert request.headers["authorization"] == f"Bearer {token}"

        # Reply with a well-formed "queued" payload carrying the job id.
        return httpx.Response(
            status_code=httpx.codes.OK,
            json=json.loads(
                api_models.Response.queued(
                    job_id=expected_job_id,
                    resource_id=backend.resource_id,
                    workspace_id=backend.workspace_id,
                ).json()
            ),
        )

    httpx_mock.add_callback(handle_submit, method="POST")

    job_id = backend.submit([empty_circuit(2)], shots=10)
    assert job_id == expected_job_id
def test_submit_bad_request(httpx_mock: HTTPXMock) -> None:
    """Check that AQTResource.submit raises an HTTPError if the request
    is flagged invalid by the server.
    """
    resource = DummyResource("")
    # Any submission attempt is answered with HTTP 400.
    httpx_mock.add_response(status_code=httpx.codes.BAD_REQUEST)

    with pytest.raises(httpx.HTTPError):
        resource.submit([empty_circuit(2)], shots=10)
def test_result_valid_response(httpx_mock: HTTPXMock) -> None:
    """Check that AQTResource.result passes the authorization token
    and returns the raw response payload.
    """
    token = str(uuid.uuid4())
    backend = DummyResource(token)
    job_id = uuid.uuid4()

    # Canned API payload: a cancelled job for the queried id.
    payload = api_models.Response.cancelled(
        job_id=job_id, resource_id=backend.resource_id, workspace_id=backend.workspace_id
    )

    def handle_result(request: httpx.Request) -> httpx.Response:
        # The resource must identify itself and carry the bearer token.
        assert request.headers["user-agent"] == USER_AGENT
        assert request.headers["authorization"] == f"Bearer {token}"

        return httpx.Response(status_code=httpx.codes.OK, json=json.loads(payload.json()))

    httpx_mock.add_callback(handle_result, method="GET")

    response = backend.result(job_id)
    # The payload is returned as-is, without transformation.
    assert response == payload
def test_result_bad_request(httpx_mock: HTTPXMock) -> None:
    """Check that AQTResource.result raises an HTTPError if the request
    is flagged invalid by the server.
    """
    resource = DummyResource("")
    # Any result query is answered with HTTP 400.
    httpx_mock.add_response(status_code=httpx.codes.BAD_REQUEST)

    with pytest.raises(httpx.HTTPError):
        resource.result(uuid.uuid4())
def test_result_unknown_job(httpx_mock: HTTPXMock) -> None:
    """Check that AQTResource.result raises UnknownJobError if the API
    responds with an UnknownJob payload.
    """
    backend = DummyResource("")
    job_id = uuid.uuid4()

    # The HTTP request succeeds, but the payload marks the job as unknown.
    httpx_mock.add_response(json=json.loads(api_models.Response.unknown_job(job_id=job_id).json()))

    with pytest.raises(api_models.UnknownJobError, match=str(job_id)):
        backend.result(job_id)
def test_resource_fixture_detect_invalid_circuits(
    offline_simulator_no_noise: MockSimulator,
) -> None:
    """Pass a circuit that cannot be converted to the AQT API to the mock simulator.

    This must fail.
    """
    qc = QuantumCircuit(2)
    qc.h(0)
    # Use the canonical `cx` spelling: `QuantumCircuit.cnot` is a deprecated alias.
    qc.cx(0, 1)
    qc.measure_all()

    with pytest.raises(ValueError, match="^Circuit cannot be converted"):
        offline_simulator_no_noise.run(qc)
|
https://github.com/alpine-quantum-technologies/qiskit-aqt-provider-rc
|
alpine-quantum-technologies
|
# This code is part of Qiskit.
#
# (C) Alpine Quantum Technologies GmbH 2023
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from math import pi
from typing import Union
import pytest
from hypothesis import assume, given
from hypothesis import strategies as st
from qiskit import QuantumCircuit, transpile
from qiskit.circuit.library import RXGate, RYGate
from qiskit_aqt_provider.aqt_resource import AQTResource
from qiskit_aqt_provider.test.circuits import (
assert_circuits_equal,
assert_circuits_equivalent,
qft_circuit,
)
from qiskit_aqt_provider.test.fixtures import MockSimulator
from qiskit_aqt_provider.transpiler_plugin import rewrite_rx_as_r, wrap_rxx_angle
@pytest.mark.parametrize(
    ("input_theta", "output_theta", "output_phi"),
    [
        (pi / 3, pi / 3, 0.0),
        (-pi / 3, pi / 3, pi),
        (7 * pi / 5, 3 * pi / 5, pi),
        (25 * pi, pi, pi),
        (22 * pi / 3, 2 * pi / 3, pi),
    ],
)
def test_rx_rewrite_example(
    input_theta: float,
    output_theta: float,
    output_phi: float,
) -> None:
    """Snapshot test for the Rx(θ) → R(θ, φ) rule."""
    # Circuit produced by the rewrite rule under test.
    result = QuantumCircuit(1)
    result.append(rewrite_rx_as_r(input_theta), (0,))

    # Expected snapshot: single R gate with wrapped angles.
    expected = QuantumCircuit(1)
    expected.r(output_theta, output_phi, 0)

    # Original (unwrapped) rotation, used for the equivalence check.
    reference = QuantumCircuit(1)
    reference.rx(input_theta, 0)

    assert_circuits_equal(result, expected)
    assert_circuits_equivalent(result, reference)
@given(theta=st.floats(allow_nan=False, min_value=-1000 * pi, max_value=1000 * pi))
@pytest.mark.parametrize("optimization_level", [1, 2, 3])
@pytest.mark.parametrize("test_gate", [RXGate, RYGate])
def test_rx_ry_rewrite_transpile(
    theta: float,
    optimization_level: int,
    test_gate: Union[RXGate, RYGate],
) -> None:
    """Test the rewrite rule: Rx(θ), Ry(θ) → R(θ, φ), θ ∈ [0, π], φ ∈ [0, 2π]."""
    # Tiny angles can be optimized away entirely, which would invalidate the
    # single-R-gate assertion below.
    assume(abs(theta) > pi / 200)

    # we only need the backend's transpiler target for this test
    backend = MockSimulator(noisy=False)

    qc = QuantumCircuit(1)
    qc.append(test_gate(theta), (0,))

    trans_qc = transpile(qc, backend, optimization_level=optimization_level)
    assert isinstance(trans_qc, QuantumCircuit)
    assert_circuits_equivalent(trans_qc, qc)
    assert set(trans_qc.count_ops()) <= set(backend.configuration().basis_gates)

    num_r = trans_qc.count_ops().get("r")
    assume(num_r is not None)
    assert num_r == 1

    for operation in trans_qc.data:
        instruction = operation[0]
        if instruction.name == "r":
            # Fresh names: reusing `theta` here would shadow the
            # hypothesis-drawn parameter.
            r_theta, r_phi = instruction.params
            assert 0 <= float(r_theta) <= pi
            assert 0 <= float(r_phi) <= 2 * pi
            break
    else:  # pragma: no cover
        pytest.fail("No R gates in transpiled circuit.")
def test_decompose_1q_rotations_example(offline_simulator_no_noise: AQTResource) -> None:
    """Snapshot test for the efficient rewrite of single-qubit rotation runs as ZXZ."""
    qc = QuantumCircuit(1)
    qc.rx(pi / 2, 0)
    qc.ry(pi / 2, 0)

    # Expected decomposition snapshot for the Rx·Ry run.
    expected = QuantumCircuit(1)
    expected.rz(-pi / 2, 0)
    expected.r(pi / 2, 0, 0)

    result = transpile(qc, offline_simulator_no_noise, optimization_level=3)
    assert isinstance(result, QuantumCircuit)  # only got one circuit back
    assert_circuits_equal(result, expected)
    assert_circuits_equivalent(result, expected)
def test_rxx_wrap_angle_case0() -> None:
    """Snapshot test for Rxx(θ) rewrite with 0 <= θ <= π/2."""
    # Angles already in [0, π/2] pass through unchanged.
    rewritten = QuantumCircuit(2)
    rewritten.append(wrap_rxx_angle(pi / 2), (0, 1))

    snapshot = QuantumCircuit(2)
    snapshot.rxx(pi / 2, 0, 1)

    decomposed = rewritten.decompose()
    assert_circuits_equal(decomposed, snapshot)
    assert_circuits_equivalent(decomposed, snapshot)
def test_rxx_wrap_angle_case0_negative() -> None:
    """Snapshot test for Rxx(θ) rewrite with -π/2 <= θ < 0."""
    result = QuantumCircuit(2)
    result.append(wrap_rxx_angle(-pi / 2), (0, 1))

    # The sign flip is realized by conjugating Rxx(π/2) with Rz(π) on one qubit.
    expected = QuantumCircuit(2)
    expected.rz(pi, 0)
    expected.rxx(pi / 2, 0, 1)
    expected.rz(pi, 0)

    assert_circuits_equal(result.decompose(), expected)
    assert_circuits_equivalent(result.decompose(), expected)
def test_rxx_wrap_angle_case1() -> None:
    """Snapshot test for Rxx(θ) rewrite with π/2 < θ <= 3π/2."""
    result = QuantumCircuit(2)
    result.append(wrap_rxx_angle(3 * pi / 2), (0, 1))

    # The excess rotation is absorbed into Rx(π) gates on both qubits.
    expected = QuantumCircuit(2)
    expected.rx(pi, 0)
    expected.rx(pi, 1)
    expected.rxx(pi / 2, 0, 1)

    assert_circuits_equal(result.decompose(), expected)
    assert_circuits_equivalent(result.decompose(), expected)
def test_rxx_wrap_angle_case1_negative() -> None:
    """Snapshot test for Rxx(θ) rewrite with -3π/2 <= θ < -π/2."""
    result = QuantumCircuit(2)
    result.append(wrap_rxx_angle(-3 * pi / 2), (0, 1))

    # -3π/2 is equivalent to +π/2 modulo 2π: a bare Rxx(π/2) is expected.
    expected = QuantumCircuit(2)
    expected.rxx(pi / 2, 0, 1)

    assert_circuits_equal(result.decompose(), expected)
    assert_circuits_equivalent(result.decompose(), expected)
def test_rxx_wrap_angle_case2() -> None:
    """Snapshot test for Rxx(θ) rewrite with θ > 3*π/2."""
    result = QuantumCircuit(2)
    result.append(wrap_rxx_angle(18 * pi / 10), (0, 1))  # mod 2π = 9π/5 → -π/5

    # The negative wrapped angle is realized by conjugating with Rz(π).
    expected = QuantumCircuit(2)
    expected.rz(pi, 0)
    expected.rxx(pi / 5, 0, 1)
    expected.rz(pi, 0)

    assert_circuits_equal(result.decompose(), expected)
    assert_circuits_equivalent(result.decompose(), expected)
def test_rxx_wrap_angle_case2_negative() -> None:
    """Snapshot test for Rxx(θ) rewrite with θ < -3π/2."""
    result = QuantumCircuit(2)
    result.append(wrap_rxx_angle(-18 * pi / 10), (0, 1))  # mod 2π = π/5

    # The wrapped angle is already in [0, π/2]: a bare Rxx(π/5) is expected.
    expected = QuantumCircuit(2)
    expected.rxx(pi / 5, 0, 1)

    assert_circuits_equal(result.decompose(), expected)
    assert_circuits_equivalent(result.decompose(), expected)
@given(
    angle=st.floats(
        allow_nan=False,
        allow_infinity=False,
        min_value=-1000 * pi,
        max_value=1000 * pi,
    )
)
@pytest.mark.parametrize("qubits", [3])
@pytest.mark.parametrize("optimization_level", [1, 2, 3])
def test_rxx_wrap_angle_transpile(angle: float, qubits: int, optimization_level: int) -> None:
    """Check that Rxx angles are wrapped by the transpiler."""
    # Very small angles may be optimized away, breaking the single-Rxx check.
    assume(abs(angle) > pi / 200)

    qc = QuantumCircuit(qubits)
    qc.rxx(angle, 0, 1)

    # we only need the backend's transpilation target for this test
    backend = MockSimulator(noisy=False)
    trans_qc = transpile(qc, backend, optimization_level=optimization_level)
    assert isinstance(trans_qc, QuantumCircuit)
    assert_circuits_equivalent(trans_qc, qc)
    assert set(trans_qc.count_ops()) <= set(backend.configuration().basis_gates)

    num_rxx = trans_qc.count_ops().get("rxx")
    # in high optimization levels, the gate might be dropped
    assume(num_rxx is not None)
    assert num_rxx == 1

    # check that all Rxx have angles in [0, π/2]
    for operation in trans_qc.data:
        instruction = operation[0]
        if instruction.name == "rxx":
            (theta,) = instruction.params
            assert 0 <= float(theta) <= pi / 2
            break  # there's only one Rxx gate in the circuit
    else:  # pragma: no cover
        pytest.fail("Transpiled circuit contains no Rxx gate.")
@pytest.mark.parametrize("qubits", [1, 5, 10])
@pytest.mark.parametrize("optimization_level", [1, 2, 3])
def test_qft_circuit_transpilation(
qubits: int, optimization_level: int, offline_simulator_no_noise: AQTResource
) -> None:
"""Transpile a N-qubit QFT circuit for an AQT backend. Check that the angles are properly
wrapped.
"""
qc = qft_circuit(qubits)
trans_qc = transpile(qc, offline_simulator_no_noise, optimization_level=optimization_level)
assert isinstance(trans_qc, QuantumCircuit)
assert set(trans_qc.count_ops()) <= set(offline_simulator_no_noise.configuration().basis_gates)
for operation in trans_qc.data:
instruction = operation[0]
if instruction.name == "rxx":
(theta,) = instruction.params
assert 0 <= float(theta) <= pi / 2
if instruction.name == "r":
(theta, _) = instruction.params
assert abs(theta) <= pi
if optimization_level < 3 and qubits < 6:
assert_circuits_equivalent(qc, trans_qc)
|
https://github.com/tstopa/Qiskit_for_high_schools
|
tstopa
|
from qiskit import *

# Requires a saved IBM Quantum account (IBMQ.save_account) on this machine.
provider = IBMQ.load_account()
backend = provider.get_backend('ibmq_valencia')

import pprint

# The coupling map lists the qubit pairs that support a two-qubit gate.
pprint.pprint(backend.configuration().coupling_map)

from qiskit.tools.visualization import plot_error_map

# Visualize the device's per-qubit/per-gate error rates on its layout.
plot_error_map(backend)
|
https://github.com/tstopa/Qiskit_for_high_schools
|
tstopa
|
%matplotlib inline
from qiskit import QuantumCircuit
from qiskit.compiler import transpile
from qiskit.transpiler import PassManager
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute, IBMQ
from qiskit.tools.monitor import backend_overview, backend_monitor
from qiskit.compiler import transpile, assemble
from qiskit.visualization import *
from qiskit.transpiler import *
from qiskit.converters import circuit_to_dag
from qiskit.tools.visualization import dag_drawer
import pprint

# Requires a saved IBM Quantum account on this machine.
provider = IBMQ.load_account()
real_backend = provider.get_backend('ibmq_16_melbourne')
plot_error_map(real_backend)

# Small example: entangle non-adjacent qubits 0 and 2.
qr = QuantumRegister(3)
cr = ClassicalRegister(3)
circuit = QuantumCircuit(qr, cr)
circuit.h(qr[0])
circuit.cx(qr[0], qr[2])
circuit.measure(qr, cr)
circuit.draw(output='mpl')

# Compare the transpiled circuit at the lowest and highest optimization levels.
compiled_circuit = transpile(circuit, real_backend, optimization_level=0)
compiled_circuit.draw(output='mpl', idle_wires=False)
compiled_circuit = transpile(circuit, real_backend, optimization_level=3)
compiled_circuit.draw(output='mpl', idle_wires=False)

# Larger example: 14-qubit circuit with long-range CNOTs, transpiled at all
# optimization levels to compare gate counts and depth.
qr2 = QuantumRegister(14)
cr2 = ClassicalRegister(14)
circuit2 = QuantumCircuit(qr2, cr2)
circuit2.h(qr2[0])
circuit2.cx(qr2[0], qr2[6])
circuit2.cx(qr2[6], qr2[13])
circuit2.cx(qr2[13], qr2[7])
circuit2.cx(qr2[7], qr2[0])
circuit2.measure(qr2, cr2)
for level in range(4):
    compiled_circuit2 = transpile(circuit2, real_backend, optimization_level=level)
    print('---------- Level = ' + str(level) + '----------')
    print('gates = ', compiled_circuit2.count_ops())
    print('depth = ', compiled_circuit2.depth())
|
https://github.com/tstopa/Qiskit_for_high_schools
|
tstopa
|
import qiskit
from qiskit import Aer
from qiskit.aqua.algorithms import Shor

# Factor N=15 with Shor's algorithm on the local QASM simulator.
# NOTE(review): qiskit.aqua is deprecated/removed in current Qiskit releases.
N=15
shor = Shor(N)
backend = Aer.get_backend('qasm_simulator')
result = shor.run(backend)
print("The factors of {} computed by the Shor's algorithm: {}.".format(N, result['factors'][0]))

from qiskit.aqua.algorithms import Grover
from qiskit.aqua.components.oracles import TruthTableOracle, LogicalExpressionOracle

# Grover search with an oracle given as a 16-entry truth table (4 inputs).
truthTable = '0010100101000001'
oracle = TruthTableOracle(truthTable)
grover = Grover(oracle)
result = grover.run(backend)

from qiskit.visualization import plot_histogram
plot_histogram(result['measurement'])

# Grover search with an oracle given as a logical expression.
secondOracle = LogicalExpressionOracle('(a & ~b) & (c ^ d)')
grover = Grover(secondOracle)
result = grover.run(backend)
plot_histogram(result['measurement'])
|
https://github.com/tstopa/Qiskit_for_high_schools
|
tstopa
|
from qiskit import IBMQ

# Requires a saved IBM Quantum account on this machine.
provider = IBMQ.load_account()

from qiskit.tools.monitor import backend_overview

# Print status (availability, queue length, ...) of the account's backends.
backend_overview()
|
https://github.com/tstopa/Qiskit_for_high_schools
|
tstopa
|
from qiskit import *
from qiskit import QuantumCircuit

# Build a 2-qubit circuit gate by gate, drawing after each addition
# (both ASCII and matplotlib renderings).
circuit = QuantumCircuit(2)
%matplotlib inline
circuit.draw('mpl')
circuit.draw()
circuit.h([0])
circuit.draw()
%matplotlib inline
circuit.draw(output="mpl")
circuit.x([1])
circuit.draw()
circuit.draw(output="mpl")
circuit.cx([1], [0])
circuit.draw()
circuit.draw(output="mpl")

# List the locally available Aer simulator backends ...
from qiskit import Aer
for backend in Aer.backends():
    print(backend.name())

# ... and the remote IBM Quantum backends of the saved account.
from qiskit import IBMQ
provider = IBMQ.load_account()
provider.backends()
|
https://github.com/tstopa/Qiskit_for_high_schools
|
tstopa
|
from qiskit import *

# Show the versions of the installed Qiskit components.
# NOTE(review): the star import does not bind the name `qiskit` itself;
# this line likely relies on notebook state — confirm.
qiskit.__qiskit_version__
|
https://github.com/tstopa/Qiskit_for_high_schools
|
tstopa
|
from qiskit.visualization import plot_bloch_vector
%matplotlib inline

# Bloch-sphere views of fixed vectors: |0> (north pole) and |+> (along x).
plot_bloch_vector([0,0,1], title="Standard Qubit Value")
plot_bloch_vector([1,0,0], title="Qubit in the state |+⟩")

from qiskit import *
from math import pi
from qiskit.visualization import plot_bloch_multivector

# State after an X gate on |0>: |1>.
qcA = QuantumCircuit(1)
qcA.x(0)
backend = Aer.get_backend('statevector_simulator')
out = execute(qcA,backend).result().get_statevector()
plot_bloch_multivector(out)

# State after a Hadamard on |0>: |+>.
qcB = QuantumCircuit(1)
qcB.h(0)
backend = Aer.get_backend('statevector_simulator')
out = execute(qcB,backend).result().get_statevector()
plot_bloch_multivector(out)
|
https://github.com/martian17/qiskit-graph-coloring-hamiltonian
|
martian17
|
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, BasicAer
from qiskit.visualization import plot_histogram
def multi_toffoli_q(qc, q_controls, q_target, q_ancillas=None):
    """Append a multi-controlled X (generalized Toffoli) to `qc` in place.

    Recursively decomposes the n-controlled X into Toffoli gates, using the
    ancilla qubits as scratch space; the ancillas are uncomputed back to
    their initial state.

    Args:
        qc: QuantumCircuit the gates are appended to (modified in place).
        q_controls: control qubits.
        q_target: target qubit.
        q_ancillas: ancilla qubits; at least ``len(q_controls) - 2`` are
            required when there are more than two controls.

    Raises:
        ValueError: if not enough ancilla qubits are provided.
    """
    if len(q_controls) == 1:
        # One control: a plain CNOT.
        qc.cx(q_controls[0], q_target)
    elif len(q_controls) == 2:
        # Two controls: a plain Toffoli.
        qc.ccx(q_controls[0], q_controls[1], q_target)
    elif len(q_controls) > 2 and (q_ancillas is None or len(q_ancillas) < len(q_controls) - 2):
        # Specific exception type instead of a bare `Exception`; callers
        # catching Exception still work (ValueError is a subclass).
        raise ValueError('ERROR: need more ancillas for multi_toffoli!')
    else:
        # Compute the AND of all but the last control into the last ancilla,
        # combine it with the last control onto the target, then uncompute.
        multi_toffoli_q(qc, q_controls[:-1], q_ancillas[-1], q_ancillas[:-1])
        qc.ccx(q_controls[-1], q_ancillas[-1], q_target)
        multi_toffoli_q(qc, q_controls[:-1], q_ancillas[-1], q_ancillas[:-1])
# Register sizes for the prototype circuit below: three n-qubit registers,
# one single-qubit flag/ancilla register, and n classical bits.
n = 7
q1 = QuantumRegister(n)
q2 = QuantumRegister(n)
q3 = QuantumRegister(1)
q4 = QuantumRegister(n)
cr = ClassicalRegister(n)
def m_gate_for_special_case(q1,q2,q4, hamiltonian):
    """Prototype M-gate builder for a special-case Hamiltonian.

    NOTE(review): this function looks unfinished — `qr`, `cr`, `n` and
    `math` are free names not defined in this file's visible scope, the
    nested gate_aza() is defined but never called, and nothing is returned.
    Verify against the repository before relying on it.
    """
    circuit = QuantumCircuit(qr, cr)
    def gate_aza(q1,q2,q3,q4):
        #circuit = QuantumCircuit(q1,q2,q3,q4)
        # 2-qubit unitary applied pairwise to (q1[i], q2[i]); the middle
        # 2x2 sub-block is a Hadamard acting on the {|01>,|10>} subspace.
        matrix = [[1, 0, 0, 0], [0, 1/math.sqrt(2), 1/math.sqrt(2), 0], [0, 1/math.sqrt(2), -1/math.sqrt(2), 0], [0,0,0,1]]
        for i in range(n):
            circuit.unitary(matrix,[q1[i],q2[i]])
        circuit.barrier()
        # Flag q3 when q1[i]=1 and q2[i]=0 (X-conjugated Toffoli).
        for i in range(n):
            circuit.x(q2[i])
            circuit.ccx(q1[i],q2[i],q3)
            circuit.x(q2[i])
        circuit.barrier()
        # Controlled phase ladder on q4, conditioned on the q3 flag.
        circuit.x(q3)
        for i in range(n):
            circuit.cu1(-2**i, q3, q4[i])
        circuit.x(q3)
        for i in range(n):
            circuit.cu1(2**i, q3, q4[i])
        circuit.barrier()
        # Uncompute the q3 flag.
        for i in range(n):
            circuit.x(q2[i])
            circuit.ccx(q1[i],q2[i],q3)
            circuit.x(q2[i])
        circuit.barrier()
        for i in range(n):
            circuit.unitary(matrix,[q1[i],q2[i]])
|
https://github.com/martian17/qiskit-graph-coloring-hamiltonian
|
martian17
|
"""
This file is a prototype. Not used.
"""
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from qiskit.visualization import plot_histogram
import math
def multi_toffoli_q(qc, q_controls, q_target, q_ancillas=None):
    """
    Add a multi-controlled NOT to *qc*: flip q_target iff every qubit in
    q_controls is |1>.

    With one or two controls this is a plain CX/CCX.  With more controls the
    gate is built recursively and needs len(q_controls) - 2 ancilla qubits;
    an Exception is raised when too few ancillas are supplied.  The ancillas
    are uncomputed, so they end in the same state they started in.
    """
    # q_controls = register_to_list(q_controls)
    # q_ancillas = register_to_list(q_ancillas)
    if len(q_controls) == 1:
        qc.cx(q_controls[0], q_target)
    elif len(q_controls) == 2:
        qc.ccx(q_controls[0], q_controls[1], q_target)
    elif len(q_controls) > 2 and (q_ancillas is None or len(q_ancillas) < len(q_controls) - 2):
        raise Exception('ERROR: need more ancillas for multi_toffoli!')
    else:
        # compute AND of all-but-last controls into the top ancilla,
        # toffoli it with the last control onto the target, then uncompute
        multi_toffoli_q(qc, q_controls[:-1], q_ancillas[-1], q_ancillas[:-1])
        qc.ccx(q_controls[-1], q_ancillas[-1], q_target)
        multi_toffoli_q(qc, q_controls[:-1], q_ancillas[-1], q_ancillas[:-1])
def m_gate_for_special_case():
    """Build, print and simulate a fixed 7-qubit demo circuit.

    Exercises multi_toffoli_q() with hard-coded control patterns, measures
    all qubits and hands the circuit to execute_circuit() for a histogram.
    """
    qr = QuantumRegister(7)
    cr = ClassicalRegister(7)
    circuit = QuantumCircuit(qr, cr)
    # qubit 6 flipped when (q0=0, q1=1): X-conjugation on q0 inverts it.
    circuit.x(0)
    multi_toffoli_q(circuit, [0,1], 6)
    circuit.x(0)
    circuit.barrier()
    # qubit 6 flipped again for the (q0=1, q1=0) pattern.
    circuit.x(1)
    multi_toffoli_q(circuit, [0,1], 6)
    circuit.barrier()
    multi_toffoli_q(circuit, [0,1], 3)
    circuit.barrier()
    # (q0=0, q1=0) pattern onto qubit 2; note only q0 is restored afterwards.
    circuit.x([0,1])
    multi_toffoli_q(circuit, [0,1], 2)
    circuit.x(0)
    circuit.barrier()
    circuit.measure(qr, cr)
    print(circuit)
    execute_circuit((circuit))
def exists_one_in_column(m, column):
    """Scan column *column* of matrix *m* from the top.

    Returns (True, row_index) for the first strictly positive entry, or
    (False, 0) when the column holds no positive value.
    """
    for row_idx, row in enumerate(m):
        if row[column] > 0:
            return (True, row_idx)
    return (False, 0)
def int_to_bin(num, digit):
    """Return *num* as a binary string left-padded with zeros to *digit* chars.

    Args:
        num: non-negative integer to convert.
        digit: minimum width of the result; wider representations are
            returned unpadded and never truncated (same as the original).

    Returns:
        Binary string without the '0b' prefix, e.g. int_to_bin(5, 4) -> '0101'.
    """
    # str.zfill replaces the original manual '0' * (digit - len) padding.
    return bin(num)[2:].zfill(digit)
def one_bits_list(num_bin, b):
    """
    :param num_bin: binary string, most-significant bit first
    :param b: character to look for, typically '0' or '1'
    :return: 0-based positions (counted from the least-significant end)
             at which *num_bin* equals *b*
    """
    lsb_first = num_bin[::-1]
    return [pos for pos, ch in enumerate(lsb_first) if ch == b]
def m_gate_for_general_case(q1, q2, q3, hamiltonian, q_ancilla=None):
    """Build the M-gate circuit for an arbitrary 0/1-pattern Hamiltonian.

    For every basis state (column) i that has a non-zero entry, a
    multi-controlled flag is set on the q3 register and the row position of
    that entry is written (bit by bit) into the q2 register.  All three
    registers are measured into separate classical registers at the end.

    NOTE(review): the gate targets below are plain integer indices
    (e.g. 2*n, n+t) rather than register items — this addressing assumes a
    specific register ordering inside m_circuit; confirm before reuse.
    The built circuit is printed but not returned.
    """
    n = len(q1)
    c1 = ClassicalRegister(n)
    c2 = ClassicalRegister(n)
    c3 = ClassicalRegister(n)
    m_circuit = QuantumCircuit(q1,q2,q3,q_ancilla,c1,c2,c3)
    # Flat indices of the ancilla qubits inside the combined circuit.
    q_ancilla_idx = [i for i in range(3*n+1,4*n-1)]
    print(m_circuit)
    for i in range(2**n):
        exists_nonzero, nonzero_idx = exists_one_in_column(hamiltonian, i)
        if exists_nonzero:
            # Select basis state i: X the zero-bits so the multi-Toffoli
            # fires exactly on |i>, then undo the X conjugation.
            num_bin = int_to_bin(i, n)
            ones_idx = one_bits_list(num_bin, '0')
            for idx in ones_idx:
                m_circuit.x(idx)
            multi_toffoli_q(m_circuit, [i for i in range(n)], 2*n, q_ancilla_idx)
            for idx in ones_idx:
                m_circuit.x(idx)
        m_circuit.barrier()
        if exists_nonzero:
            num_bin = int_to_bin(i, n)
            ones_idx = one_bits_list(num_bin, '0')
            nonzero_idx_bin = int_to_bin(nonzero_idx, n)
            for idx in ones_idx:
                m_circuit.x(idx)
            # Write each set bit of the matched row index into q2.
            target_list = one_bits_list(nonzero_idx_bin, '1')
            print(num_bin, ones_idx, nonzero_idx_bin, target_list)
            for t in target_list:
                multi_toffoli_q(m_circuit, [i for i in range(n)], n+t, q_ancilla_idx)
            for idx in ones_idx:
                m_circuit.x(idx)
        m_circuit.barrier()
    m_circuit.measure(q1, c1)
    m_circuit.measure(q2, c2)
    m_circuit.measure(q3, c3)
def execute_circuit(circuit):
    """Run *circuit* on the local QASM simulator and display a counts histogram.

    Args:
        circuit: a QuantumCircuit containing measurements.
    """
    backend = Aer.get_backend('qasm_simulator')
    shots = 1024
    # BUG FIX: the submitted job exposes .result() (singular); the original
    # .results() call raised AttributeError before any counts were read.
    result = execute(circuit, backend=backend, shots=shots).result()
    answer = result.get_counts()
    r = plot_histogram(answer)
    r.show()
if __name__ == "__main__":
    # Small 4x4 example, immediately overridden by the 8x8 case below.
    hamil = [[0,0,1,0],[0,0,0,1],[1,0,0,0],[0,1,0,0]]
    # hamil = [
    # [0, 2, 0, 0],
    # [2, 0, 0, 0],
    # [0, 0, 0, 1],
    # [0, 0, 1, 0]
    # ]
    hamil = [
        [0,2,0,0,0,0,0,0],
        [2,0,0,0,0,0,0,0],
        [0,0,0,1,0,0,0,0],
        [0,0,1,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
    ]
    # One register qubit per address bit of the Hamiltonian dimension.
    n = int(math.log2(len(hamil)))
    q1 = QuantumRegister(n)
    q2 = QuantumRegister(n)
    q3 = QuantumRegister(n)
    # Ancillas are only needed for multi-Toffolis with more than 2 controls.
    q_ancilla = None
    if n > 2:
        q_ancilla = QuantumRegister(n-2)
    m_gate_for_general_case(q1,q2,q3,hamil, q_ancilla)
|
https://github.com/martian17/qiskit-graph-coloring-hamiltonian
|
martian17
|
import math
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, BasicAer
from qiskit.visualization import plot_histogram
# 4,0 -> 00
# 4,1 -> 01
# 4,2 -> 10
# 4,3 -> 11
# 4, 3 -> 11
# 2,3 -> 1,1
# 1,1 -> exit
def genvec(veclen, n):
    """Greedy binary decomposition of *n* over halving weights of *veclen*.

    Walks the weights veclen/2, veclen/4, ..., 1, emitting 1 whenever the
    weight still fits into what is left of *n*; the digit list is reversed
    before being returned.
    """
    digits = []
    weight = int(veclen / 2)
    remainder = n
    while weight > 0:
        if weight <= remainder:
            remainder = remainder - weight
            digits.append(1)
        else:
            digits.append(0)
        weight = int(weight / 2)
    return digits[::-1]
def convertToState(v):
    """Map a one-hot-style vector such as [0,0,1,0] to its encoded state.

    Locates the first non-zero entry and delegates to genvec() with its
    position; returns False when every entry is zero.
    """
    for position, entry in enumerate(v):
        if entry != 0:
            return genvec(len(v), position)
    return False
def mcts(circuit, controls, target, ancilla, activq):
    """Multi-controlled Toffoli that fires on the bit pattern *activq*.

    Controls whose activq entry is 0 are X-conjugated around the mct call,
    so the target flips when the controls match activq exactly (rather than
    only on the all-ones pattern).
    """
    # Invert the zero-pattern controls before the gate...
    for i in range(len(controls)):
        if activq[i] == 0:
            circuit.x(controls[i])
    circuit.mct(controls, target, ancilla, 'basic')
    # ...and restore them afterwards.
    for i in range(len(controls)):
        if activq[i] == 0:
            circuit.x(controls[i])
def all0(v):
    """Return True when every element of *v* is zero (vacuously True for [])."""
    # Idiomatic replacement for the original manual scan-and-early-return loop.
    return all(e == 0 for e in v)
def fnon0(v):
    """First non-zero element of *v*, or False when there is none."""
    return next((element for element in v if element != 0), False)
def vdeg(v):
    """Index of the first non-zero element of *v*, or False when there is none."""
    return next((idx for idx, element in enumerate(v) if element != 0), False)
def vecinfo(v):
    """[value, index] of the first non-zero element of *v*, or False when none."""
    return next(([element, idx] for idx, element in enumerate(v) if element != 0), False)
def gate_mw(hamil,N,q1,q2,q3,q4,ancils):
    """Build the combined W (weight) and M (multiplication) sub-circuit.

    For every non-empty row of *hamil*, mcts() writes the row's first
    non-zero value (W pass, into q4) and the position of that value
    (M pass, into q2), each conditioned on q1 encoding the row index.
    Returns the freshly built QuantumCircuit (without classical registers).
    """
    circuit = QuantumCircuit(q1,q2,q3,q4,ancils)
    matlen = len(hamil)
    # W pass: encode the weight (first non-zero value) of each row into q4.
    for i in range(matlen):
        if all0(hamil[i]):
            continue # row is empty: no corresponding state
        else:
            # find the target
            val = fnon0(hamil[i])
            targetLocation = genvec(matlen,val) # weight
            # print(hamil[i],i,targetLocation)
            for j in range(N):# for each controlled output
                if targetLocation[j] == 1:
                    # print(matlen,j,i)
                    mcts(circuit, q1, q4[j], ancils, genvec(matlen,i))
    circuit.barrier()
    # print("b")
    # M pass: encode the position of each row's non-zero entry into q2.
    for i in range(matlen):
        if all0(hamil[i]):
            continue # row is empty: no corresponding state
        else:
            # find the target
            val = fnon0(hamil[i])
            targetLocation = convertToState(hamil[i]) # multiplication result
            # print(hamil[i],i,targetLocation)
            for j in range(N):# for each controlled output
                if targetLocation[j] == 1:
                    # print(matlen,j,i)
                    mcts(circuit, q1, q2[j], ancils, genvec(matlen,i))
    return circuit
def gate_aza(circuit,N,q1,q2,q3,q4):
    """Append the A / Z / A-inverse sandwich in place onto *circuit*.

    Applies a 2-qubit unitary pairwise to (q1[i], q2[i]), flags q3 for the
    (q1=1, q2=0) pattern, runs a controlled phase ladder on q4 conditioned
    on that flag, then uncomputes the flag and repeats the pairwise unitary.
    NOTE(review): cu1 is the legacy controlled-phase API in older Qiskit
    releases — confirm the pinned qiskit version before reuse.
    """
    # Middle 2x2 sub-block is a Hadamard acting on the {|01>,|10>} subspace.
    matrix = [[1, 0, 0, 0], [0, 1/math.sqrt(2), 1/math.sqrt(2), 0], [0, 1/math.sqrt(2), -1/math.sqrt(2), 0], [0,0,0,1]]
    for i in range(N):
        circuit.unitary(matrix,[q1[i],q2[i]])
    circuit.barrier()
    # Flag q3 when q1[i]=1 and q2[i]=0 (X-conjugated Toffoli).
    for i in range(N):
        circuit.x(q2[i])
        circuit.ccx(q1[i],q2[i],q3)
        circuit.x(q2[i])
    circuit.barrier()
    # Phase ladder: -2^i when the flag is clear, +2^i when it is set.
    circuit.x(q3)
    for i in range(N):
        circuit.cu1(-2**i, q3, q4[i])
    circuit.x(q3)
    for i in range(N):
        circuit.cu1(2**i, q3, q4[i])
    circuit.barrier()
    # Uncompute the q3 flag.
    for i in range(N):
        circuit.x(q2[i])
        circuit.ccx(q1[i],q2[i],q3)
        circuit.x(q2[i])
    circuit.barrier()
    for i in range(N):
        circuit.unitary(matrix,[q1[i],q2[i]])
def entire_circuit(hamil):
    """Assemble the full MW / AZA / MW-inverse circuit for *hamil*.

    hamil is a square 2^N x 2^N matrix; N qubits are allocated per logical
    register plus N ancillas and N classical bits.  Returns the combined
    QuantumCircuit.
    NOTE(review): QuantumCircuit.combine() was deprecated in later Qiskit
    releases in favour of compose(); this code targets the older API.
    """
    matlen = len(hamil)
    N = int(math.log(matlen,2))
    q1 = QuantumRegister(N)
    q2 = QuantumRegister(N)
    q3 = QuantumRegister(1)
    q4 = QuantumRegister(N)
    ancils = QuantumRegister(N)
    cr = ClassicalRegister(N)
    circuit = QuantumCircuit(q1,q2,q3,q4,ancils,cr)
    # print(q1)
    # gates M and W
    circuit_mw = gate_mw(hamil,N,q1,q2,q3,q4,ancils)
    circuit = circuit.combine(circuit_mw)
    circuit.barrier()
    # gates A, Z, and their inversions (appended in place)
    gate_aza(circuit,N,q1,q2,q3,q4)
    circuit.barrier()
    # undo the MW stage by appending its inverse
    circuit_mw_inverted = circuit_mw.inverse()
    circuit = circuit.combine(circuit_mw_inverted)
    return circuit
# entire_circuit(
# [
# [0,2,0,0],
# [2,0,0,0],
# [0,0,0,1],
# [0,0,1,0]
# ]
# ).draw(output = "mpl")
|
https://github.com/martian17/qiskit-graph-coloring-hamiltonian
|
martian17
|
import math
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, BasicAer
from qiskit.visualization import plot_histogram
# 4,0 -> 00
# 4,1 -> 01
# 4,2 -> 10
# 4,3 -> 11
# 4, 3 -> 11
# 2,3 -> 1,1
# 1,1 -> exit
def genvec(veclen, n):
    """Greedily express *n* over the halving weights of *veclen*.

    Emits a 1 for each weight veclen/2, veclen/4, ..., 1 that still fits
    into the remainder of *n*; the digits are reversed before returning.
    """
    out = []
    step = int(veclen / 2)
    left = n
    while step > 0:
        if step <= left:
            left = left - step
            out.append(1)
        else:
            out.append(0)
        step = int(step / 2)
    out.reverse()
    return out
def convertToState(v):
    """Convert a one-hot-style vector (e.g. [0,0,1,0]) to its encoded state.

    Returns genvec(len(v), i) for the first non-zero position i, or False
    when the vector is all zeros.
    """
    for idx, value in enumerate(v):
        if value != 0:
            return genvec(len(v), idx)
    return False
def mcts(circuit, controls, target, ancilla, activq):
    """Pattern-selective multi-controlled Toffoli.

    Wraps circuit.mct() so the target flips when the controls match the
    0/1 pattern *activq* instead of only the all-ones pattern: controls
    with activq[i] == 0 are X-conjugated around the gate.
    """
    for i in range(len(controls)):
        if activq[i] == 0:
            circuit.x(controls[i])
    circuit.mct(controls, target, ancilla, 'basic')
    # restore the inverted controls
    for i in range(len(controls)):
        if activq[i] == 0:
            circuit.x(controls[i])
def all0(v):
    """True when *v* contains no non-zero element (True for an empty vector)."""
    # all() with a generator replaces the original explicit loop.
    return all(e == 0 for e in v)
def fnon0(v):
    """Value of the first non-zero element of *v*; False when all are zero."""
    return next((item for item in v if item != 0), False)
def vdeg(v):
    """Position of the first non-zero element of *v*; False when all are zero."""
    return next((pos for pos, item in enumerate(v) if item != 0), False)
def vecinfo(v):
    """[value, position] of the first non-zero element of *v*, else False."""
    return next(([item, pos] for pos, item in enumerate(v) if item != 0), False)
def make_circuit(hamil):
    """Build the W + M stages of the Hamiltonian circuit for matrix *hamil*.

    Allocates N = log2(len(hamil)) qubits per logical register plus ancillas
    and one classical register, encodes each non-empty row's weight into q4
    and its non-zero position into q2 (conditioned on q1 holding the row
    index), and returns the circuit.  The F / A / M-inverse stages noted at
    the bottom were never implemented in this prototype.
    """
    matlen = len(hamil)
    N = int(math.log(matlen,2))
    q1 = QuantumRegister(N)
    q2 = QuantumRegister(N)
    q3 = QuantumRegister(1)
    q4 = QuantumRegister(N)
    ancils = QuantumRegister(N)
    cr = ClassicalRegister(N)
    circuit = QuantumCircuit(q1,q2,q3,q4,ancils,cr)
    print(q1)
    # W stage: write each row's weight (first non-zero value) into q4.
    for i in range(matlen):
        if all0(hamil[i]):
            continue # row is empty: no corresponding state
        else:
            # find the target
            val = fnon0(hamil[i])
            targetLocation = genvec(matlen,val) # weight
            print(hamil[i],i,targetLocation)
            for j in range(N):# for each controlled output
                if targetLocation[j] == 1:
                    print(matlen,j,i)
                    mcts(circuit, q1, q4[j], ancils, genvec(matlen,i))
    circuit.barrier()
    print("b")
    # M stage: write each row's non-zero position into q2.
    for i in range(matlen):
        if all0(hamil[i]):
            continue # row is empty: no corresponding state
        else:
            # find the target
            val = fnon0(hamil[i])
            targetLocation = convertToState(hamil[i]) # multiplication result
            print(hamil[i],i,targetLocation)
            for j in range(N):# for each controlled output
                if targetLocation[j] == 1:
                    print(matlen,j,i)
                    mcts(circuit, q1, q2[j], ancils, genvec(matlen,i))
    # f
    # -a
    # -m
    return circuit
# Demo: build the circuit for a 4x4 example Hamiltonian and render it with
# the matplotlib drawer (requires matplotlib / an inline backend).
make_circuit(
    [
        [0,2,0,0],
        [2,0,0,0],
        [0,0,0,1],
        [0,0,1,0]
    ]
).draw(output = "mpl")
|
https://github.com/VicentePerezSoloviev/QAOA_BNSL_IBM
|
VicentePerezSoloviev
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from QAOA_gen import QAOA
import pandas as pd
import random
import numpy as np
from qiskit import Aer, execute
# Module-level trace of the objective value recorded on every evaluation
# of the black-box objective (appended inside get_black_box_objective.f).
progress = []
def random_init_parameters(layers):
    """Draw 2 * layers starting angles uniformly from [0, pi].

    One beta and one gamma angle are needed per QAOA layer, hence the
    doubled count.

    Args:
        layers: number of QAOA layers (p).

    Returns:
        list of 2 * layers floats in [0, pi].
    """
    # Comprehension replaces the original build-and-append loop.
    return [random.uniform(0, np.pi) for _ in range(2 * layers)]
def get_qaoa_circuit(p, n, beta, gamma, alpha1, alpha2, weights):
    """Assemble a measured QAOA circuit for the BNSL problem.

    Builds a QAOA instance, adds the Hadamard superposition layer, p
    phase/mixing layers parameterized by *beta* and *gamma*, and final
    measurements.  Returns (circuit, qaoa) so callers can both execute the
    circuit and score samples via qaoa.evaluate_solution().
    """
    qaoa = QAOA(n=n, alpha1=alpha1, alpha2=alpha2, weights=weights)
    qaoa.add_superposition_layer()
    qaoa.add_layer(p, beta, gamma)
    qaoa.measure()
    # dt = pd.DataFrame(columns=['state', 'prob', 'cost'])
    # my_circuit = qaoa.my_program.to_circ() # Export this program into a quantum circuit
    # my_circuit = qaoa.circuit
    # print(my_circuit.parameters)
    '''for i in range(p):
        my_circuit = my_circuit.bind_parameters({"g" + str(i): gamma[i], "b" + str(i): beta[i]})'''
    # qaoa.circuit.bind_parameters({})
    return qaoa.circuit, qaoa
def get_black_box_objective(p, n, alpha1, alpha2, weights, nbshots, alpha, noise=None):
    """Return a CVaR objective f(theta) for a classical optimizer.

    theta packs the p beta angles followed by the p gamma angles.  Each call
    builds and simulates the QAOA circuit (optionally with a noise model),
    scores every sampled bitstring, keeps the alpha-fraction of most
    probable states (Conditional Value at Risk) and returns their
    probability-weighted average cost.  Each value is also appended to the
    module-level `progress` list.
    NOTE(review): DataFrame.append was removed in pandas 2.x — this code
    requires an older pandas.
    """
    def f(theta):
        # Split the flat parameter vector into per-layer angles.
        beta = theta[:p]
        gamma = theta[p:]
        global progress
        my_circuit, qaoa = get_qaoa_circuit(p, n, beta, gamma, alpha1, alpha2, weights)
        dt = pd.DataFrame(columns=['state', 'prob', 'cost'])
        # Create a job
        # job = my_circuit.to_job(nbshots=nbshots)
        backend = Aer.get_backend('qasm_simulator')
        # Execute, with or without the supplied noise model.
        if noise is not None:
            # qpu_predef = NoisyQProc(hardware_model=noise)
            # result = qpu_predef.submit(job)
            # print('ei')
            job = execute(my_circuit, backend, shots=nbshots, noise_model=noise)
        else:
            # print('ei')
            # result = get_default_qpu().submit(job)
            job = execute(my_circuit, backend, shots=nbshots)
        result = job.result().get_counts()
        avr_c = 0
        # Score every sampled bitstring against the classical cost function.
        for sample in result:
            cost = qaoa.evaluate_solution(str(sample))
            dt = dt.append({'state': str(sample),
                            'prob': float(result[sample]/nbshots),
                            'cost': cost}, ignore_index=True)
            # avr_c = avr_c + (sample.probability * cost)
        # Conditional Value at Risk (CVaR): keep the alpha-fraction of the
        # most probable states only.
        aux = int(len(dt) * alpha)
        dt = dt.sort_values(by=['prob'], ascending=False).head(aux)
        # dt = dt.nlargest(aux, 'cost')
        dt = dt.reset_index()
        # print(dict(dt.loc[0]))
        sum_parc = dt['cost'].sum()
        for i in range(len(dt)):
            avr_c = avr_c + (float(dt.loc[i, 'prob'])*float(dt.loc[i, 'cost'])/sum_parc)
            # print(dt.loc[i, 'prob'], dt.loc[i, 'cost'], avr_c)
        progress.append(avr_c)
        # print(avr_c)
        return avr_c # negative when we want to maximize
        # return min(dt['cost'])
    return f
|
https://github.com/VicentePerezSoloviev/QAOA_BNSL_IBM
|
VicentePerezSoloviev
|
# all libraries used by some part of the VQLS-implementation
from qiskit import (
QuantumCircuit, QuantumRegister, ClassicalRegister,
Aer, execute, transpile, assemble
)
from qiskit.circuit import Gate, Instruction
from qiskit.quantum_info.operators import Operator
from qiskit.extensions import ZGate, YGate, XGate, IGate
from scipy.optimize import (
minimize, basinhopping, differential_evolution,
shgo, dual_annealing
)
import random
import numpy as np
import cmath
from typing import List, Set, Dict, Tuple, Optional, Union
# import the params object of the GlobalParameters class
# this provides the parameters used to desribed and model
# the problem the minimizer is supposed to use.
from GlobalParameters import params
# import the vqls algorithm and corresponding code
from vqls import (
generate_ansatz,
hadamard_test,
calculate_beta,
calculate_delta,
calculate_local_cost_function,
minimize_local_cost_function,
postCorrection,
_format_alpha,
_calculate_expectationValue_HadamardTest,
_U_primitive
)
# The user input for the VQLS-algorithm has to be given
# when params is initialized within GlobalParameters.py
# The decomposition for $A$ has to be manually
# inserted into the code of
# the class GlobalParameters.
# --- VQLS driver script: minimize the local cost, then compare the
# --- resulting state against the reference circuit A|x_0>.
print(
    "This program will execute a simulation of the VQLS-algorithm "
    + "with 4 qubits, 4 layers in the Ansatz and a single Id-gate acting"
    + " on the second qubit.\n"
    + "To simulate another problem, one can either alter _U_primitive "
    + "in vqls.py to change |x_0>, GlobalParameters.py to change A "
    + "or its decomposition respectively."
)
# Executing the VQLS-algorithm (classical outer loop over alpha).
alpha_min = minimize_local_cost_function(params.method_minimization)
"""
Circuit with the $\vec{alpha}$ generated by the minimizer.
"""
# Create a circuit for the vqls-result
qr_min = QuantumRegister(params.n_qubits)
circ_min = QuantumCircuit(qr_min)
# generate $V(\vec{alpha})$ and copy $A$
ansatz = generate_ansatz(alpha_min).to_gate()
A_copy = params.A.copy()
# Operators must be converted before they can be appended to a circuit.
if isinstance(params.A, Operator):
    A_copy = A_copy.to_instruction()
# apply $V(\vec{alpha})$ and $A$ to the circuit
# this results in a state that is approximately $\ket{b}$
circ_min.append(ansatz, qr_min)
circ_min.append(A_copy, qr_min)
# apply post correction to fix for sign errors and a "mirroring"
# of the result
circ_min = postCorrection(circ_min)
"""
Reference circuit based on the definition of $\ket{b}$.
"""
circ_ref = _U_primitive()
"""
Simulate both circuits.
"""
# the minimizations result
backend = Aer.get_backend(
    'statevector_simulator')
t_circ = transpile(circ_min, backend)
qobj = assemble(t_circ)
job = backend.run(qobj)
result = job.result()
print(
    "This is the result of the simulation.\n"
    + "Reminder: 4 qubits and an Id-gate on the second qubit."
    + "|x_0> was defined by Hadamard gates acting on qubits 0 and 3.\n"
    + "The return value of the minimizer (alpha_min):\n"
    + str(alpha_min)
    + "\nThe resulting statevector for a circuit to which "
    + "V(alpha_min) and A and the post correction were applied:\n"
    + str(result.get_statevector())
)
# Same transpile/run pipeline for the reference circuit.
t_circ = transpile(circ_ref, backend)
qobj = assemble(t_circ)
job = backend.run(qobj)
result = job.result()
print(
    "And this is the statevector for the reference circuit: A |x_0>\n"
    + str(result.get_statevector())
)
print("these were Id gates and in U y on 0 and 1")
|
https://github.com/VicentePerezSoloviev/QAOA_BNSL_IBM
|
VicentePerezSoloviev
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
We use n(n-1) qubits for the adj matrix and n(n-1)/2 qubits for the transition matrix
"""
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from itertools import combinations
# Maximum number of parents per node allowed in the Bayesian network.
m = 2
def mapping_mat_vec(n, row, col):
    """Flatten the off-diagonal cell (row, col) of an n x n matrix.

    Off-diagonal cells are counted in row-major order; the position of
    (row, col) in that enumeration is returned.  A diagonal (or out of
    range) target falls through and yields None, as in the original.
    """
    index = 0
    for r in range(n):
        for c in range(n):
            if r == c:
                continue
            if (r, c) == (row, col):
                return index
            index += 1
def mapping_vec_mat(n, index):
    """Inverse of mapping_mat_vec: recover (row, col) from a flat index.

    Walks the off-diagonal cells of an n x n matrix in row-major order and
    returns the cell whose enumeration position equals *index*; an
    out-of-range index yields None, as in the original.
    """
    counter = 0
    for r in range(n):
        for c in range(n):
            if r == c:
                continue
            if counter == index:
                return r, c
            counter += 1
def state_2_str(state):
    """Render *state* via str() with the outermost bracket pair stripped.

    e.g. [1, 0, 1] -> '1, 0, 1'.  The idiomatic [1:-1] slice replaces the
    original explicit len() arithmetic with identical behaviour.
    """
    return str(state)[1:-1]
class QAOA:
    """QAOA circuit builder for Bayesian-network structure learning (BNSL).

    Qubit layout: the first n*(n-1) qubits encode the adjacency matrix
    (off-diagonal entries, row-major) and the next n*(n-1)/2 qubits encode
    the upper-triangular transition/ordering matrix r.  alpha1 and alpha2
    weight the ordering and consistency penalty terms respectively; the
    `adders` list holds those penalty terms as [coefficient, qubit...].
    """
    # Number of layers added so far; unused by the visible methods.
    layers = 0
    def __init__(self, n, alpha1, alpha2, weights):
        # weights must be a dict keyed by (target, parent...) tuples.
        assert isinstance(weights, dict), 'Length of weights matrix is different than expected'
        self.n = n
        self.alpha1 = alpha1
        self.alpha2 = alpha2
        self.q_adj = n * (n - 1) # number of qubits for the adj matrix
        self.q_r = (n * (n - 1)) / 2 # number of qubits for the transition matrix
        # weights is a matrix of elements whose keys are tuples of first element the target and following the parents
        self.weights = weights
        # Create quantum circuit
        nqubits = int(self.q_adj + self.q_r)
        self.qreg = QuantumRegister(nqubits)
        self.creg = ClassicalRegister(nqubits)
        self.circuit = QuantumCircuit(self.qreg, self.creg)
        self.adders = []
        self.gen_adders()
    def index_adj_adder(self, i, j):
        """Qubit index of adjacency entry (i, j); the diagonal is skipped."""
        assert i != j, "Diagonal adjacency indexes must not be taken into account"
        if j > i:
            return (i * self.n) + j - (i + 1)
        else:
            return (i * self.n) + j - i
    def index_r_adder(self, i, j):
        """Qubit index of upper-triangular r entry (i, j), offset past q_adj."""
        assert i < j, "Diagonal r indexes must not be taken into account"
        aux = self.n * (self.n - 1)
        return aux + (i * self.n) + j - int(((i + 2) * (i + 1)) / 2)
    def gen_adders(self):
        """Populate self.adders with the penalty terms of the Hamiltonian."""
        # Transcription of the general formulas of hamiltonian to general indexes of qubits
        # Transitivity terms on the r register, weighted by alpha1.
        for i in range(self.n):
            for j in range(i + 1, self.n):
                for k in range(j + 1, self.n):
                    self.adders.append([self.alpha1, self.index_r_adder(i, k)])
                    self.adders.append([self.alpha1, self.index_r_adder(i, j), self.index_r_adder(j, k)])
                    self.adders.append([-self.alpha1, self.index_r_adder(i, j), self.index_r_adder(i, k)])
                    self.adders.append([-self.alpha1, self.index_r_adder(j, k), self.index_r_adder(i, k)])
        # Adjacency/ordering consistency terms, weighted by alpha2.
        for i in range(self.n):
            for j in range(i + 1, self.n):
                self.adders.append([self.alpha2, self.index_adj_adder(j, i), self.index_r_adder(i, j)])
                self.adders.append([self.alpha2, self.index_adj_adder(i, j)])
                self.adders.append([-self.alpha2, self.index_adj_adder(i, j), self.index_r_adder(i, j)])
    def evaluate_solution(self, string):
        """Classical cost of a sampled bitstring: parent-set scores + penalties."""
        to_bin = []
        for i in range(len(string)):
            to_bin.append(int(string[i]))
        cost = 0
        # multiplication of combination of 2-nodes and weight(node|2parents)
        for i in range(self.n):
            # array = to_bin[i * (self.n - 1): i * (self.n - 1) + (self.n - 1)] # separate each row of adj matrix
            array = [[to_bin[mapping_mat_vec(self.n, k, i)], mapping_mat_vec(self.n, k, i)]
                     for k in range(self.n) if k != i] # separate each col adj m
            sum_col = sum([k[0] for k in array])
            if sum_col > m:
                # cases with more than m parents
                cost = cost + 99999999 # penalize
            else:
                # cases of 0, 1 or 2 parents
                # find index of each 1
                # indexes = [j * (self.n-1) for j, x in enumerate(array) if x == 1] # index general array(no diagonal)
                indexes = [k[1] for k in array if k[0] == 1] # index general array(no diagonal)
                if len(indexes) == 1:
                    # weight (i | index)
                    row, col = mapping_vec_mat(self.n, indexes[0])
                    value = self.weights[i, row]
                    cost = cost + value
                elif len(indexes) == 2:
                    # weight (i | index, index)
                    row1, col1 = mapping_vec_mat(self.n, indexes[0])
                    row2, col2 = mapping_vec_mat(self.n, indexes[1])
                    value = self.weights[i, row1, row2]
                    cost = cost + value
                else:
                    # 0 parents: weight of the node on its own
                    cost = cost + self.weights[i]
                    pass
        # restrictions
        for i in self.adders:
            if len(i) == 2:
                cost = cost + i[0] * to_bin[i[1]]
            if len(i) == 3:
                cost = cost + i[0] * (to_bin[i[1]] * to_bin[i[2]])
        return cost
    def add_superposition_layer(self):
        """Put every qubit into uniform superposition with Hadamards."""
        # Superposition
        for i in range(len(self.qreg)):
            self.circuit.h(self.qreg[i])
    def spin_mult(self, spins, gamma):
        """CNOT-ladder / Rz / inverse-ladder implementing exp(-i*gamma/2 * Z...Z)."""
        if len(spins) == 0 or len(spins) > 4:
            raise Exception('number of spins does not match the function requirements')
        if not isinstance(spins, list):
            raise Exception('A list is required as argument "spins"')
        for i in range(len(spins) - 1):
            self.circuit.cnot(spins[i], spins[len(spins) - 1])
        self.circuit.rz(gamma, spins[len(spins) - 1])
        for i in range(len(spins) - 2, -1, -1):
            self.circuit.cnot(spins[i], spins[len(spins) - 1])
    def adj_mult(self, adjs, gamma, coef):
        """Phase evolution for a product of binary variables at qubits *adjs*."""
        if not isinstance(adjs, list):
            raise Exception('A list is required as argument "adjs"')
        if len(adjs) == 0 or len(adjs) > 4:
            raise Exception('number of adj indexes does not match the function requirements')
        # Binary-to-spin change of variables scales the angle by 2 / 2^len.
        angle = coef * (gamma * 2) / (2 ** (len(adjs)))
        for adj in adjs:
            self.circuit.rz(-angle, self.qreg[adj])
        # Multi-spin ZZ... interactions for every subset of size >= 2.
        for tam in range(2, len(adjs) + 1):
            for comb in combinations(adjs, tam):
                self.spin_mult([self.qreg[i] for i in list(comb)], angle)
    def add_layer(self, nlayers, beta, gamma):
        """Append *nlayers* QAOA layers (phase operator + Rx mixer)."""
        for lay in range(nlayers):
            # Phase Operator
            # multiplication of each isolated and weight(node|1parent)
            for i in range(self.n):
                for j in range(self.n):
                    if i != j:
                        # in qubit i, j is the weight of j->i
                        value = (-1) * (self.weights[i, j] - self.weights[i]) # subtract w_i({null})
                        self.circuit.rz(gamma[lay] * value, self.qreg[self.index_adj_adder(j, i)])
            # multiplication of combination of 2-nodes and weight(node|2parents) in same adj col
            for i in range(self.n):
                array = [k for k in range(self.n) if k != i]
                perm = combinations(array, m)
                for per in perm:
                    # i | perm, perm
                    value = self.weights[i, per[0], per[1]] + self.weights[i] - \
                            self.weights[i, per[0]] - self.weights[i, per[1]]
                    self.adj_mult([self.index_adj_adder(per[0], i), self.index_adj_adder(per[1], i)],
                                  gamma[lay], value) # coef = 1 -> not in the restrictions
            # multiplication of each of the couple restrictions
            for i in self.adders:
                self.adj_mult(i[1:], gamma[lay], i[0])
            # Mixing Operator
            for i in range(len(self.qreg)):
                self.circuit.rx(2*beta[lay], self.qreg[i])
    def measure(self):
        """Measure all qubits; classical bits are filled in reversed order."""
        self.circuit.measure(range(len(self.qreg)), range(len(self.qreg)-1, -1, -1))
|
https://github.com/MuhammadMiqdadKhan/Improvement-of-Quantum-Circuits-Using-H-U-H-Sandwich-Technique-with-Diagonal-Matrix-Implementation
|
MuhammadMiqdadKhan
|
# H-U-H sandwich experiment (2x2 case): transpile a random unitary directly,
# then transpile the diagonal of H*U*H and compare the resulting circuits.
# NOTE(review): '%matplotlib inline' marks this as notebook code.
%matplotlib inline
# Importing standard Qiskit libraries and configuring account
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
import scipy
from scipy.stats import unitary_group
# Haar-random 2x2 unitary, synthesized into u3/cx via isometry decomposition.
u = unitary_group.rvs(2)
print(u)
qc = QuantumCircuit(2)
qc.iso(u, [0], [])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy import linalg
# NOTE(review): scipy's hadamard(2) is unnormalized (+/-1 entries); the
# quantum H gate would need a 1/sqrt(2) factor — confirm this is intended.
h = scipy.linalg.hadamard(2)
import numpy as np
u1 = np.dot(h, u)
u2 = np.dot(u1, h)
# Keep only the diagonal of h @ u @ h and synthesize it as a diagonal gate.
c2 = []
for i in range (2):
    c2.append(u2[i,i])
print(c2)
qc = QuantumCircuit(2)
qc.diagonal(c2, [0])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy.stats import unitary_group
u = unitary_group.rvs(4)
print(u)
qc = QuantumCircuit(4)
qc.iso(u, [0,1], [])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy import linalg
h = scipy.linalg.hadamard(4)/2
import numpy as np
u1 = np.dot(h, u)
u2 = np.dot(u1, h)
c2 = []
for i in range (4):
c2.append(u2[i,i])
print(c2)
qc = QuantumCircuit(4)
qc.diagonal(c2, [0, 1])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy.stats import unitary_group
u = unitary_group.rvs(8)
print(u)
qc = QuantumCircuit(4)
qc.iso(u, [0, 1, 2], [])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy import linalg
h = scipy.linalg.hadamard(8)/3
import numpy as np
u1 = np.dot(h, u)
u2 = np.dot(u1, h)
c2 = []
for i in range (8):
c2.append(u2[i,i])
print(c2)
qc = QuantumCircuit(4)
qc.diagonal(c2, [0, 1, 2])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy.stats import unitary_group
u = unitary_group.rvs(16)
print(u)
qc = QuantumCircuit(4)
qc.iso(u, [0, 1, 2, 3], [])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy import linalg
h = scipy.linalg.hadamard(16)/4
import numpy as np
u1 = np.dot(h, u)
u2 = np.dot(u1, h)
c2 = []
for i in range (16):
c2.append(u2[i,i])
print(c2)
qc = QuantumCircuit(4)
qc.diagonal(c2, [0, 1, 2, 3])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy.stats import unitary_group
u = unitary_group.rvs(32)
print(u)
qc = QuantumCircuit(8)
qc.iso(u, [0, 1, 2, 3, 4], [])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy import linalg
h = scipy.linalg.hadamard(32)/5
import numpy as np
u1 = np.dot(h, u)
u2 = np.dot(u1, h)
c2 = []
for i in range (32):
c2.append(u2[i,i])
print(c2)
qc = QuantumCircuit(8)
qc.diagonal(c2, [0, 1, 2, 3, 4])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy.stats import unitary_group
u = unitary_group.rvs(64)
print(u)
qc = QuantumCircuit(8)
qc.iso(u, [0, 1, 2, 3, 4, 5], [])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy import linalg
h = scipy.linalg.hadamard(64)/6
import numpy as np
u1 = np.dot(h, u)
u2 = np.dot(u1, h)
c2 = []
for i in range (64):
c2.append(u2[i,i])
print(c2)
qc = QuantumCircuit(8)
qc.diagonal(c2, [0, 1, 2, 3, 4, 5])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy.stats import unitary_group
u = unitary_group.rvs(128)
print(u)
qc = QuantumCircuit(8)
qc.iso(u, [0, 1, 2, 3, 4, 5, 6], [])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy import linalg
h = scipy.linalg.hadamard(128)/7
import numpy as np
u1 = np.dot(h, u)
u2 = np.dot(u1, h)
c2 = []
for i in range (128):
c2.append(u2[i,i])
print(c2)
qc = QuantumCircuit(8)
qc.diagonal(c2, [0, 1, 2, 3, 4, 5, 6])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy.stats import unitary_group
u = unitary_group.rvs(256)
print(u)
qc = QuantumCircuit(8)
qc.iso(u, [0, 1, 2, 3, 4, 5, 6, 7], [])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy import linalg
h = scipy.linalg.hadamard(256)/8
import numpy as np
u1 = np.dot(h, u)
u2 = np.dot(u1, h)
c2 = []
for i in range (256):
c2.append(u2[i,i])
print(c2)
qc = QuantumCircuit(8)
qc.diagonal(c2, [0, 1, 2, 3, 4, 5, 6, 7])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy.stats import unitary_group
u = unitary_group.rvs(512)
print(u)
qc = QuantumCircuit(16)
qc.iso(u, [0, 1, 2, 3, 4, 5, 6, 7, 8], [])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy import linalg
h = scipy.linalg.hadamard(512)/9
import numpy as np
u1 = np.dot(h, u)
u2 = np.dot(u1, h)
c2 = []
for i in range (512):
c2.append(u2[i,i])
print(c2)
qc = QuantumCircuit(16)
qc.diagonal(c2, [0, 1, 2, 3, 4, 5, 6, 7, 8])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy.stats import unitary_group
u = unitary_group.rvs(1024)
print(u)
qc = QuantumCircuit(16)
qc.iso(u, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
import scipy
from scipy import linalg
h = scipy.linalg.hadamard(1024)/10
import numpy as np
u1 = np.dot(h, u)
u2 = np.dot(u1, h)
c2 = []
for i in range (1024):
c2.append(u2[i,i])
print(c2)
qc = QuantumCircuit(16)
qc.diagonal(c2, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=3)
circuit_drawer(qc)
|
https://github.com/agneya-1402/Quantum-HalfAdder
|
agneya-1402
|
# Quantum half adder: qubits 0 and 1 are the inputs (both set to 1 here),
# qubit 2 collects the sum (XOR via two CNOTs) and qubit 3 the carry (AND
# via a Toffoli).  Expected measurement for inputs 1,1: sum=0, carry=1.
import numpy as np
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, transpile, Aer, IBMQ
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from ibm_quantum_widgets import *
from qiskit import *
# Loading your IBM Quantum account(s)
#provider = IBMQ.load_account()
qc_ha = QuantumCircuit(4,2)
# Prepare the inputs 1 and 1.
qc_ha.x(0)
qc_ha.x(1)
qc_ha.barrier()
# Sum bit: q2 = q0 XOR q1.
qc_ha.cx(0,2)
qc_ha.cx(1,2)
# Carry bit: q3 = q0 AND q1.
qc_ha.ccx(0,1,3)
qc_ha.barrier()
qc_ha.measure(2,0)
qc_ha.measure(3,1)
qc_ha.draw()
sim = Aer.get_backend('qasm_simulator')
qobj = assemble(qc_ha)
counts = sim.run(qobj).result().get_counts()
plot_histogram(counts)
|
https://github.com/Dynamic-Vector/Qubit-Visualizer
|
Dynamic-Vector
|
from tkinter import *
import tkinter as tk
import numpy as np
from qiskit import QuantumCircuit
from qiskit.visualization import visualize_transition
from tkinter import LEFT,END,DISABLED,NORMAL
import warnings
warnings.filterwarnings('ignore')
#Initalize the Quantum circuit
def initialize_circuit():
    """
    Reset the module-level `circuit` to a fresh single-qubit
    QuantumCircuit (state |0>), discarding any previously applied gates.
    """
    global circuit
    circuit=QuantumCircuit(1)
# Build the initial |0> circuit and zero the pending rotation angle.
initialize_circuit()
theta=0
#Define Display Function
def display_gate(gate_input):
    """
    Append *gate_input* to the display entry to track applied operations.
    Once ten operations have been recorded, every gate button is disabled.
    """
    #Insert the defined gate
    display.insert(END,gate_input)
    #Check if the number of operations has reached ten,if yes,
    #disable all the gate buttons
    input_gates = display.get()
    num_gates_pressed = len(input_gates)
    list_input_gates= list(input_gates)
    # Rotation gates are shown with two characters (containing 'R' or 'D'),
    # so subtract one char per occurrence to count each gate once.
    # NOTE(review): assumes gate labels with 'R'/'D' are exactly two chars —
    # confirm against the labels passed in by the buttons.
    search_word = ["R","D"]
    count_double_valued_gates = [list_input_gates.count(i) for i in search_word]
    num_gates_pressed -= sum(count_double_valued_gates)
    if num_gates_pressed == 10:
        gates=[x,y,z,s,sd,h,t,td]
        for gate in gates:
            gate.config(state=DISABLED)
#Define Del Function
def clear(circuit):
    """
    Clear the display, re-initialize the module-level quantum circuit to
    |0>, and re-enable the gate buttons if they were disabled.

    NOTE(review): the *circuit* parameter is unused — the function resets
    the module-level circuit via initialize_circuit() instead.
    """
    #clear the display
    display.delete(0,END)
    #reset the circuit to initial state |0>
    initialize_circuit()
    # The X button's state stands in for all gate buttons, which are
    # enabled/disabled together.
    if x['state']== DISABLED:
        gates=[x,y,z,s,sd,h,t,td]
        for gate in gates:
            gate.config(state=NORMAL)
def user_input(circuit,key):
    """Open a small Tk window of preset angles for a parameterized rotation.

    Each button calls change_theta() with a multiple of pi (0.25 .. 2.0 and
    their negatives); *key* selects which rotation gate (rx/ry/rz) is
    eventually applied to *circuit*.
    """
    #Initialize adn define the properties of window
    get=tk.Tk()
    get.title("Enter theta")
    get.geometry("320x80")
    get.resizable(0,0)
    # Positive multiples of pi on the first row...
    val1=tk.Button(get,height=2,width=10,text="PI/4",command=lambda:change_theta(0.25,get,circuit,key))
    val1.grid(row=0,column=0)
    val2=tk.Button(get,height=2,width=10,text="PI/2",command=lambda:change_theta(0.50,get,circuit,key))
    val2.grid(row=0,column=1)
    val3=tk.Button(get,height=2,width=10,text="PI",command=lambda:change_theta(1.0,get,circuit,key))
    val3.grid(row=0,column=2)
    val4=tk.Button(get,height=2,width=10,text="2*PI",command=lambda:change_theta(2.0,get,circuit,key))
    val4.grid(row=0,column=3,sticky='w')
    # ...and their negatives on the second row.
    val5=tk.Button(get,height=2,width=10,text="-PI/4",command=lambda:change_theta(-0.25,get,circuit,key))
    val5.grid(row=1,column=0)
    val6=tk.Button(get,height=2,width=10,text="-PI/2",command=lambda:change_theta(-0.50,get,circuit,key))
    val6.grid(row=1,column=1)
    val7=tk.Button(get,height=2,width=10,text="-PI",command=lambda:change_theta(-1.0,get,circuit,key))
    val7.grid(row=1,column=2)
    val8=tk.Button(get,height=2,width=10,text="-2*PI",command=lambda:change_theta(-2.0,get,circuit,key))
    val8.grid(row=1,column=3,sticky='w')
    get.mainloop()
def change_theta(num, window, circuit, key):
    """Apply a rotation of ``num * pi`` radians to qubit 0 and close *window*.

    Parameters
    ----------
    num : float
        Multiple of pi chosen by the user (e.g. 0.25 for pi/4).
    window : tk.Tk
        The angle-chooser window; destroyed once the gate is applied.
    circuit : QuantumCircuit
        Circuit to which the rotation gate is appended.
    key : str
        Rotation axis: 'x' -> Rx, 'y' -> Ry, anything else -> Rz.
    """
    global theta
    theta = num * np.pi
    if key == 'x':
        circuit.rx(theta, 0)
    elif key == 'y':
        circuit.ry(theta, 0)
    else:
        circuit.rz(theta, 0)
    # Bug fix: the original x-branch reset a misspelled name ('thera'),
    # leaving the global theta stale after an RX rotation.  Reset once here
    # for every branch.
    theta = 0
    window.destroy()
#Attributes
# Shared look-and-feel constants for the calculator window.
background='#2c94c8'  # base colour; NOTE: this name is rebound below to a canvas image id
buttons='#d9d9d9'  # default gate-button background colour
special_buttons='#bc3454'  # accent colour (unused in the visible code)
button_font=('Roboto',18,)  # font for the gate buttons
display_font=('Roboto',28)  # font for the result display entry
#Define Window
# Root window: fixed 816x560 layout drawn on a full-size canvas.
window = tk.Tk()
window.iconbitmap(default='res/logo.ico')
window.title("Quantum Labs")
window.geometry("816x560")
window.configure(bg = "#ffffff")
canvas = Canvas(
    window,
    bg = "#ffffff",
    height = 560,
    width = 816,
    bd = 0,
    highlightthickness = 0,
    relief = "ridge")
canvas.place(x = 0, y = 0)
# Keep a module-level reference to the image so tkinter does not
# garbage-collect it while the canvas still shows it.
background_img = PhotoImage(file = f"res/background.png")
background = canvas.create_image(
    260, 279,
    image=background_img)
#Add the display
# Result display: an Entry widget layered over a background image.
# Bug fix: tk.PhotoImage's first positional argument is the image *name*,
# not a file path, so the original call never loaded the picture.  The
# path must be passed through the `file` keyword.
text_box_bg = tk.PhotoImage(file="res/TextBox_Bg.png")
display_img = canvas.create_image(650.5, 267.5, image=text_box_bg)
display = tk.Entry(bd=0, bg="#bfc0de", font=display_font, highlightthickness=0)
display.place(x=485, y=125, width=270, height=45)
#Add the buttons
def _styled_button(label, action, px, py, width=60, height=30):
    """Create and place one calculator button with the shared look-and-feel."""
    btn = Button(
        window,
        text=label,
        font=button_font,
        bg=buttons,
        bd=0,
        highlightthickness=0,
        relief="ridge",
        command=action)
    btn.place(x=px, y=py, width=width, height=height)
    return btn

# Gate buttons, laid out on a 3-column grid.  The module-level names
# (x, y, z, s, sd, h, t, td) are relied upon by clear()/display_gate().
x = _styled_button("X", lambda: [display_gate('X'), circuit.x(0)], 485, 200)
y = _styled_button("Y", lambda: [display_gate('Y'), circuit.y(0)], 590, 200)
z = _styled_button("Z", lambda: [display_gate('Z'), circuit.z(0)], 695, 200)
s = _styled_button("S", lambda: [display_gate('S'), circuit.s(0)], 485, 275)
sd = _styled_button("SD", lambda: [display_gate('SD'), circuit.sdg(0)], 590, 275)
h = _styled_button("H", lambda: [display_gate('H'), circuit.h(0)], 695, 275)
# Parameterized rotations open the angle chooser instead of acting directly.
rx = _styled_button("RX", lambda: [display_gate('RX'), user_input(circuit, 'x')], 485, 350)
ry = _styled_button("RY", lambda: [display_gate('RY'), user_input(circuit, 'y')], 590, 350)
rz = _styled_button("RZ", lambda: [display_gate('RZ'), user_input(circuit, 'z')], 695, 350)
t = _styled_button("T", lambda: [display_gate('T'), circuit.t(0)], 485, 425)
td = _styled_button("TD", lambda: [display_gate('TD'), circuit.tdg(0)], 590, 425)
# DEL clears the display and resets the circuit.
clean = _styled_button("DEL", lambda: clear(circuit), 695, 425)
# VISUALIZE animates the circuit's action on the Bloch sphere.
visualize = _styled_button(
    "VISUALIZE",
    lambda: visualize_transition(circuit, trace=False, saveas=None, spg=2, fpg=100),
    540, 490, width=160, height=40)
window.resizable(False, False)
window.mainloop()
|
https://github.com/helenup/Quantum-Euclidean-Distance
|
helenup
|
# import the necessary libraries
import math as m
from qiskit import *
from qiskit import BasicAer
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# First step is to encode the data into quantum states.
#There are some techniques to do it, in this case Amplitude embedding was used.
# Two 4-dimensional classical data points whose distance we estimate.
A = [2,9,8,5]
B = [7,5,10,3]
norm_A = 0
norm_B = 0
Dist = 0
# Accumulate squared norms of A and B and their squared Euclidean distance.
for i in range(len(A)):
    norm_A += A[i]**2
    norm_B += B[i]**2
    Dist += (A[i]-B[i])**2
Dist = m.sqrt(Dist)  # classical Euclidean distance, for comparison
A_norm = m.sqrt(norm_A)
B_norm = m.sqrt(norm_B)
# Z = |A|^2 + |B|^2, the normalization constant of the phi state.
Z = round( A_norm**2 + B_norm**2 )
# create phi and psi state with the data
# phi: single-qubit state weighted by the vector norms.
phi = [A_norm/m.sqrt(Z),-B_norm/m.sqrt(Z)]
# psi: 3-qubit state interleaving the normalized entries of A and B.
psi = []
for i in range(len(A)):
    psi.append(((A[i]/A_norm) /m.sqrt(2)))
    psi.append(((B[i]/B_norm) /m.sqrt(2)))
# Quantum Circuit
# q1: ancilla for the swap test; q2: data register (phi on q2[0], psi on q2[1:4]).
q1 = QuantumRegister(1,name='q1')
q2 = QuantumRegister(4,name='q2')
c = ClassicalRegister(1,name='c')
qc= QuantumCircuit(q1,q2,c)
# states initialization
qc.initialize( phi, q2[0] )
qc.initialize( psi, q2[1:4] )
# The swap test operator
# H - controlled-SWAP - H on the ancilla; P(0) encodes the overlap |<phi|psi>|^2.
qc.h( q1[0] )
qc.cswap( q1[0], q2[0], q2[1] )
qc.h( q1[0] )
qc.measure(q1,c)
display(qc.draw(output="mpl"))  # NOTE: display() assumes an IPython/Jupyter session
## Results
shots = 10000
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=shots)
job_result = job.result()
counts = job_result.get_counts(qc)
# Recover the squared distance from P(ancilla = 0); see the swap-test relation
# P(0) = 1/2 + overlap/2, rescaled by 2Z.  (Formula as published by the author.)
x = abs(((counts['0']/shots - 0.5)/0.5)*2*Z)
Q_Dist = round(m.sqrt(x),4)
print('Quantum Distance: ', round(Q_Dist,3))
print('Euclidean Distance: ',round(Dist,3))
|
https://github.com/helenup/Quantum-Euclidean-Distance
|
helenup
|
from qiskit import *
from qiskit import BasicAer
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# Minimal swap-test demo: qreg[0] is the ancilla, qreg[1] and qreg[2] hold the
# two states whose overlap is measured.
qreg = QuantumRegister(3, 'qreg')
creg = ClassicalRegister(3, 'creg')
qc = QuantumCircuit (qreg, creg)
# Initial state |01>
qc.x(qreg[1])
#swap_test
qc.h(qreg[0]) #Apply superposition on the ancilla qubit
qc.cswap( qreg[0], qreg[1], qreg[2] )  # controlled-SWAP of the two data qubits
qc.h(qreg[0])
qc.barrier()
# Only the ancilla is measured; its P(0) encodes the overlap.
qc.measure(qreg[0], creg[0])
display(qc.draw(output="mpl"))  # NOTE: display() assumes an IPython/Jupyter session
#Result
shots = 1024
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=shots)
job_result = job.result()
counts = job_result.get_counts(qc)
print(counts)
# The results agree with the swap test function, where if the P|0> = 0.5 on the ancilla(control) qubit
#means the states are orthogonal, and if the P|0>=1 indicates the states are identical.
|
https://github.com/murogrande/IBM-cert-exam-study-questions
|
murogrande
|
## import some libraries
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, execute, BasicAer, IBMQ
from math import sqrt
import qiskit
print(qiskit.__qiskit_version__)
qc =QuantumCircuit(3,3)
qc.h(0)
qc.cx(0,1)
qc.cx(0,2)
qc.h(2)
qc.cx(2,0)
print(qc.depth())
qc = QuantumCircuit(1)
qc.h(0)
qc.t(0)
simulator = Aer.get_backend('statevector_simulator')
result = execute(qc,simulator).result()
from qiskit.visualization import plot_bloch_multivector
plot_bloch_multivector(result.get_statevector())
qc = QuantumCircuit(2)
qc.cx(0,1)
## you won't see the next line in the exam.
qc.draw('mpl')
#Answer is D
qc.draw('mpl',filename='test.png') ## check the folder where the notebook is located
qc = QuantumCircuit(2,2)
qc.h(0)
qc.x(1)
qc.measure([0,1],[0,1])
simulator=Aer.get_backend('qasm_simulator')
## Answer C
## here is the check
from qiskit.visualization import plot_histogram
job = execute(qc,simulator).result()
counts = job.get_counts()
print(counts)
plot_histogram(counts)
qreg_a = QuantumRegister(2)
qreg_b = QuantumRegister(2)
creg = ClassicalRegister(4)
qc = QuantumCircuit(qreg_a,qreg_b,creg)
qc.x(qreg_a[0])
qc.measure(qreg_a,creg[0:2])
qc.measure(qreg_b,creg[2:4])
simulator= BasicAer.get_backend('qasm_simulator')
result= execute(qc,simulator).result()
counts = result.get_counts(qc)
## check the answer
print(counts)
print("The answer is C ")
qc.draw('mpl')
from qiskit import QuantumCircuit
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0,1)
##Answer is C
qc.draw('png')
# import image module
from IPython.display import Image
### you won't see the following lines in the exam just the plot
# get the image
Image(url="random-unitary.png", width=600, height=600)
### in the exam you will just see the image
# import image module
from IPython.display import Image
# get the image
Image(url="circui1.png", width=300, height=300)
### in the exam you will just see the image
# A.
'''OPENQASM 2.0;
include "qelib1.inc";
qreg q[2];
creg c[2];
h.q[0];
barrier (q[0],q[1]);
z.q[1];
barrier (q[0], q[1]);
measure (q[0], c[0]);
measure (q[1], c[1]);
'''
# B
qc = QuantumCircuit(2,2)
qc.h(q[0])
qc.barrier(q[0],q[1])
qc.z(q[1])
qc.barrier(q[0],q[1])
m = measure(q[0] -> c[0])
m += measure(q[1] -> c[1])
qc=qc+m
# C
qc = QuantumCircuit(2,2)
qc.h(0)
qc.barrier(0,1)
qc.z(1)
qc.barrier(0,1)
qc.measure([0,1],[0,1])
#D
qc = QuantumCircuit(2,2)
qc.h(q[0])
qc.barrier(q[0],q[1])
qc.z(q[1])
qc.barrier(q[0],q[1])
m = measure(q[0], c[0])
m = measure(q[1], c[1])
qc=qc+m
### you won't see the following lines of code in the exam
from IPython.display import Image
Image(url="circui2.png", width=150, height=150)
# A
qr = QuantumRegister(2,'q')
a = QuantumRegister(1,'a')
cr = ClassicalRegister(3,'c')
qc = QuantumCircuit(qr,a,cr)
qc.h(qr[0:2])
qc.x(a[0])
# B
qr = QuantumRegister(2,'q')
a = QuantumRegister (1,'a')
cr = ClassicalRegister(3,'c')
qc = QuantumCircuit(cr,a,qr)
qc.h(qr[0:2])
qc.x(a[0])
#C
qr = QuantumRegister(2,'q')
a = QuantumRegister (1,'a')
cr = ClassicalRegister(3,'c')
qc = QuantumCircuit(qr,a,cr)
qc.h(qr[0:1])
qc.x(a[0])
#D
qr = QReg(2,'q')
a = QReg (1,'a')
cr = CReg(3,'c')
qc = QuantumCircuit(qr,a,cr)
qc.h(qr[0:2])
qc.x(a[0])
from qiskit.tools.monitor import *
provider = IBMQ.load_account()
#provider.backends() ## this line of code can be important becuase it could be a question of your exam.
#In other words, how do you know the backends of the provider?
backend= provider.get_backend('ibmq_qasm_simulator')
qr = QuantumRegister(2)
cr= ClassicalRegister(2)
qc = QuantumCircuit(qr,cr)
qc.h(qr[0])
qc.cx(qr[0],qr[1])
qc.measure(qr,cr)
job = execute(qc,backend)
job.status()
job_monitor(job)
#### another could be job_watcher for jupyternoote book
from qiskit.tools.jupyter import job_watcher
%qiskit_job_watcher
job = backend.retrieve_job('61f20ee81faa0605383485a7')
result = job.result()
counts = result.get_counts()
print(counts)
qc = QuantumCircuit(3)
qc.initialize('01',[0,2])
qc.draw()
print(qc.decompose())
from qiskit.visualization import plot_error_map, plot_gate_map
backend = provider.get_backend('ibmq_quito')
plot_error_map(backend)
plot_gate_map(backend)
#A
from qiskit import QuantumCircuit, Aer, execute
from math import pi
qc = QuantumCircuit(2)
qc.crz(pi,0,1)
qc.crz(-pi,0,1)
u_sim = Aer.get_backend('unitary_simulator')
unitary = execute(qc,u_sim).result().get_unitary()
print(unitary)
#B
from qiskit import QuantumCircuit, Aer, execute
from math import pi
qc = QuantumCircuit(2)
qc.crz(pi,0,1)
qc.cp(pi,0,1)
u_sim = Aer.get_backend('unitary_simulator')
unitary = execute(qc,u_sim).result().get_unitary()
print(unitary)
#C
from qiskit import QuantumCircuit, Aer, execute
from math import pi
qc = QuantumCircuit(2)
qc.cz(0,1)
qc.cz(1,0)
u_sim = Aer.get_backend('unitary_simulator')
unitary = execute(qc,u_sim).result().get_unitary()
print(unitary)
qc.draw()
#D
from qiskit import QuantumCircuit, Aer, execute
from math import pi
qc = QuantumCircuit(2)
qc.cz(0,1)
qc.cp(pi,0,1)
u_sim = Aer.get_backend('unitary_simulator')
unitary = execute(qc,u_sim).result().get_unitary()
print(unitary)
import qiskit.tools.jupyter
%qiskit_backend_overview
from qiskit import QuantumCircuit
qc = QuantumCircuit(3)
#insert code fragment here
#Output
### you won't see the following lines of code in the exam, just focus on the figure
from IPython.display import Image
Image(url="imageassesment1.png", width=350, height=350)
#A
qc.measure_all()
#B
qc = QuantumCircuit(3)
qc.measure()
#C
qc = QuantumCircuit(3)
qc.measure(0,0)
qc.measure(1,1)
qc.measure(2,2)
#D
qc = QuantumCircuit(3)
for n in range(len(qc.qubits)):
qc.measure(n,n)
qc.qubits
## here you need to display each line or lines of code before each barrier in the Qsphere, the question is about
#to put in order the sequence of states that will appear in the Qsphere.
qc = QuantumCircuit(3)
qc.x(1)
qc.barrier()
qc.h(0)
qc.h(1)
qc.h(2)
qc.barrier()
qc.z(1)
qc.barrier()
qc.h(0)
qc.h(1)
qc.h(2)
qc.draw('mpl')
from qiskit.visualization import plot_state_qsphere
simulator= Aer.get_backend('statevector_simulator')
result = execute(qc,simulator).result()
statevector = result.get_statevector(qc)
plot_state_qsphere(statevector)
from qiskit import BasicAer, Aer, execute
qc = QuantumCircuit(1)
qc.h(0)
#insert code fragment here
print(unitary)
#A
simulator = BasicAer.get_backend('unitary_simulator')
unitary = execute(qc,simulator).get_unitary(qc)
#B
simulator = Aer.get_backend('unitary_simulator')
result = execute(qc,simulator).result()
unitary = result.get_unitary(qc)
#C
simulator = Aer.get_backend('statevector_simulator')
result = execute(qc,simulator).result()
unitary = result.get_matrix_result(qc)
#D
simulator = BasicAer.get_backend('statevector_simulator')
result = execute(qc,simulator).result()
unitary = result.get_unitary(qc)
#E
simulator = BasicAer.get_backend('unitary_simulator')
result = execute(qc,simulator).result()
unitary = result.get_unitary()
from qiskit.visualization import plot_bloch_vector
from math import pi, sqrt
plot_bloch_vector(vector)
#A
vector = [1,-1,0]
#B
vector = [pi/2,-pi/4,0]
#C
vector = [1/sqrt(2),-1/sqrt(2),0]
#D
vector = [1/sqrt(2),-1/sqrt(2),-1]
from qiskit.visualization import plot_state_qsphere
qc = QuantumCircuit(3)
qc.h(0)
#qc.z(0)
qc.x(1)
qc.cx(0,1)
qc.x(2)
qc.cx(1,2)
backend = BasicAer.get_backend('statevector_simulator')
job = execute(qc, backend).result()
statevector= job.get_statevector()
plot_state_qsphere(statevector)
qc = QuantumCircuit(1)
qc.x(0)
qc.h(0)
simulator = Aer.get_backend('unitary_simulator')
job = execute(qc,simulator)
result = job.result()
outputstate = result.get_unitary(qc,1)
print(outputstate)
qc = QuantumCircuit(3,3)
qc.h([0,1,2])
qc.barrier()
qc.measure([0,1,2],range(3))
qc.draw()
print(qc.qasm())
qasm_sim = Aer.get_backend('qasm_simulator')
qc= QuantumCircuit(3)
qc.x([0,1,2])
qc.ccx(0,1,2)
qc.measure_all()
result = execute(qc,qasm_sim).result()
counts = result.get_counts()
print(counts)
qc= QuantumCircuit(3)
qc.ct()
from qiskit.quantum_info import DensityMatrix
matrix1 = [
[1,0],[0,0]
]
matrix2 = [
[0.5,0.5],[0.5,0.5]
]
#A
result= DensityMatrix.tensor(matrix1,matrix2)
print(result)
#B
matrix1 = DensityMatrix(matrix1)
print(matrix1.tensor(matrix2))
#C
print(matrix1.tensor(matrix2))
#D
print(DensityMatrix.tensor(matrix1,matrix2))
from qiskit.visualization import plot_state_city
qc = QuantumCircuit(2)
qc.h(0)
qc.x(1)
qc.cx(0,1)
qc.z(0)
simulator = BasicAer.get_backend('statevector_simulator')
job = execute(qc,simulator).result()
statevector = job.get_statevector()
plot_state_city(statevector)
qc = QuantumCircuit(1)
#A
#qc.ry(pi/2,0)
#qc.s(0)
#qc.rx(pi/2,0)
#B
#qc.ry(pi/2,0)
#qc.rx(pi/2,0)
#qc.s(0)
#C
#qc.s(0)
#qc.ry(pi/2,0)
#qc.rx(pi/2,0)
#D
qc.rx(pi/2,0)
qc.s(0)
qc.ry(pi/2,0)
qc.measure_all()
simulator = BasicAer.get_backend('qasm_simulator')
job = execute(qc,simulator).result()
counts = job.get_counts()
print(counts)
from qiskit.quantum_info import DensityMatrix
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0,1)
qc1= QuantumCircuit(2)
qc1.h(0)
qc1.x(1)
qc1.cx(0,1)
#qc.draw('mpl')
rho_qc=DensityMatrix.from_instruction(qc)
rho_qc.draw()
rho1= DensityMatrix.from_instruction(qc1)
rho1.draw()
qc1new = qc1.decompose()
qc1new.draw()
#tensor1 = DensityMatrix.from_label('[[0,1],[1,0]]')
qc = QuantumCircuit(2)
#v1,v2 = [0,1],[0,1]
v = [1/sqrt(2),0,0,1/sqrt(2)]
qc.initialize(v,[0,1])
qc.draw(output='mpl')
simulator = Aer.get_backend('statevector_simulator')
result = execute(qc, simulator).result()
statevector = result.get_statevector()
print(statevector)
from qiskit.circuit.library import CXGate
ccx = CXGate().control()
qc = QuantumCircuit(3)
qc.append(ccx,[0,1,2])
qc.draw()
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, execute
qc= QuantumCircuit(3)
qc.barrier()
qc.barrier([0])
qc.draw()
qc = QuantumCircuit.from_qasm_file('myfile.qasm')
qc.measure_all()
qc.draw(output='latex_source')
from qiskit.quantum_info import Statevector
from qiskit.visualization import plot_state_qsphere, plot_state_paulivec, plot_state_city, plot_bloch_vector, plot_state_hinton, plot_bloch_multivector
stavec = Statevector.from_label('001')
plot_state_paulivec(stavec)
import qiskit.tools.jupyter
%qiskit_version_table
import qiskit.tools.jupyter
%qiskit_backend_overview
|
https://github.com/murogrande/IBM-cert-exam-study-questions
|
murogrande
|
## import some libraries
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, execute, BasicAer, IBMQ
from math import sqrt
import qiskit
print(qiskit.__qiskit_version__)
qc =QuantumCircuit(3,3)
qc.h(0)
qc.cx(0,1)
qc.cx(0,2)
qc.h(2)
qc.cx(2,0)
print(qc.depth())
qc = QuantumCircuit(1)
qc.h(0)
qc.t(0)
simulator = Aer.get_backend('statevector_simulator')
result = execute(qc,simulator).result()
from qiskit.visualization import plot_bloch_multivector
plot_bloch_multivector(result.get_statevector())
qc = QuantumCircuit(2)
qc.cx(0,1)
## you won't see the next line in the exam.
qc.draw('mpl')
#Answer is D
qc.draw('mpl',filename='test.png') ## check the folder where the notebook is located
qc = QuantumCircuit(2,2)
qc.h(0)
qc.x(1)
qc.measure([0,1],[0,1])
simulator=Aer.get_backend('qasm_simulator')
## Answer C
## here is the check
from qiskit.visualization import plot_histogram
job = execute(qc,simulator).result()
counts = job.get_counts()
print(counts)
plot_histogram(counts)
qreg_a = QuantumRegister(2)
qreg_b = QuantumRegister(2)
creg = ClassicalRegister(4)
qc = QuantumCircuit(qreg_a,qreg_b,creg)
qc.x(qreg_a[0])
qc.measure(qreg_a,creg[0:2])
qc.measure(qreg_b,creg[2:4])
simulator= BasicAer.get_backend('qasm_simulator')
result= execute(qc,simulator).result()
counts = result.get_counts(qc)
## check the answer
print(counts)
print("The answer is C ")
qc.draw('mpl')
from qiskit import QuantumCircuit
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0,1)
##Answer is C
qc.draw('png')
# import image module
from IPython.display import Image
### you won't see the following lines in the exam just the plot
# get the image
Image(url="random-unitary.png", width=600, height=600)
### in the exam you will just see the image
# import image module
from IPython.display import Image
# get the image
Image(url="circui1.png", width=300, height=300)
### in the exam you will just see the image
# A.
'''OPENQASM 2.0;
include "qelib1.inc";
qreg q[2];
creg c[2];
h.q[0];
barrier (q[0],q[1]);
z.q[1];
barrier (q[0], q[1]);
measure (q[0], c[0]);
measure (q[1], c[1]);
'''
# B
qc = QuantumCircuit(2,2)
qc.h(q[0])
qc.barrier(q[0],q[1])
qc.z(q[1])
qc.barrier(q[0],q[1])
m = measure(q[0] -> c[0])
m += measure(q[1] -> c[1])
qc=qc+m
# C
qc = QuantumCircuit(2,2)
qc.h(0)
qc.barrier(0,1)
qc.z(1)
qc.barrier(0,1)
qc.measure([0,1],[0,1])
#D
qc = QuantumCircuit(2,2)
qc.h(q[0])
qc.barrier(q[0],q[1])
qc.z(q[1])
qc.barrier(q[0],q[1])
m = measure(q[0], c[0])
m = measure(q[1], c[1])
qc=qc+m
### you won't see the following lines of code in the exam
from IPython.display import Image
Image(url="circui2.png", width=150, height=150)
# A
qr = QuantumRegister(2,'q')
a = QuantumRegister(1,'a')
cr = ClassicalRegister(3,'c')
qc = QuantumCircuit(qr,a,cr)
qc.h(qr[0:2])
qc.x(a[0])
# B
qr = QuantumRegister(2,'q')
a = QuantumRegister (1,'a')
cr = ClassicalRegister(3,'c')
qc = QuantumCircuit(cr,a,qr)
qc.h(qr[0:2])
qc.x(a[0])
#C
qr = QuantumRegister(2,'q')
a = QuantumRegister (1,'a')
cr = ClassicalRegister(3,'c')
qc = QuantumCircuit(qr,a,cr)
qc.h(qr[0:1])
qc.x(a[0])
#D
qr = QReg(2,'q')
a = QReg (1,'a')
cr = CReg(3,'c')
qc = QuantumCircuit(qr,a,cr)
qc.h(qr[0:2])
qc.x(a[0])
from qiskit.tools.monitor import *
provider = IBMQ.load_account()
#provider.backends() ## this line of code can be important becuase it could be a question of your exam.
#In other words, how do you know the backends of the provider?
backend= provider.get_backend('ibmq_qasm_simulator')
qr = QuantumRegister(2)
cr= ClassicalRegister(2)
qc = QuantumCircuit(qr,cr)
qc.h(qr[0])
qc.cx(qr[0],qr[1])
qc.measure(qr,cr)
job = execute(qc,backend)
job.status()
job_monitor(job)
#### another could be job_watcher for jupyternoote book
from qiskit.tools.jupyter import job_watcher
%qiskit_job_watcher
job = backend.retrieve_job('61f20ee81faa0605383485a7')
result = job.result()
counts = result.get_counts()
print(counts)
qc = QuantumCircuit(3)
qc.initialize('01',[0,2])
qc.draw()
print(qc.decompose())
from qiskit.visualization import plot_error_map, plot_gate_map
backend = provider.get_backend('ibmq_quito')
plot_error_map(backend)
plot_gate_map(backend)
#A
from qiskit import QuantumCircuit, Aer, execute
from math import pi
qc = QuantumCircuit(2)
qc.crz(pi,0,1)
qc.crz(-pi,0,1)
u_sim = Aer.get_backend('unitary_simulator')
unitary = execute(qc,u_sim).result().get_unitary()
print(unitary)
#B
from qiskit import QuantumCircuit, Aer, execute
from math import pi
qc = QuantumCircuit(2)
qc.crz(pi,0,1)
qc.cp(pi,0,1)
u_sim = Aer.get_backend('unitary_simulator')
unitary = execute(qc,u_sim).result().get_unitary()
print(unitary)
#C
from qiskit import QuantumCircuit, Aer, execute
from math import pi
qc = QuantumCircuit(2)
qc.cz(0,1)
qc.cz(1,0)
u_sim = Aer.get_backend('unitary_simulator')
unitary = execute(qc,u_sim).result().get_unitary()
print(unitary)
qc.draw()
#D
from qiskit import QuantumCircuit, Aer, execute
from math import pi
qc = QuantumCircuit(2)
qc.cz(0,1)
qc.cp(pi,0,1)
u_sim = Aer.get_backend('unitary_simulator')
unitary = execute(qc,u_sim).result().get_unitary()
print(unitary)
import qiskit.tools.jupyter
%qiskit_backend_overview
from qiskit import QuantumCircuit
qc = QuantumCircuit(3)
#insert code fragment here
#Output
### you won't see the following lines of code in the exam, just focus on the figure
from IPython.display import Image
Image(url="imageassesment1.png", width=350, height=350)
#A
qc.measure_all()
#B
qc = QuantumCircuit(3)
qc.measure()
#C
qc = QuantumCircuit(3)
qc.measure(0,0)
qc.measure(1,1)
qc.measure(2,2)
#D
qc = QuantumCircuit(3)
for n in range(len(qc.qubits)):
qc.measure(n,n)
qc.qubits
## here you need to display each line or lines of code before each barrier in the Qsphere, the question is about
#to put in order the sequence of states that will appear in the Qsphere.
qc = QuantumCircuit(3)
qc.x(1)
qc.barrier()
qc.h(0)
qc.h(1)
qc.h(2)
qc.barrier()
qc.z(1)
qc.barrier()
qc.h(0)
qc.h(1)
qc.h(2)
qc.draw('mpl')
from qiskit.visualization import plot_state_qsphere
simulator= Aer.get_backend('statevector_simulator')
result = execute(qc,simulator).result()
statevector = result.get_statevector(qc)
plot_state_qsphere(statevector)
from qiskit import BasicAer, Aer, execute
qc = QuantumCircuit(1)
qc.h(0)
#insert code fragment here
print(unitary)
#A
simulator = BasicAer.get_backend('unitary_simulator')
unitary = execute(qc,simulator).get_unitary(qc)
#B
simulator = Aer.get_backend('unitary_simulator')
result = execute(qc,simulator).result()
unitary = result.get_unitary(qc)
#C
simulator = Aer.get_backend('statevector_simulator')
result = execute(qc,simulator).result()
unitary = result.get_matrix_result(qc)
#D
simulator = BasicAer.get_backend('statevector_simulator')
result = execute(qc,simulator).result()
unitary = result.get_unitary(qc)
#E
simulator = BasicAer.get_backend('unitary_simulator')
result = execute(qc,simulator).result()
unitary = result.get_unitary()
from qiskit.visualization import plot_bloch_vector
from math import pi, sqrt
plot_bloch_vector(vector)
#A
vector = [1,-1,0]
#B
vector = [pi/2,-pi/4,0]
#C
vector = [1/sqrt(2),-1/sqrt(2),0]
#D
vector = [1/sqrt(2),-1/sqrt(2),-1]
from qiskit.visualization import plot_state_qsphere
qc = QuantumCircuit(3)
qc.h(0)
#qc.z(0)
qc.x(1)
qc.cx(0,1)
qc.x(2)
qc.cx(1,2)
backend = BasicAer.get_backend('statevector_simulator')
job = execute(qc, backend).result()
statevector= job.get_statevector()
plot_state_qsphere(statevector)
qc = QuantumCircuit(1)
qc.x(0)
qc.h(0)
simulator = Aer.get_backend('unitary_simulator')
job = execute(qc,simulator)
result = job.result()
outputstate = result.get_unitary(qc,1)
print(outputstate)
qc = QuantumCircuit(3,3)
qc.h([0,1,2])
qc.barrier()
qc.measure([0,1,2],range(3))
qc.draw()
print(qc.qasm())
qasm_sim = Aer.get_backend('qasm_simulator')
qc= QuantumCircuit(3)
qc.x([0,1,2])
qc.ccx(0,1,2)
qc.measure_all()
result = execute(qc,qasm_sim).result()
counts = result.get_counts()
print(counts)
qc= QuantumCircuit(3)
qc.ct()
from qiskit.quantum_info import DensityMatrix
matrix1 = [
[1,0],[0,0]
]
matrix2 = [
[0.5,0.5],[0.5,0.5]
]
#A
result= DensityMatrix.tensor(matrix1,matrix2)
print(result)
#B
matrix1 = DensityMatrix(matrix1)
print(matrix1.tensor(matrix2))
#C
print(matrix1.tensor(matrix2))
#D
print(DensityMatrix.tensor(matrix1,matrix2))
from qiskit.visualization import plot_state_city
qc = QuantumCircuit(2)
qc.h(0)
qc.x(1)
qc.cx(0,1)
qc.z(0)
simulator = BasicAer.get_backend('statevector_simulator')
job = execute(qc,simulator).result()
statevector = job.get_statevector()
plot_state_city(statevector)
qc = QuantumCircuit(1)
#A
#qc.ry(pi/2,0)
#qc.s(0)
#qc.rx(pi/2,0)
#B
#qc.ry(pi/2,0)
#qc.rx(pi/2,0)
#qc.s(0)
#C
#qc.s(0)
#qc.ry(pi/2,0)
#qc.rx(pi/2,0)
#D
qc.rx(pi/2,0)
qc.s(0)
qc.ry(pi/2,0)
qc.measure_all()
simulator = BasicAer.get_backend('qasm_simulator')
job = execute(qc,simulator).result()
counts = job.get_counts()
print(counts)
from qiskit.quantum_info import DensityMatrix
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0,1)
qc1= QuantumCircuit(2)
qc1.h(0)
qc1.x(1)
qc1.cx(0,1)
#qc.draw('mpl')
rho_qc=DensityMatrix.from_instruction(qc)
rho_qc.draw()
rho1= DensityMatrix.from_instruction(qc1)
rho1.draw()
qc1new = qc1.decompose()
qc1new.draw()
#tensor1 = DensityMatrix.from_label('[[0,1],[1,0]]')
qc = QuantumCircuit(2)
#v1,v2 = [0,1],[0,1]
v = [1/sqrt(2),0,0,1/sqrt(2)]
qc.initialize(v,[0,1])
qc.draw(output='mpl')
simulator = Aer.get_backend('statevector_simulator')
result = execute(qc, simulator).result()
statevector = result.get_statevector()
print(statevector)
from qiskit.circuit.library import CXGate
ccx = CXGate().control()
qc = QuantumCircuit(3)
qc.append(ccx,[0,1,2])
qc.draw()
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, execute
qc= QuantumCircuit(3)
qc.barrier()
qc.barrier([0])
qc.draw()
qc = QuantumCircuit.from_qasm_file('myfile.qasm')
qc.measure_all()
qc.draw(output='latex_source')
from qiskit.quantum_info import Statevector
from qiskit.visualization import plot_state_qsphere, plot_state_paulivec, plot_state_city, plot_bloch_vector, plot_state_hinton, plot_bloch_multivector
stavec = Statevector.from_label('001')
plot_state_paulivec(stavec)
import qiskit.tools.jupyter
%qiskit_version_table
import qiskit.tools.jupyter
%qiskit_backend_overview
|
https://github.com/COFAlumni-USB/qiskit-fall-2022
|
COFAlumni-USB
|
#Para la construccion de circuitos cuanticos
from qiskit import QuantumCircuit, execute
#Para la construccion de las calibraciones y su vinculacion
from qiskit import pulse, transpile
from qiskit.pulse.library import Gaussian
from qiskit.providers.fake_provider import FakeValencia, FakeHanoi
#Compuertas personalizadas
from qiskit.circuit import Gate
from qiskit import QiskitError
#informacion
import qiskit.tools.jupyter
circ = QuantumCircuit(2, 2);
circ.h(0);
circ.cx(0, 1);
circ.measure(0, 0);
circ.measure(1, 1);
circ.draw('mpl');
result = execute(circ, backend=FakeValencia()).result();
counts = result.get_counts();
print("El resultado de la estadística es: \n|00>: {S00} \n|01>: {S01} \n|10>: {S10} \n|11>: {S11}".format(S00=counts['00'],S01=counts['01'],S10=counts['10'],S11=counts['11']))
backend = FakeValencia();
with pulse.build(backend, name='hadamard') as h_q0:
pulse.play(Gaussian(duration=100, amp=0.1, sigma=3), pulse.drive_channel(0));
h_q0.draw()
# Rebuild the Hadamard calibration with longer, backend-realistic pulse
# parameters (duration 128 dt, sigma 16).
# Bug fix: the original line ended in a stray mojibake character ('ñ'),
# which is a Python syntax error.
backend = FakeValencia()
with pulse.build(backend, name='hadamard') as h_q0:
    pulse.play(Gaussian(duration=128, amp=0.1, sigma=16), pulse.drive_channel(0))
h_q0.draw()
circ.add_calibration('h', [0], h_q0)
backend = FakeHanoi()
circ = transpile(circ, backend)
print(backend.configuration().basis_gates)
circ.draw('mpl', idle_wires=False)
result = execute(circ, backend=FakeValencia()).result();
counts = result.get_counts();
print("El resultado de la estadística es: \n|00>: {S00} \n|01>: {S01} \n|10>: {S10} \n|11>: {S11}".format(S00=counts['00'],S01=counts['01'],S10=counts['10'],S11=counts['11']))
circ = QuantumCircuit(1, 1)
custom_gate = Gate('my_custom_gate', 1, [3.14, 1])
# 3.14 is an arbitrary parameter for demonstration
circ.append(custom_gate, [0])
circ.measure(0, 0)
circ.draw('mpl')
with pulse.build(backend, name='custom') as my_schedule:
pulse.play(Gaussian(duration=64, amp=0.2, sigma=8), pulse.drive_channel(0))
circ.add_calibration('my_custom_gate', [0], my_schedule, [3.14, 1])
# Alternatively: circ.add_calibration(custom_gate, [0], my_schedule)
circ = transpile(circ, backend)
circ.draw('mpl', idle_wires=False)
circ = QuantumCircuit(2, 2)
circ.append(custom_gate, [1])
from qiskit import QiskitError
try:
circ = transpile(circ, backend)
except QiskitError as e:
print(e)
%qiskit_version_table
%qiskit_copyright
|
https://github.com/COFAlumni-USB/qiskit-fall-2022
|
COFAlumni-USB
|
#For Python and advanced manipulation import these packages
try:
import numpy as np
except:
!pip install numpy
import numpy as np
try:
import qiskit
except:
!pip install qiskit
import qiskit
try:
!pip install pylatexenc
estilo = 'mpl'
QuantumCircuit(1).draw(estilo)
except:
estilo = 'text'
#Libraries for quantum circuits
from qiskit import QuantumCircuit, execute
#For calibration
from qiskit import pulse, transpile
from qiskit.pulse.library import Gaussian
#Personalized gates
from qiskit.circuit import Gate
from qiskit import QiskitError
#For information
import qiskit.tools.jupyter
from qiskit import IBMQ
import os
#Load our IBM Quantum account
# SECURITY FIX: the original embedded a real IBM Quantum API token directly
# in the source.  A committed token is compromised and must be revoked on
# the IBM Quantum dashboard; supply a fresh one through the IBMQ_TOKEN
# environment variable instead of hard-coding it.
provider = IBMQ.enable_account(os.environ["IBMQ_TOKEN"])
provider.backends()
from qiskit.providers.ibmq import least_busy
# Find the least busy real (non-simulator) backend with exactly 5 qubits.
small_devices = provider.backends(filters=lambda x: x.configuration().n_qubits == 5
                                  and not x.configuration().simulator)
least_busy(small_devices)
# Once we saw which backend was least busy, we choose it with .get_backend()
provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
backend = provider.get_backend('ibmq_quito')
backend_config = backend.configuration()
# Sampling time of the pulses (hardware time resolution, in seconds)
dt = backend_config.dt
print(f"Sampling time: {dt*1e9} ns")
# Timing constraints of the backend (alignment/granularity rules for pulses)
backend.configuration().timing_constraints
# Save the timing-constraint values used by the rounding helpers below
acquire_alignment = backend.configuration().timing_constraints['acquire_alignment']
granularity = backend.configuration().timing_constraints['granularity']
pulse_alignment = backend.configuration().timing_constraints['pulse_alignment']
# Delays must be a multiple of this lcm to satisfy both alignment constraints
lcm = np.lcm(acquire_alignment, pulse_alignment)
print(f"Least common multiple of acquire_alignment and pulse_alignment: {lcm}")
backend_defaults = backend.defaults()
# --- Finding the qubit's frequency ---
# Unit-conversion constants
GHz = 1.0e9 # Gigahertz
MHz = 1.0e6 # Megahertz
us = 1.0e-6 # Microseconds
ns = 1.0e-9 # Nanoseconds
# We will work with the following qubit
qubit = 0
# Center the sweep on the backend's estimated qubit frequency
center_frequency_Hz = backend_defaults.qubit_freq_est[qubit] # in Hz
print(f"Qubit {qubit} has an estimated frequency of {center_frequency_Hz / GHz} GHz.")
# Scale factor to remove powers of 10 from the measured IQ data before plotting/fitting
scale_factor = 1e-7
# Sweep 40 MHz around the estimated frequency
frequency_span_Hz = 40 * MHz
# With 1 MHz steps
frequency_step_Hz = 1 * MHz
# Sweep 20 MHz above and 20 MHz below the estimated frequency
frequency_min = center_frequency_Hz - frequency_span_Hz / 2
frequency_max = center_frequency_Hz + frequency_span_Hz / 2
# Array of the sweep frequencies, expressed in GHz
frequencies_GHz = np.arange(frequency_min / GHz,
                            frequency_max / GHz,
                            frequency_step_Hz / GHz)
print(f"The sweep will go from {frequency_min / GHz} GHz to {frequency_max / GHz} GHz \
in steps of {frequency_step_Hz / MHz} MHz.")
def get_closest_multiple_of(value, base_number):
    """Round *value* to the nearest whole multiple of *base_number*."""
    midpoint_shifted = int(value + base_number / 2)
    return midpoint_shifted - (midpoint_shifted % base_number)
# Pulse lengths must be a multiple of the backend granularity (16 on this device
# per the name — the actual value is read from timing_constraints above).
def get_closest_multiple_of_16(num):
    """Snap a sample count to the nearest multiple of the backend granularity."""
    snapped = get_closest_multiple_of(num, granularity)
    return snapped
# Delay lengths are specified in dt units and must respect both alignment rules.
def get_dt_from(sec):
    """Convert a duration in seconds to dt samples, aligned to the lcm constraint."""
    n_samples = sec / dt
    return get_closest_multiple_of(n_samples, lcm)
from qiskit.circuit import Parameter
from qiskit.circuit import QuantumCircuit, Gate
# Drive pulse parameters (us = microseconds)
# Width of the Gaussian envelope
drive_sigma_sec = 0.015 * us
# Truncating parameter: the pulse is cut off at 8 sigma total length
drive_duration_sec = drive_sigma_sec * 8
# Pulse's amplitude
drive_amp = 0.05
# Base schedule: one parameterized Gaussian drive at frequency `freq`, then measure
freq = Parameter('freq')
with pulse.build(backend=backend, default_alignment='sequential', name='Frequency sweep') as sweep_sched:
    # seconds_to_samples(s) gets the number of samples that elapse in `s` seconds on the active backend
    drive_duration = get_closest_multiple_of_16(pulse.seconds_to_samples(drive_duration_sec))
    drive_sigma = pulse.seconds_to_samples(drive_sigma_sec)
    # Returns the qubit's DriveChannel on the active backend.
    # Drive channels transmit signals to qubits which enact gate operations.
    drive_chan = pulse.drive_channel(qubit)
    pulse.set_frequency(freq, drive_chan)
    # Drive pulse samples
    pulse.play(pulse.Gaussian(duration=drive_duration,
                              sigma=drive_sigma,
                              amp=drive_amp,
                              name='freq_sweep_excitation_pulse'), drive_chan)
# Plot the Gaussian pulse envelope
sweep_sched.draw()
# Gate(name, num_qubits, params) creates a new (opaque) gate
sweep_gate = Gate("sweep", 1, [freq])
# Create the quantum circuit: 1 qubit, 1 classical bit
qc_sweep = QuantumCircuit(1, 1)
# Add our new gate sweep_gate to the quantum circuit
qc_sweep.append(sweep_gate, [0])
qc_sweep.measure(0, 0)
"""This command: add_calibration(gate, qubits, schedule, params=None)
registers a low-level, custom pulse definition for the given gate"""
qc_sweep.add_calibration(sweep_gate, (0,), sweep_sched, [freq])
# Frequency settings for the sweep (MUST BE IN HZ)
frequencies_Hz = frequencies_GHz*GHz #convert to Hz
"""This command: assign_parameters(parameters, inplace=False)
assigns parameters to new parameters or values"""
exp_sweep_circs = [qc_sweep.assign_parameters({freq: f}, inplace=False) for f in frequencies_Hz]
from qiskit import schedule
# schedule(circuits, backend) compiles a circuit to a pulse Schedule using the backend
sweep_schedule = schedule(exp_sweep_circs[0], backend)
# To show the schedule
sweep_schedule.draw(backend=backend)
# Each schedule will be repeated num_shots_per_frequency times
num_shots_per_frequency = 1024
job = backend.run(exp_sweep_circs,
                  meas_level=1, #kerneled (IQ) data, not discriminated counts
                  meas_return='avg',
                  shots=num_shots_per_frequency)
from qiskit.tools.monitor import job_monitor
# Monitor the job status
job_monitor(job)
# Retrieve the results (generous timeout: queueing + execution can be slow)
frequency_sweep_results = job.result(timeout=1200)
# Plotting the results with matplotlib
import matplotlib.pyplot as plt
sweep_values = []
for i in range(len(frequency_sweep_results.results)):
    # Get the (averaged, kerneled) results from the ith experiment
    res = frequency_sweep_results.get_memory(i)*scale_factor
    # Get the results for `qubit` from this experiment
    sweep_values.append(res[qubit])
# Plot frequencies vs. real part of sweep values
plt.scatter(frequencies_GHz, np.real(sweep_values), color='black')
plt.xlim([min(frequencies_GHz), max(frequencies_GHz)])
plt.xlabel("Frequency [GHz]")
plt.ylabel("Measured signal [a.u.]")
plt.show()
#Using scipy for the curve fitting
from scipy.optimize import curve_fit
def fit_function(x_values, y_values, function, init_params):
    """Least-squares fit of *function* to (x_values, y_values).

    Returns the best-fit parameters and the fitted curve evaluated at x_values.
    """
    popt, _covariance = curve_fit(function, x_values, y_values, init_params)
    return popt, function(x_values, *popt)
# Fitting the curve. We use a Lorentzian function for the fit
"""We need to assign the correct initial parameters for our data
A is related to the height of the curve,
B is related to the width of the Lorentzian,
C is the cut with the Y axis and
q_freq is the estimated peak frequency of the curve."""
fit_params, y_fit = fit_function(frequencies_GHz,
                                 np.real(sweep_values),
                                 lambda x, A, q_freq, B, C: (A / np.pi) * (B / ((x - q_freq)**2 + B**2)) + C,
                                 [1e8, 5.3, 0.02, -1e8] # initial parameters for curve_fit
                                 )
# Plotting the data
plt.scatter(frequencies_GHz, np.real(sweep_values), color='black')
# and plotting the fit
plt.plot(frequencies_GHz, y_fit, color='red')
plt.xlim([min(frequencies_GHz), max(frequencies_GHz)])
plt.xlabel("Frequency [GHz]")
plt.ylabel("Measured Signal [a.u.]")
plt.show()
# The fitted peak position is the new (rough) qubit frequency estimate
A, rough_qubit_frequency, B, C = fit_params
rough_qubit_frequency = rough_qubit_frequency*GHz # make sure qubit freq is in Hz
print(f"We've updated our qubit frequency estimate from "
      f"{round(backend_defaults.qubit_freq_est[qubit] / GHz, 5)} GHz to {round(rough_qubit_frequency/GHz, 5)} GHz.")
# --- Calibrating a pi pulse ---
# A pi pulse takes a qubit from |0> to |1> (X gate)
# Rabi experiment parameters
num_rabi_points = 50
# Drive amplitude values to iterate over:
# 50 amplitudes evenly spaced from 0 to 0.55 using linspace
drive_amp_min = 0
drive_amp_max = 0.55 #Changed this parameter
drive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points)
# Build the Rabi experiments:
"""A drive pulse at the qubit frequency, followed by a measurement,
where we vary the drive amplitude each time"""
# This is similar to the frequency sweep schedule
drive_amp = Parameter('drive_amp')
with pulse.build(backend=backend, default_alignment='sequential', name='Rabi Experiment') as rabi_sched:
    drive_duration = get_closest_multiple_of_16(pulse.seconds_to_samples(drive_duration_sec))
    drive_sigma = pulse.seconds_to_samples(drive_sigma_sec)
    drive_chan = pulse.drive_channel(qubit)
    # Drive at the frequency calibrated by the sweep above
    pulse.set_frequency(rough_qubit_frequency, drive_chan)
    pulse.play(pulse.Gaussian(duration=drive_duration,
                              amp=drive_amp,
                              sigma=drive_sigma,
                              name='Rabi Pulse'), drive_chan)
# New rabi gate
rabi_gate = Gate("rabi", 1, [drive_amp])
# New quantum circuit for the Rabi experiment
qc_rabi = QuantumCircuit(1, 1)
# Add the rabi_gate we just defined
qc_rabi.append(rabi_gate, [0])
# Measure the QC
qc_rabi.measure(0, 0)
# Add calibration to the rabi_gate
qc_rabi.add_calibration(rabi_gate, (0,), rabi_sched, [drive_amp])
exp_rabi_circs = [qc_rabi.assign_parameters({drive_amp: a}, inplace=False) for a in drive_amps]
# Create our schedule and draw it
rabi_schedule = schedule(exp_rabi_circs[-1], backend)
rabi_schedule.draw(backend=backend)
num_shots_per_point = 1024
job = backend.run(exp_rabi_circs,
                  meas_level=1,
                  meas_return='avg',
                  shots=num_shots_per_point)
job_monitor(job)
# Get the results
rabi_results = job.result(timeout=120)
# We need to extract the results and fit them to a sinusoidal curve.
"""The range of amplitudes we got will rotate (hopefully) the qubit several times
around the Bloch sphere. We need to find the drive amplitude needed for the
signal to oscillate from a maximum to a minimum (all |0> to all |1>).
That's exactly what gives us the calibrated amplitude represented
by the pi pulse"""
# First we center the data around 0
def baseline_remove(values):
    """Center *values* around zero by subtracting their mean."""
    data = np.array(values)
    return data - data.mean()
# Empty array for Rabi values
rabi_values = []
# Remember we defined num_rabi_points initially at 50
for i in range(num_rabi_points):
    # Get the results for `qubit` from the ith experiment
    rabi_values.append(rabi_results.get_memory(i)[qubit] * scale_factor)
# We get the real values from the centered rabi_values
rabi_values = np.real(baseline_remove(rabi_values))
# Plot the results
plt.xlabel("Drive amp [a.u.]")
plt.ylabel("Measured signal [a.u.]")
# Plotting amplitudes vs Rabi values
plt.scatter(drive_amps, rabi_values, color='black')
plt.show()
# Now we fit the curve to a cosine, similarly to the frequencies fit
fit_params, y_fit = fit_function(drive_amps,
                                 rabi_values,
                                 lambda x, A, B, drive_period, phi: (A*np.cos(2*np.pi*x/drive_period - phi) + B),
                                 [7e7, 0, 0.20, 0])
plt.scatter(drive_amps, rabi_values, color='black')
plt.plot(drive_amps, y_fit, color='red')
drive_period = fit_params[2] #get period of rabi oscillation
plt.xlabel("Drive amp [a.u.]", fontsize=15)
plt.ylabel("Measured signal [a.u.]", fontsize=15)
plt.show()
# Pi pulse amplitude: half a Rabi period takes the signal from maximum to minimum
pi_amp = abs(drive_period / 2)
print(f"Pi Amplitude = {pi_amp}")
# We can define our pi pulse now, using the calibrated amplitude
with pulse.build(backend) as pi_pulse:
    drive_duration = get_closest_multiple_of_16(pulse.seconds_to_samples(drive_duration_sec))
    drive_sigma = pulse.seconds_to_samples(drive_sigma_sec)
    drive_chan = pulse.drive_channel(qubit)
    pulse.play(pulse.Gaussian(duration=drive_duration,
                              amp=pi_amp,
                              sigma=drive_sigma,
                              name='pi_pulse'), drive_chan)
# Now we create a ground state circuit to try our pi pulse against
qc_gnd = QuantumCircuit(1, 1)
qc_gnd.measure(0, 0)
# And its ground schedule
gnd_schedule = schedule(qc_gnd, backend)
gnd_schedule.draw(backend=backend)
# We create the excited state
with pulse.build(backend=backend, default_alignment='sequential', name='excited state') as exc_schedule:
    drive_chan = pulse.drive_channel(qubit)
    pulse.set_frequency(rough_qubit_frequency, drive_chan)
    pulse.call(pi_pulse)
# And another QC for the excited state
qc_exc = QuantumCircuit(1, 1)
# Apply X gate to qubit 0
qc_exc.x(0)
# And measure it
qc_exc.measure(0, 0)
# Then we add the calibration from the excited state's sched
qc_exc.add_calibration("x", (0,), exc_schedule, [])
# Now build the excited state's schedule
exec_schedule = schedule(qc_exc, backend)
exec_schedule.draw(backend=backend)
# Run the preparation schedules for the ground and excited states
num_shots = 1024
job = backend.run([qc_gnd, qc_exc],
                  #Choosing meas_level 1 for kerneled (IQ) data
                  meas_level=1,
                  meas_return='single',
                  shots=num_shots)
job_monitor(job)
gnd_exc_results = job.result(timeout=120)
# Getting the ground and excited state's single-shot IQ results
gnd_results = gnd_exc_results.get_memory(0)[:, qubit]*scale_factor
exc_results = gnd_exc_results.get_memory(1)[:, qubit]*scale_factor
# Plotting results
plt.figure()
# Ground state in blue
plt.scatter(np.real(gnd_results), np.imag(gnd_results),
            s=5, cmap='viridis', c='blue', alpha=0.5, label='Gnd state')
# Excited state in red
plt.scatter(np.real(exc_results), np.imag(exc_results),
            s=5, cmap='viridis', c='red', alpha=0.5, label='Exc state')
plt.axis('square')
# Plot a large black dot for the average result of the 0 and 1 states
# Mean of real and imaginary parts of results
mean_gnd = np.mean(gnd_results)
mean_exc = np.mean(exc_results)
plt.scatter(np.real(mean_gnd), np.imag(mean_gnd),
            s=100, cmap='viridis', c='black',alpha=1.0, label='Mean')
plt.scatter(np.real(mean_exc), np.imag(mean_exc),
            s=100, cmap='viridis', c='black',alpha=1.0)
plt.ylabel('Im [a.u.]', fontsize=15)
plt.xlabel('Q (Real) [a.u.]', fontsize=15)
plt.title("0-1 discrimination", fontsize=15)
plt.legend()
plt.show()
"""Setting up a classifier function:
returns 0 if a given point is closer to the mean of ground state results
and returns 1 if the point is closer to the avg exc state results"""
import math
# This function classifies a given IQ point as |0> or |1>.
def classify(point: complex):
    """Label an IQ point: 0 if nearer the ground-state mean, 1 if nearer the excited mean."""
    def distance(a, b):
        dx = np.real(a) - np.real(b)
        dy = np.imag(a) - np.imag(b)
        return math.sqrt(dx**2 + dy**2)
    # Relies on module-level mean_gnd / mean_exc computed from the calibration run
    return int(distance(point, mean_exc) < distance(point, mean_gnd))
# T1 time: time it takes for a qubit to decay from exc_state to gnd_state
"""To measure T1 we use the pi pulse we've calibrated, then a measure pulse.
But first we have to insert a delay."""
# T1 experiment parameters
time_max_sec = 450 * us
time_step_sec = 6.5 * us
delay_times_sec = np.arange(1 * us, time_max_sec, time_step_sec)
# We define the delay
delay = Parameter('delay')
# Create another quantum circuit
qc_t1 = QuantumCircuit(1, 1)
# X Gate
qc_t1.x(0)
# Delay
qc_t1.delay(delay, 0)
# Measurement
qc_t1.measure(0, 0)
# Calibrate the X gate with our pi pulse
qc_t1.add_calibration("x", (0,), pi_pulse)
exp_t1_circs = [qc_t1.assign_parameters({delay: get_dt_from(d)}, inplace=False) for d in delay_times_sec]
# Schedule for T1
sched_idx = -1
t1_schedule = schedule(exp_t1_circs[sched_idx], backend)
t1_schedule.draw(backend=backend)
# Execution settings
num_shots = 256
job = backend.run(exp_t1_circs,
                  meas_level=1,
                  meas_return='single',
                  shots=num_shots)
job_monitor(job)
t1_results = job.result(timeout=120)
# Getting the results to plot
t1_values = []
for i in range(len(delay_times_sec)):
    iq_data = t1_results.get_memory(i)[:,qubit] * scale_factor
    # classify() labels each single-shot IQ point as 0 or 1; the mean of the
    # labels (sum/num_shots) is the excited-state population at this delay.
    t1_values.append(sum(map(classify, iq_data)) / num_shots)
plt.scatter(delay_times_sec/us, t1_values, color='black')
plt.title("$T_1$ Experiment", fontsize=15)
plt.xlabel('Delay before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Signal [a.u.]', fontsize=15)
plt.show()
# Fitting data with an exponential decay curve
fit_params, y_fit = fit_function(delay_times_sec/us, t1_values,
                                 lambda x, A, C, T1: (A * np.exp(-x / T1) + C),
                                 [-3, 3, 100]
                                 )
_, _, T1 = fit_params
plt.scatter(delay_times_sec/us, t1_values, color='black')
plt.plot(delay_times_sec/us, y_fit, color='red', label=f"T1 = {T1:.2f} us")
plt.xlim(0, np.max(delay_times_sec/us))
plt.title("$T_1$ Experiment", fontsize=15)
plt.xlabel('Delay before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Signal [a.u.]', fontsize=15)
plt.legend()
plt.show()
# Measure the qubit frequency (precisely) with a Ramsey experiment:
# apply a pi/2 pulse, wait some time, and then another pi/2 pulse.
# Ramsey experiment parameters
time_max_sec = 1.8 * us
time_step_sec = 0.025 * us
delay_times_sec = np.arange(0.1 * us, time_max_sec, time_step_sec)
# Drive parameters
# Drive amplitude for pi/2 is simply half the amplitude of the pi pulse
drive_amp = pi_amp / 2
# Build the x_90 pulse, which is an X rotation of 90 degrees, a pi/2 rotation
with pulse.build(backend) as x90_pulse:
    drive_duration = get_closest_multiple_of_16(pulse.seconds_to_samples(drive_duration_sec))
    drive_sigma = pulse.seconds_to_samples(drive_sigma_sec)
    drive_chan = pulse.drive_channel(qubit)
    pulse.play(pulse.Gaussian(duration=drive_duration,
                              amp=drive_amp,
                              sigma=drive_sigma,
                              name='x90_pulse'), drive_chan)
# Now we have to drive the pulses off-resonance by an amount detuning_MHz,
# so the Ramsey fringes oscillate at a measurable rate
detuning_MHz = 2
ramsey_frequency = round(rough_qubit_frequency + detuning_MHz * MHz, 6) #Ramsey freq in Hz
# Pulse schedule for the Ramsey experiment
delay = Parameter('delay')
with pulse.build(backend=backend, default_alignment='sequential', name="Ramsey delay Experiment") as ramsey_schedule:
    drive_chan = pulse.drive_channel(qubit)
    pulse.set_frequency(ramsey_frequency, drive_chan)
    pulse.call(x90_pulse)
    pulse.delay(delay, drive_chan)
    pulse.call(x90_pulse)
# Ramsey gate
ramsey_gate = Gate("ramsey", 1, [delay])
# Another QC for the Ramsey experiment
qc_ramsey = QuantumCircuit(1, 1)
# Adding the gate to the circuit
qc_ramsey.append(ramsey_gate, [0])
qc_ramsey.measure(0, 0)
qc_ramsey.add_calibration(ramsey_gate, (0,), ramsey_schedule, [delay])
exp_ramsey_circs = [qc_ramsey.assign_parameters({delay: get_dt_from(d)}, inplace=False) for d in delay_times_sec]
ramsey_schedule = schedule(exp_ramsey_circs[2], backend)
ramsey_schedule.draw(backend=backend)
# Execution settings for the Ramsey experiment
num_shots = 256
job = backend.run(exp_ramsey_circs,
                  meas_level=1,
                  meas_return='single',
                  shots=num_shots)
job_monitor(job)
ramsey_results = job.result(timeout=120)
# Array for the results
ramsey_values = []
for i in range(len(delay_times_sec)):
    iq_data = ramsey_results.get_memory(i)[:,qubit] * scale_factor
    ramsey_values.append(sum(map(classify, iq_data)) / num_shots)
# Plotting the results
plt.scatter(delay_times_sec/us, np.real(ramsey_values), color='black')
plt.xlim(0, np.max(delay_times_sec/us))
plt.title("Ramsey Experiment", fontsize=15)
plt.xlabel('Delay between X90 pulses [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.show()
# Fitting data to a sinusoid
fit_params, y_fit = fit_function(delay_times_sec/us, np.real(ramsey_values),
                                 lambda x, A, del_f_MHz, C, B: (
                                     A * np.cos(2*np.pi*del_f_MHz*x - C) + B
                                 ),
                                 [5, 2.2, 0, 0.4]
                                 )
# Off-resonance component
_, del_f_MHz, _, _, = fit_params # freq is MHz since times in us
plt.scatter(delay_times_sec/us, np.real(ramsey_values), color='black')
plt.plot(delay_times_sec/us, y_fit, color='red', label=f"df = {del_f_MHz:.2f} MHz")
plt.xlim(0, np.max(delay_times_sec/us))
plt.xlabel('Delay between X90 pulses [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.title('Ramsey Experiment', fontsize=15)
plt.legend()
plt.show()
# The measured fringe frequency corrects the applied detuning
precise_qubit_freq = rough_qubit_frequency + (detuning_MHz - del_f_MHz) * MHz # get new freq in Hz
print(f"Our updated qubit frequency is now {round(precise_qubit_freq/GHz, 6)} GHz. "
      f"It used to be {round(rough_qubit_frequency / GHz, 6)} GHz")
# Measuring coherence time (T2) with a Hahn echo experiment:
# the same as Ramsey, with a pi pulse inserted between the two pi/2 pulses
# T2 experiment parameters
tau_max_sec = 200 * us
tau_step_sec = 4 * us
delay_times_sec = np.arange(2 * us, tau_max_sec, tau_step_sec)
# Define the delay and build the pulse schedule for T2
delay = Parameter('delay')
with pulse.build(backend=backend, default_alignment='sequential', name="T2 delay Experiment") as t2_schedule:
    drive_chan = pulse.drive_channel(qubit)
    pulse.set_frequency(precise_qubit_freq, drive_chan)
    pulse.call(x90_pulse)
    pulse.delay(delay, drive_chan)
    pulse.call(pi_pulse)
    pulse.delay(delay, drive_chan)
    pulse.call(x90_pulse)
# Define the T2 gate
t2_gate = Gate("t2", 1, [delay])
# QC for T2
qc_t2 = QuantumCircuit(1, 1)
# Add T2 gate
qc_t2.append(t2_gate, [0])
qc_t2.measure(0, 0)
# Add calibration with delay
qc_t2.add_calibration(t2_gate, (0,), t2_schedule, [delay])
exp_t2_circs = [qc_t2.assign_parameters({delay: get_dt_from(d)}, inplace=False) for d in delay_times_sec]
# Schedule for T2 and show it
t2_schedule = schedule(exp_t2_circs[-1], backend)
t2_schedule.draw(backend=backend)
# Execution settings
num_shots_per_point = 512
job = backend.run(exp_t2_circs,
                  meas_level=1, #Kerneled data
                  meas_return='single',
                  shots=num_shots_per_point)
job_monitor(job)
# Getting results
t2_results = job.result(timeout=120)
# T2 empty array
t2_values = []
# Retrieving results and adding them classified to the array
for i in range(len(delay_times_sec)):
    iq_data = t2_results.get_memory(i)[:,qubit] * scale_factor
    t2_values.append(sum(map(classify, iq_data)) / num_shots_per_point)
# Plot results for the Hahn echo experiment
# (x axis is 2*delay: the total free-evolution time around the echo pulse)
plt.scatter(2*delay_times_sec/us, t2_values, color='black')
plt.xlabel('Delay between X90 pulse and $\pi$ pulse [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.title('Hahn Echo Experiment', fontsize=15)
plt.show()
# Fitting data with an exponential decay function
fit_params, y_fit = fit_function(2*delay_times_sec/us, t2_values,
                                 lambda x, A, B, T2: (A * np.exp(-x / T2) + B),
                                 [-3, 0, 100])
_, _, T2 = fit_params
# Plotting results and fit curve
plt.scatter(2*delay_times_sec/us, t2_values, color='black')
plt.plot(2*delay_times_sec/us, y_fit, color='red', label=f"T2 = {T2:.2f} us")
plt.xlim(0, np.max(2*delay_times_sec/us))
plt.xlabel('Delay between X90 pulse and $\pi$ pulse [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.title('Hahn Echo Experiment', fontsize=15)
plt.legend()
plt.show()
# Dynamical decoupling technique:
# used to extract longer coherence times from qubits
# Experiment parameters
tau_sec_min = 1 * us
tau_sec_max = 180 * us
tau_step_sec = 4 * us
taus_sec = np.arange(tau_sec_min, tau_sec_max, tau_step_sec)
num_sequence = 1
print(f"Total time ranges from {2.*num_sequence*taus_sec[0] / us} to {2.*num_sequence*taus_sec[-1] / us} us")
# This schedule interleaves pi pulses (two of them phase-shifted by pi/2)
# between the initial and final pi/2 pulses, separated by equal delays.
delay = Parameter('delay')
with pulse.build(backend=backend, default_alignment='sequential', name="T2DD delay Experiment") as T2DD_schedule:
    drive_chan = pulse.drive_channel(qubit)
    pulse.set_frequency(precise_qubit_freq, drive_chan)
    pulse.call(x90_pulse)
    pulse.delay(delay/2, drive_chan)
    for loop_counts in range(num_sequence):
        pulse.call(pi_pulse)
        pulse.delay(delay, drive_chan)
        # phase_offset temporarily shifts the drive phase by pi/2 for this pi pulse
        with pulse.phase_offset(np.pi/2, pulse.drive_channel(qubit)):
            pulse.call(pi_pulse)
        pulse.delay(delay, drive_chan)
        pulse.call(pi_pulse)
        pulse.delay(delay, drive_chan)
        with pulse.phase_offset(np.pi/2, pulse.drive_channel(qubit)):
            pulse.call(pi_pulse)
        # No trailing delay after the last repetition of the sequence
        if loop_counts != num_sequence-1:
            pulse.delay(delay, drive_chan)
    pulse.delay(delay/2, drive_chan)
    pulse.call(x90_pulse)
# Create new gate for T2 Dynamical Decoupling
T2DD_gate = Gate("T2DD", 1, [delay])
# Another QC
qc_T2DD = QuantumCircuit(1, 1)
qc_T2DD.append(T2DD_gate, [0])
qc_T2DD.measure(0, 0)
qc_T2DD.add_calibration(T2DD_gate, (0,), T2DD_schedule, [delay])
exp_T2DD_circs = [qc_T2DD.assign_parameters({delay: get_dt_from(d)}, inplace=False) for d in taus_sec]
# Schedule for T2 D.D.
T2DD_schedule = schedule(exp_T2DD_circs[-1], backend)
T2DD_schedule.draw(backend=backend)
num_shots_per_point = 1024
job = backend.run(exp_T2DD_circs,
                  meas_level=1,
                  meas_return='single',
                  shots=num_shots_per_point)
job_monitor(job)
T2DD_results = job.result(timeout=120)
# Total evolution time: 4 delays per repetition of the sequence
times_sec = 4*num_sequence*taus_sec
DD_values = []
for i in range(len(times_sec)):
    iq_data = T2DD_results.get_memory(i)[:,qubit] * scale_factor
    DD_values.append(sum(map(classify, iq_data)) / num_shots_per_point)
plt.scatter(times_sec/us, DD_values, color='black')
plt.xlim(0, np.max(times_sec/us))
plt.xlabel('Total time before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.title('Dynamical Decoupling Experiment', fontsize=15)
plt.show()
# Fit the data with an exponential decay
fit_func = lambda x, A, B, T2DD: (A * np.exp(-x / T2DD) + B)
fitparams, conv = curve_fit(fit_func, times_sec/us, DD_values, [3.5, 0.8, 150])
_, _, T2DD = fitparams
plt.scatter(times_sec/us, DD_values, color='black')
plt.plot(times_sec/us, fit_func(times_sec/us, *fitparams), color='red', label=f"T2DD = {T2DD:.2f} us")
plt.xlim([0, np.max(times_sec/us)])
plt.xlabel('Total time before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.title('Dynamical Decoupling Experiment', fontsize=15)
plt.legend()
plt.show()
Source: COFAlumni-USB — https://github.com/COFAlumni-USB/qiskit-fall-2022
#For Python and advanced manipulation import these packages
try:
import numpy as np
except:
!pip install numpy
import numpy as np
try:
import qiskit
except:
!pip install qiskit
import qiskit
try:
!pip install pylatexenc
estilo = 'mpl'
QuantumCircuit(1).draw(estilo)
except:
estilo = 'text'
#Libraries for quantum circuits
from qiskit import QuantumCircuit, execute
#For calibration
from qiskit import pulse, transpile
from qiskit.pulse.library import Gaussian
#Personalized gates
from qiskit.circuit import Gate
from qiskit import QiskitError
#For information
import qiskit.tools.jupyter
from qiskit import IBMQ
#Load our IBM Quantum account
import os

# SECURITY: a live IBM Quantum API token was committed here in plain text.
# It must be revoked; load the credential from the environment (or use
# IBMQ.save_account once and IBMQ.load_account thereafter).
provider = IBMQ.enable_account(os.environ["IBMQ_API_TOKEN"])
provider.backends()
from qiskit.providers.ibmq import least_busy
# Find the least busy real (non-simulator) backend with exactly 5 qubits
small_devices = provider.backends(filters=lambda x: x.configuration().n_qubits == 5
                                  and not x.configuration().simulator)
least_busy(small_devices)
# Once we saw which backend was least busy, we choose it with .get_backend()
provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
backend = provider.get_backend('ibmq_belem')
backend_config = backend.configuration()
print(backend_config)
# Sampling time of the pulses (hardware time resolution, in seconds)
dt = backend_config.dt
print(f"Sampling time: {dt*1e9} ns")
# Timing constraints of the backend
backend.configuration().timing_constraints
# Save the timing-constraint values used by the rounding helpers below
acquire_alignment = backend.configuration().timing_constraints['acquire_alignment']
granularity = backend.configuration().timing_constraints['granularity']
pulse_alignment = backend.configuration().timing_constraints['pulse_alignment']
lcm = np.lcm(acquire_alignment, pulse_alignment)
print(f"Least common multiple of acquire_alignment and pulse_alignment: {lcm}")
backend_defaults = backend.defaults()
# --- Finding the qubit's frequency ---
# Unit-conversion constants
GHz = 1.0e9 # Gigahertz
MHz = 1.0e6 # Megahertz
us = 1.0e-6 # Microseconds
ns = 1.0e-9 # Nanoseconds
# We will work with the following qubit
qubit = 0
# Center the sweep on the backend's estimated qubit frequency, in Hz
center_frequency_Hz = backend_defaults.qubit_freq_est[qubit]
print(f"Qubit {qubit} has an estimated frequency of {center_frequency_Hz / GHz} GHz.")
# Scale factor to remove powers of 10 from the measured IQ data
scale_factor = 1e-7
# Sweep 40 MHz around the estimated frequency
frequency_span_Hz = 40 * MHz
# With 1 MHz steps
frequency_step_Hz = 1 * MHz
# Sweep 20 MHz above and 20 MHz below the estimated frequency
frequency_min = center_frequency_Hz - frequency_span_Hz / 2
frequency_max = center_frequency_Hz + frequency_span_Hz / 2
# Array of the sweep frequencies, in GHz
frequencies_GHz = np.arange(frequency_min / GHz,
                            frequency_max / GHz,
                            frequency_step_Hz / GHz)
print(f"The sweep will go from {frequency_min / GHz} GHz to {frequency_max / GHz} GHz \
in steps of {frequency_step_Hz / MHz} MHz.")
def get_closest_multiple_of(value, base_number):
    """Return the integer multiple of *base_number* nearest to *value*."""
    return base_number * (int(value + base_number / 2) // base_number)
# Pulse lengths must land on the backend granularity (nominally 16 dt).
def get_closest_multiple_of_16(num):
    """Snap a sample count to the nearest multiple of the backend granularity."""
    return get_closest_multiple_of(num, base_number=granularity)
# Delays are expressed in dt units and must satisfy both alignment constraints.
def get_dt_from(sec):
    """Express *sec* seconds as a hardware-aligned number of dt samples."""
    return get_closest_multiple_of(sec / dt, base_number=lcm)
from qiskit.circuit import Parameter
from qiskit.circuit import QuantumCircuit, Gate
# Drive pulse parameters (us = microseconds)
# Determines width of the Gaussian, but I need it for drive_duration
drive_sigma_sec = 0.015 * us
# Truncating parameter
"""Changed this parameter because I was getting three pulses
in Measured Signal. With this new duration I get the envelope
of the three pulses. This happens because the transitions of
the qubit states (0/1) are not perfectly populated,
so we get a mix of the other states."""
drive_duration_sec = drive_sigma_sec * 3.5
# For qubit frequency estimate from 5.09021 GHz to 5.09035 GHz use:
# drive_duration_sec = drive_sigma_sec * 4
# Pulse's amplitude
drive_amp = 0.05
# Base schedule: a flat (Constant) drive at frequency `freq`, then measure
freq = Parameter('freq')
with pulse.build(backend=backend, default_alignment='sequential', name='Frequency sweep') as sweep_sched:
    # seconds_to_samples(s) gets the number of samples that elapse in `s` seconds on the active backend
    drive_duration = get_closest_multiple_of_16(pulse.seconds_to_samples(drive_duration_sec))
    # sigma is not required for a Constant pulse
    #drive_sigma = pulse.seconds_to_samples(drive_sigma_sec)
    # Returns the qubit's DriveChannel on the active backend.
    # Drive channels transmit signals to qubits which enact gate operations.
    drive_chan = pulse.drive_channel(qubit)
    pulse.set_frequency(freq, drive_chan)
    # Drive pulse samples
    pulse.play(pulse.Constant(duration=drive_duration,
                              amp=drive_amp,
                              name='freq_sweep_excitation_pulse'), drive_chan)
# Plot the pulse
sweep_sched.draw()
# Gate(name, num_qubits, params) creates a new (opaque) gate
sweep_gate = Gate("sweep", 1, [freq])
# Create the quantum circuit, 1 qubit, 1 bit
qc_sweep = QuantumCircuit(1, 1)
# Add our new gate sweep_gate to the quantum circuit
qc_sweep.append(sweep_gate, [0])
qc_sweep.measure(0, 0)
"""This command: add_calibration(gate, qubits, schedule, params=None)
registers a low-level, custom pulse definition for the given gate"""
qc_sweep.add_calibration(sweep_gate, (0,), sweep_sched, [freq])
# Frequency settings for the sweep (MUST BE IN HZ)
frequencies_Hz = frequencies_GHz*GHz #convert to Hz
"""This command: assign_parameters(parameters, inplace=False)
assigns parameters to new parameters or values"""
exp_sweep_circs = [qc_sweep.assign_parameters({freq: f}, inplace=False) for f in frequencies_Hz]
from qiskit import schedule
# schedule(circuits, backend) compiles a circuit to a pulse Schedule using the backend
sweep_schedule = schedule(exp_sweep_circs[0], backend)
# To show the schedule
sweep_schedule.draw(backend=backend)
# Each schedule will be repeated num_shots_per_frequency times
num_shots_per_frequency = 1024
job = backend.run(exp_sweep_circs,
                  meas_level=1, #kerneled (IQ) data
                  meas_return='avg',
                  shots=num_shots_per_frequency)
from qiskit.tools.monitor import job_monitor
# Monitor the job status
job_monitor(job)
# Retrieve the results (generous timeout: queueing + execution can be slow)
frequency_sweep_results = job.result(timeout=1200)
# Plotting the results with matplotlib
import matplotlib.pyplot as plt
sweep_values = []
for i in range(len(frequency_sweep_results.results)):
    # Get the results from the ith experiment
    res = frequency_sweep_results.get_memory(i)*scale_factor
    # Get the results for `qubit` from this experiment
    sweep_values.append(res[qubit])
# Plot frequencies vs. real part of sweep values
plt.scatter(frequencies_GHz, np.real(sweep_values), color='black')
# plt.xlim([min(frequencies_GHz), max(frequencies_GHz)])
plt.xlabel("Frequency [GHz]")
plt.ylabel("Measured signal [a.u.]")
plt.show()
#Using scipy for the curve fitting
from scipy.optimize import curve_fit
def fit_function(x_values, y_values, function, init_params):
    """Fit *function* to the data; return (best-fit params, fitted y values)."""
    best, _cov = curve_fit(function, x_values, y_values, init_params)
    fitted = function(x_values, *best)
    return best, fitted
# Fitting the curve. We use a Gaussian function for the fit
"""We need to assign the correct initial parameters for our data
A is related to the height of the curve,
B is related to the width of the Gaussian,
C is the cut with the Y axis and
q_freq is the estimated peak frequency of the curve."""
fit_params, y_fit = fit_function(frequencies_GHz,
                                 np.real(sweep_values),
                                 lambda x, A, q_freq, B, C: A*np.exp(-(x - q_freq) ** 2 / (2*B**2)) + C,
                                 [1.75e8, 5.090, 0.02, 0] # initial parameters for curve_fit
                                 )
# Plotting the data
plt.scatter(frequencies_GHz, np.real(sweep_values), color='black')
# and plotting the fit
plt.plot(frequencies_GHz, y_fit, color='red')
plt.xlim([min(frequencies_GHz), max(frequencies_GHz)])
plt.xlabel("Frequency [GHz]")
plt.ylabel("Measured Signal [a.u.]")
plt.show()
# The fitted peak position is the new (rough) qubit frequency estimate
A, rough_qubit_frequency, B, C = fit_params
rough_qubit_frequency = rough_qubit_frequency*GHz # make sure qubit freq is in Hz
print(f"We've updated our qubit frequency estimate from "
      f"{round(backend_defaults.qubit_freq_est[qubit] / GHz, 5)} GHz to {round(rough_qubit_frequency/GHz, 5)} GHz.")
# --- Calibrating a pi pulse ---
# A pi pulse takes a qubit from |0> to |1> (X gate)
# Rabi experiment parameters
num_rabi_points = 50
# Drive amplitude values to iterate over:
# 50 amplitudes evenly spaced from 0 to 0.55 using linspace
drive_amp_min = 0
drive_amp_max = 0.55 #Changed this parameter
drive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points)
# Build the Rabi experiments:
"""A drive pulse at the qubit frequency, followed by a measurement,
where we vary the drive amplitude each time"""
# This is similar to the frequency sweep schedule
drive_amp = Parameter('drive_amp')
with pulse.build(backend=backend, default_alignment='sequential', name='Rabi Experiment') as rabi_sched:
    drive_duration = get_closest_multiple_of_16(pulse.seconds_to_samples(drive_duration_sec))
    # drive_sigma = pulse.seconds_to_samples(drive_sigma_sec)
    drive_chan = pulse.drive_channel(qubit)
    # Drive at the rough_qubit_frequency found before
    pulse.set_frequency(rough_qubit_frequency, drive_chan)
    pulse.play(pulse.Constant(duration=drive_duration,
                              amp=drive_amp,
                              name='Rabi Pulse'), drive_chan)
# New rabi gate
rabi_gate = Gate("rabi", 1, [drive_amp])
# New quantum circuit for the Rabi experiment
qc_rabi = QuantumCircuit(1, 1)
# Add the rabi_gate we just defined
qc_rabi.append(rabi_gate, [0])
# Measure the QC
qc_rabi.measure(0, 0)
# Add calibration to the rabi_gate
qc_rabi.add_calibration(rabi_gate, (0,), rabi_sched, [drive_amp])
exp_rabi_circs = [qc_rabi.assign_parameters({drive_amp: a}, inplace=False) for a in drive_amps]
# Create our schedule and draw it
rabi_schedule = schedule(exp_rabi_circs[-1], backend)
rabi_schedule.draw(backend=backend)
num_shots_per_point = 1024
job = backend.run(exp_rabi_circs,
                  meas_level=1,
                  meas_return='avg',
                  shots=num_shots_per_point)
job_monitor(job)
# Get the results
rabi_results = job.result(timeout=120)
"""We need to extract the results and fit them to a sinusoidal curve
The range of amplitudes we got will rotate (hopefully) the qubit several times
around the Bloch sphere. We need to find the drive amplitude needed for the
signal to oscillate from a maximum to a minimum (all |0> to all |1>)
That's exactly what gives us the calibrated amplitud represented by the pi pulse
"""
# First we center the data around 0
def baseline_remove(values):
    """Center *values* around zero by subtracting their arithmetic mean."""
    arr = np.asarray(values)
    return arr - arr.mean()
# --- Extract Rabi oscillation, fit it, derive the pi-pulse amplitude, and
# --- build the |0>/|1> discrimination data. ---
#Empty array for Rabi values
rabi_values = []
#Remember we defined num_rabi_points initially at 50
for i in range(num_rabi_points):
    #Get the results for 'qubit' from the ith experiment
    rabi_values.append(rabi_results.get_memory(i)[qubit] * scale_factor)
#We get the real values from the centered rabi_values
rabi_values = np.real(baseline_remove(rabi_values))
#Plot the results
plt.xlabel("Drive amp [a.u.]")
plt.ylabel("Measured signal [a.u.]")
#Plotting amplitudes vs Rabi values
plt.scatter(drive_amps, rabi_values, color='black')
plt.show()
#Now we fit the curve, similarly to the frequencies fit
#THIS PARAMETERS ARE FOR IBQM QUITO AND BELEM
fit_params, y_fit = fit_function(drive_amps,
                                 rabi_values,
                                 lambda x, A, B, drive_period, phi: (A*np.cos(2*np.pi*x/drive_period - phi) + B),
                                 [1.4e8, 0, 0.16, 0])
plt.scatter(drive_amps, rabi_values, color='black')
plt.plot(drive_amps, y_fit, color='red')
drive_period = fit_params[2] #get period of rabi oscillation
plt.xlabel("Drive amp [a.u.]", fontsize=15)
plt.ylabel("Measured signal [a.u.]", fontsize=15)
plt.show()
#Pi pulse's amplitude needed for the signal to oscillate from maximum to minimum
# Half a Rabi period takes the qubit from |0> all the way to |1>.
pi_amp = abs(drive_period / 2)
print(f"Pi Amplitude = {pi_amp}")
#We can define our pi pulse now
with pulse.build(backend) as pi_pulse:
    drive_duration = get_closest_multiple_of_16(pulse.seconds_to_samples(drive_duration_sec))
    # drive_sigma = pulse.seconds_to_samples(drive_sigma_sec)
    drive_chan = pulse.drive_channel(qubit)
    pulse.play(pulse.Constant(duration=drive_duration,
                              amp=pi_amp,
                              name='pi_pulse'), drive_chan)
pi_pulse.draw()
#Now we create a ground state to try our pi pulse
qc_gnd = QuantumCircuit(1, 1)
qc_gnd.measure(0, 0)
#And its ground schedule
gnd_schedule = schedule(qc_gnd, backend)
gnd_schedule.draw(backend=backend)
#We create the excited state
with pulse.build(backend=backend, default_alignment='sequential', name='excited state') as exc_schedule:
    drive_chan = pulse.drive_channel(qubit)
    pulse.set_frequency(rough_qubit_frequency, drive_chan)
    pulse.call(pi_pulse)
#And another QC for the excited state
qc_exc = QuantumCircuit(1, 1)
#Apply X gate to qubit 0
qc_exc.x(0)
#And measure it
qc_exc.measure(0, 0)
#Then we add the calibration from the excited state's sched
# The X gate is implemented by our calibrated pi pulse.
qc_exc.add_calibration("x", (0,), exc_schedule, [])
#Now execute the exc state's schedule
exec_schedule = schedule(qc_exc, backend)
exec_schedule.draw(backend=backend)
#Preparation schedules for the ground and excited states
num_shots = 1024
job = backend.run([qc_gnd, qc_exc],
                  #Choosing meas_level 1 for kerneled data
                  meas_level=1,
                  meas_return='single',
                  shots=num_shots)
job_monitor(job)
gnd_exc_results = job.result(timeout=120)
#Getting the ground and excited state's results
gnd_results = gnd_exc_results.get_memory(0)[:, qubit]*scale_factor
exc_results = gnd_exc_results.get_memory(1)[:, qubit]*scale_factor
#Plotting results
plt.figure()
#Ground state in blue
plt.scatter(np.real(gnd_results), np.imag(gnd_results),
            s=5, cmap='viridis', c='blue', alpha=0.5, label='Gnd state')
#Excited state in red
plt.scatter(np.real(exc_results), np.imag(exc_results),
            s=5, cmap='viridis', c='red', alpha=0.5, label='Exc state')
plt.axis('square')
#Plot a large black dot for the average result of the 0 and 1 states
#Mean of real and imaginary parts of results
# These centroids are reused by classify() below.
mean_gnd = np.mean(gnd_results)
mean_exc = np.mean(exc_results)
plt.scatter(np.real(mean_gnd), np.imag(mean_gnd),
            s=100, cmap='viridis', c='black',alpha=1.0, label='Mean')
plt.scatter(np.real(mean_exc), np.imag(mean_exc),
            s=100, cmap='viridis', c='black',alpha=1.0)
plt.ylabel('Im [a.u.]', fontsize=15)
plt.xlabel('Q (Real) [a.u.]', fontsize=15)
plt.title("0-1 discrimination", fontsize=15)
plt.legend()
plt.show()
"""The mean value of the excited state is not in the center of the big red circle,
this is due to the population of excited states (red dots) not being
perfectly together, but several red dots are in the blue area."""
"""Setting up a classifier function:
returns 0 if a given point is closer to the mean of ground state results
and returns 1 if the point is closer to the avg exc state results"""
import math
#This functions classifies the given state as |0> or |1>.
def classify(point: complex, gnd=None, exc=None):
    """Classify a kerneled IQ-plane measurement as ground (0) or excited (1).

    Returns 1 when *point* lies strictly closer to the excited-state
    centroid than to the ground-state centroid, otherwise 0.

    Parameters
    ----------
    point : complex
        Single-shot IQ value to classify.
    gnd, exc : complex, optional
        Reference centroids. When omitted they default to the module-level
        ``mean_gnd`` / ``mean_exc`` computed from the calibration run above,
        preserving the original call pattern ``classify(point)``.
    """
    if gnd is None:
        gnd = mean_gnd
    if exc is None:
        exc = mean_exc

    def distance(a, b):
        # Euclidean distance in the IQ plane.
        return math.sqrt((np.real(a) - np.real(b))**2 + (np.imag(a) - np.imag(b))**2)

    return int(distance(point, exc) < distance(point, gnd))
# --- T1 (relaxation) experiment followed by a Ramsey (precise frequency) experiment ---
#T1 time: time it takes for a qubit to decay from exc_state to gnd_state
"""To measure T1 we use the pi pulse we've calibrated, then a measure pulse.
But first we have to insert a delay"""
#T1 experiment parameters
time_max_sec = 450 * us
time_step_sec = 6.5 * us
delay_times_sec = np.arange(1 * us, time_max_sec, time_step_sec)
#We define the delay
delay = Parameter('delay')
#Create another quantum circuit
qc_t1 = QuantumCircuit(1, 1)
#X Gate
qc_t1.x(0)
#Delay
qc_t1.delay(delay, 0)
#Measurement
qc_t1.measure(0, 0)
#Calibration on X gate with our pi pulse
qc_t1.add_calibration("x", (0,), pi_pulse)
# One circuit per delay value, delays converted from seconds to dt units.
exp_t1_circs = [qc_t1.assign_parameters({delay: get_dt_from(d)}, inplace=False) for d in delay_times_sec]
#Schedule for T1
sched_idx = -1
t1_schedule = schedule(exp_t1_circs[sched_idx], backend)
t1_schedule.draw(backend=backend)
#Execution settings
num_shots = 256
job = backend.run(exp_t1_circs,
                  meas_level=1,
                  meas_return='single',
                  shots=num_shots)
job_monitor(job)
t1_results = job.result(timeout=120)
#Getting the results to plot
t1_values = []
for i in range(len(delay_times_sec)):
    iq_data = t1_results.get_memory(i)[:,qubit] * scale_factor
    #sum() returns the sum of all items.
    #map() returns a map object of the result after applying a given
    #function (sum) to each item of a given iterable.
    # Fraction of shots classified as |1> at this delay.
    t1_values.append(sum(map(classify, iq_data)) / num_shots)
plt.scatter(delay_times_sec/us, t1_values, color='black')
plt.title("$T_1$ Experiment", fontsize=15)
# NOTE(review): '\m' in this label is an invalid escape sequence
# (DeprecationWarning); consider raw strings r'...'.
plt.xlabel('Delay before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Signal [a.u.]', fontsize=15)
plt.show()
#Fitting data with an exponential curve
fit_params, y_fit = fit_function(delay_times_sec/us, t1_values,
                                 lambda x, A, C, T1: (A * np.exp(-x / T1) + C),
                                 [-3, 3, 100]
                                 )
_, _, T1 = fit_params
plt.scatter(delay_times_sec/us, t1_values, color='black')
plt.plot(delay_times_sec/us, y_fit, color='red', label=f"T1 = {T1:.2f} us")
plt.xlim(0, np.max(delay_times_sec/us))
plt.title("$T_1$ Experiment", fontsize=15)
plt.xlabel('Delay before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Signal [a.u.]', fontsize=15)
plt.legend()
plt.show()
#Measure qubit frequency (precisely) with Ramsey Experiment
#Apply a pi/2 pulse, wait some time, and then anothe pi/2 pulse.
#Ramsey experiment parameters
time_max_sec = 1.8 * us
time_step_sec = 0.025 * us
delay_times_sec = np.arange(0.1 * us, time_max_sec, time_step_sec)
#Drive parameters
#Drive amplitude for pi/2 is simply half the amplitude of the pi pulse
drive_amp = pi_amp / 2
#Build the x_90 pulse, which is an X rotation of 90 degrees, a pi/2 rotation
with pulse.build(backend) as x90_pulse:
    drive_duration = get_closest_multiple_of_16(pulse.seconds_to_samples(drive_duration_sec))
    drive_sigma = pulse.seconds_to_samples(drive_sigma_sec)
    drive_chan = pulse.drive_channel(qubit)
    pulse.play(pulse.Constant(duration=drive_duration,
                              amp=drive_amp,
                              name='x90_pulse'), drive_chan)
#Now we have to drive the pulses off-resonance an amount detuning_MHz
detuning_MHz = 2
ramsey_frequency = round(rough_qubit_frequency + detuning_MHz * MHz, 6) #Ramsey freq in Hz
#Pulse for Ramsey experiment
delay = Parameter('delay')
with pulse.build(backend=backend, default_alignment='sequential', name="Ramsey delay Experiment") as ramsey_schedule:
    drive_chan = pulse.drive_channel(qubit)
    pulse.set_frequency(ramsey_frequency, drive_chan)
    #X pulse, Delay, X pulse
    pulse.call(x90_pulse)
    pulse.delay(delay, drive_chan)
    pulse.call(x90_pulse)
#Ramsey gate
ramsey_gate = Gate("ramsey", 1, [delay])
#Another QC for Ramsey experiment
qc_ramsey = QuantumCircuit(1, 1)
#Adding the gate to the circuit
qc_ramsey.append(ramsey_gate, [0])
qc_ramsey.measure(0, 0)
qc_ramsey.add_calibration(ramsey_gate, (0,), ramsey_schedule, [delay])
exp_ramsey_circs = [qc_ramsey.assign_parameters({delay: get_dt_from(d)}, inplace=False) for d in delay_times_sec]
ramsey_schedule = schedule(exp_ramsey_circs[2], backend)
ramsey_schedule.draw(backend=backend)
#Execution settings for Ramsey experimet
num_shots = 256
job = backend.run(exp_ramsey_circs,
                  meas_level=1,
                  meas_return='single',
                  shots=num_shots)
job_monitor(job)
ramsey_results = job.result(timeout=120)
#Array for the results
ramsey_values = []
for i in range(len(delay_times_sec)):
    iq_data = ramsey_results.get_memory(i)[:,qubit] * scale_factor
    ramsey_values.append(sum(map(classify, iq_data)) / num_shots)
#Plotting the results
plt.scatter(delay_times_sec/us, np.real(ramsey_values), color='black')
plt.xlim(0, np.max(delay_times_sec/us))
plt.title("Ramsey Experiment", fontsize=15)
plt.xlabel('Delay between X90 pulses [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.show()
#Fitting data to a sinusoid
fit_params, y_fit = fit_function(delay_times_sec/us, np.real(ramsey_values),
                                 lambda x, A, del_f_MHz, C, B: (
                                     A * np.cos(2*np.pi*del_f_MHz*x - C) + B
                                 ),
                                 [5, 1.5, 0, 0.25]
                                 )
# Off-resonance component
_, del_f_MHz, _, _, = fit_params # freq is MHz since times in us
plt.scatter(delay_times_sec/us, np.real(ramsey_values), color='black')
plt.plot(delay_times_sec/us, y_fit, color='red', label=f"df = {del_f_MHz:.2f} MHz")
plt.xlim(0, np.max(delay_times_sec/us))
plt.xlabel('Delay between X90 pulses [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.title('Ramsey Experiment', fontsize=15)
plt.legend()
plt.show()
# Correct the rough estimate by the residual detuning seen in the Ramsey fringes.
precise_qubit_freq = rough_qubit_frequency + (detuning_MHz - del_f_MHz) * MHz # get new freq in Hz
print(f"Our updated qubit frequency is now {round(precise_qubit_freq/GHz, 6)} GHz. "
      f"It used to be {round(rough_qubit_frequency / GHz, 6)} GHz")
# --- T2 (Hahn echo) experiment followed by a dynamical-decoupling sequence ---
#Measuring coherence time (T2) with Hahn Echoes experiment
#It's the same as Ramsey's experiment, with a pi pulse between the pi/2
#T2 experiment parameters
tau_max_sec = 200 * us
tau_step_sec = 4 * us
delay_times_sec = np.arange(2 * us, tau_max_sec, tau_step_sec)
#Define the delay and build the pulse for T2
delay = Parameter('delay')
with pulse.build(backend=backend, default_alignment='sequential', name="T2 delay Experiment") as t2_schedule:
    drive_chan = pulse.drive_channel(qubit)
    pulse.set_frequency(precise_qubit_freq, drive_chan)
    #X pulse, delay, pi pulse, delay, X pulse
    pulse.call(x90_pulse)
    pulse.delay(delay, drive_chan)
    pulse.call(pi_pulse)
    pulse.delay(delay, drive_chan)
    pulse.call(x90_pulse)
#Define T2 gate
t2_gate = Gate("t2", 1, [delay])
#QC for T2
qc_t2 = QuantumCircuit(1, 1)
#Add T2 gate
qc_t2.append(t2_gate, [0])
qc_t2.measure(0, 0)
#Add calibration with delay
qc_t2.add_calibration(t2_gate, (0,), t2_schedule, [delay])
exp_t2_circs = [qc_t2.assign_parameters({delay: get_dt_from(d)}, inplace=False) for d in delay_times_sec]
#Schedule for T2 and show it
t2_schedule = schedule(exp_t2_circs[-1], backend)
t2_schedule.draw(backend=backend)
#Execution settings
num_shots_per_point = 512
job = backend.run(exp_t2_circs,
                  meas_level=1, #Kerneled data
                  meas_return='single',
                  shots=num_shots_per_point)
job_monitor(job)
#Getting results
t2_results = job.result(timeout=120)
#T2 empty array
t2_values = []
#Retrieving results and adding them classified to the array
for i in range(len(delay_times_sec)):
    iq_data = t2_results.get_memory(i)[:,qubit] * scale_factor
    t2_values.append(sum(map(classify, iq_data)) / num_shots_per_point)
#Plot ressults for Hanh Echo experiment
# Total evolution time is 2*tau (delay on each side of the echo pi pulse).
plt.scatter(2*delay_times_sec/us, t2_values, color='black')
plt.xlabel('Delay between X90 pulse and $\pi$ pulse [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.title('Hahn Echo Experiment', fontsize=15)
plt.show()
#Fitting data with an exponential function
fit_params, y_fit = fit_function(2*delay_times_sec/us, t2_values,
                                 lambda x, A, B, T2: (A * np.exp(-x / T2) + B),
                                 [-3, 0, 100])
_, _, T2 = fit_params
#Plotting results and fit curve
plt.scatter(2*delay_times_sec/us, t2_values, color='black')
plt.plot(2*delay_times_sec/us, y_fit, color='red', label=f"T2 = {T2:.2f} us")
plt.xlim(0, np.max(2*delay_times_sec/us))
plt.xlabel('Delay between X90 pulse and $\pi$ pulse [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.title('Hahn Echo Experiment', fontsize=15)
plt.legend()
plt.show()
#Dynamical decoupling technique
#Used to extract longer coherence times from qubits
#Experiment parameters
tau_sec_min = 1 * us
tau_sec_max = 180 * us
tau_step_sec = 4 * us
taus_sec = np.arange(tau_sec_min, tau_sec_max, tau_step_sec)
num_sequence = 1
print(f"Total time ranges from {2.*num_sequence*taus_sec[0] / us} to {2.*num_sequence*taus_sec[-1] / us} us")
#This schedule is different from the others...
delay = Parameter('delay')
with pulse.build(backend=backend, default_alignment='sequential', name="T2DD delay Experiment") as T2DD_schedule:
    drive_chan = pulse.drive_channel(qubit)
    pulse.set_frequency(precise_qubit_freq, drive_chan)
    #X Pulse and Delay
    pulse.call(x90_pulse)
    pulse.delay(delay/2, drive_chan)
    # Repeating block of four pi pulses; every second one is phase-shifted by
    # pi/2 (applied about the Y axis) via the phase_offset context.
    for loop_counts in range(num_sequence):
        pulse.call(pi_pulse)
        pulse.delay(delay, drive_chan)
        with pulse.phase_offset(np.pi/2, pulse.drive_channel(qubit)):
            pulse.call(pi_pulse)
        pulse.delay(delay, drive_chan)
        pulse.call(pi_pulse)
        pulse.delay(delay, drive_chan)
        with pulse.phase_offset(np.pi/2, pulse.drive_channel(qubit)):
            pulse.call(pi_pulse)
        if loop_counts != num_sequence-1:
            pulse.delay(delay, drive_chan)
    pulse.delay(delay/2, drive_chan)
    pulse.call(x90_pulse)
#Create new gate for T2 Dynamical Decouling
T2DD_gate = Gate("T2DD", 1, [delay])
#Another QC
qc_T2DD = QuantumCircuit(1, 1)
qc_T2DD.append(T2DD_gate, [0])
qc_T2DD.measure(0, 0)
qc_T2DD.add_calibration(T2DD_gate, (0,), T2DD_schedule, [delay])
exp_T2DD_circs = [qc_T2DD.assign_parameters({delay: get_dt_from(d)}, inplace=False) for d in taus_sec]
#Schedule fot T2 D.D.
T2DD_schedule = schedule(exp_T2DD_circs[-1], backend)
T2DD_schedule.draw(backend=backend)
num_shots_per_point = 1024
job = backend.run(exp_T2DD_circs,
                  meas_level=1,
                  meas_return='single',
                  shots=num_shots_per_point)
job_monitor(job)
T2DD_results = job.result(timeout=120)
# Four inter-pulse delays per repetition -> total evolution time 4*N*tau.
times_sec = 4*num_sequence*taus_sec
DD_values = []
for i in range(len(times_sec)):
    iq_data = T2DD_results.get_memory(i)[:,qubit] * scale_factor
    DD_values.append(sum(map(classify, iq_data)) / num_shots_per_point)
plt.scatter(times_sec/us, DD_values, color='black')
plt.xlim(0, np.max(times_sec/us))
plt.xlabel('Total time before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.title('Dynamical Decoupling Experiment', fontsize=15)
plt.show()
# Fit the data
fit_func = lambda x, A, B, T2DD: (A * np.exp(-x / T2DD) + B)
fitparams, conv = curve_fit(fit_func, times_sec/us, DD_values, [3.5, 0.8, 150])
_, _, T2DD = fitparams
plt.scatter(times_sec/us, DD_values, color='black')
plt.plot(times_sec/us, fit_func(times_sec/us, *fitparams), color='red', label=f"T2DD = {T2DD:.2f} us")
plt.xlim([0, np.max(times_sec/us)])
plt.xlabel('Total time before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Measured Signal [a.u.]', fontsize=15)
plt.title('Dynamical Decoupling Experiment', fontsize=15)
plt.legend()
plt.show()
"""That little oscillation in the DD experiment graph
means that my qubit is not totally in resonance,
it's still oscillating between two states.
It can be fixed by adjusting parameters in every experiment,
and improving the curve fittings."""
|
https://github.com/COFAlumni-USB/qiskit-fall-2022
|
COFAlumni-USB
|
import numpy as np
import math
import qiskit as qiskit
from numpy import sqrt
from random import randint
from qiskit import *
from qiskit import Aer, QuantumCircuit, IBMQ, execute, quantum_info, transpile
from qiskit.visualization import plot_state_city, plot_bloch_multivector
from qiskit.visualization import plot_histogram
from qiskit.tools import job_monitor
from qiskit.providers.fake_provider import FakeOpenPulse2Q, FakeOpenPulse3Q, FakeManila, FakeValencia, FakeHanoi
from qiskit import pulse, transpile
from qiskit.pulse.library import Gaussian
#Compuertas personalizadas
from qiskit.circuit import Gate
from qiskit import QiskitError
#informacion
import qiskit.tools.jupyter
# Load the locally saved IBM Quantum credentials and pick the Belem device.
provider = IBMQ.load_account()
belem = provider.get_backend('ibmq_belem')
# (Runtime message left in Spanish on purpose: "executed successfully".)
print('se ha ejecutado correctamente')
def SoQ_Grover_0(qc, x_qubit_0, x_qubit_1, y_qubit):
    """Grover marking oracle: flip *y_qubit* only when both search qubits are |1>.

    A plain Toffoli; returns the (mutated) circuit so calls can be chained.
    """
    qc.ccx(x_qubit_0, x_qubit_1, y_qubit)
    return qc
def SoQ_Grover_1(qc, x_qubit_0, x_qubit_1, y_qubit):
    """Grover marking oracle with the Toffoli conjugated by X on *x_qubit_0*.

    The target flips only when qubit 0 is |0> and qubit 1 is |1>; returns the
    circuit for chaining.
    """
    qc.x(x_qubit_0)
    qc.ccx(x_qubit_0, x_qubit_1, y_qubit)
    qc.x(x_qubit_0)
    return qc
def SoQ_Grover_2(qc, x_qubit_0, x_qubit_1, y_qubit):
    """Grover marking oracle with the Toffoli conjugated by X on *x_qubit_1*.

    The target flips only when qubit 0 is |1> and qubit 1 is |0>; returns the
    circuit for chaining.
    """
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0, x_qubit_1, y_qubit)
    qc.x(x_qubit_1)
    return qc
def SoQ_Grover_3(qc, x_qubit_0, x_qubit_1, y_qubit):
    """Grover marking oracle with the Toffoli conjugated by X on both search qubits.

    The target flips only when both search qubits are |0>; returns the circuit
    for chaining.
    """
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0, x_qubit_1, y_qubit)
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    return qc
def random_oracle(qc, x_qubit_0, x_qubit_1, y_qubit):
    """Append one of the four marking oracles, chosen uniformly at random.

    Draw-to-oracle mapping (identical to the original if/elif chain):
    3 -> SoQ_Grover_0, 2 -> SoQ_Grover_1, 1 -> SoQ_Grover_2, 0 -> SoQ_Grover_3.
    Returns the circuit for chaining.
    """
    chosen = {
        3: SoQ_Grover_0,
        2: SoQ_Grover_1,
        1: SoQ_Grover_2,
        0: SoQ_Grover_3,
    }[randint(0, 3)]
    chosen(qc, x_qubit_0, x_qubit_1, y_qubit)
    return qc
def Grover_Iteration(qc, x_qubit_0, x_qubit_1):
    """Apply one Grover diffusion (inversion about the mean) step.

    Implements H X (CZ) X H, with the CZ realized as H-CX-H on *x_qubit_1*.
    Note that the H/X layers always act on qubits 0-1 (via range(2))
    regardless of the arguments. Returns the circuit for chaining.
    """
    search_qubits = range(2)
    qc.h(search_qubits)
    qc.x(search_qubits)
    # CZ between the two search qubits, decomposed as H . CX . H on the target.
    qc.h(x_qubit_1)
    qc.cx(x_qubit_0, x_qubit_1)
    qc.h(x_qubit_1)
    qc.x(search_qubits)
    qc.h(search_qubits)
    return qc
print('se ha ejecutado correctamente')
#Test simulator: FakeManila
# Grover demo: two search qubits plus one oracle target, then a custom
# 'hadamard' calibration pulse is attached and the circuit re-transpiled.
backend = FakeManila()
x_register=2
y_register=1
measure_register=2
y_position=x_register+y_register-1
circ = QuantumCircuit(x_register+y_register,measure_register)
# Put the oracle target in |1> so the following H yields |-> (phase kickback).
circ.x(y_position)
circ.barrier()
circ.h(range(x_register+y_register))
circ.barrier()
random_oracle(circ, 0,1,2)
circ.barrier()
Grover_Iteration(circ, 0,1)
circ.measure(range(x_register),range(measure_register))
circ.draw('mpl')
# NOTE(review): `result` is never read; the plotted counts come from backend.run().
result = execute(circ, backend=FakeManila()).result();
job = backend.run(circ)
counts = job.result().get_counts()
plot_histogram(counts)
backend = FakeManila();
with pulse.build(backend, name='hadamard') as h_q0:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=3), pulse.drive_channel(0))
h_q0.draw()
backend = FakeManila();
# Rebuilt with sigma=33.33; this h_q0 is the schedule used for the calibrations
# below. (The name 'h_q0' is arbitrary; it only identifies the schedule.)
with pulse.build(backend, name='hadamard') as h_q0:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=33.33), pulse.drive_channel(0))
h_q0.draw()
# The same custom schedule is attached as the calibration for every basis gate on qubit 0.
circ.add_calibration( 'h', [0], h_q0)
circ.add_calibration( 'x', [0], h_q0)
circ.add_calibration( 'cx',[0], h_q0)
circ.add_calibration( 'sx',[0], h_q0)
circ.add_calibration( 'id',[0], h_q0)
circ.add_calibration( 'rz',[0], h_q0)
circ.add_calibration( 'reset',[0], h_q0)
backend = FakeManila()
circ1 = transpile(circ, backend)
print(backend.configuration().basis_gates)
circ1.draw('mpl', idle_wires=False)
result = execute(circ1, backend=FakeManila()).result();
job = backend.run(circ1)
counts = job.result().get_counts()
plot_histogram(counts)
# NOTE(review): the six functions below are verbatim re-definitions of the ones
# declared earlier in this file (a notebook cell re-run); they rebind the same
# names to identical implementations.
def SoQ_Grover_0(qc, x_qubit_0,x_qubit_1,y_qubit):
    #Toffoli alone: flips y only for x = 11.
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    return qc
def SoQ_Grover_1(qc, x_qubit_0,x_qubit_1,y_qubit):
    #Toffoli conjugated by X on qubit 0.
    qc.x(x_qubit_0)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_0)
    return qc
def SoQ_Grover_2(qc, x_qubit_0,x_qubit_1,y_qubit):
    #Toffoli conjugated by X on qubit 1.
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_1)
    return qc
def SoQ_Grover_3(qc, x_qubit_0,x_qubit_1,y_qubit):
    #Toffoli conjugated by X on both search qubits.
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    return qc
def random_oracle(qc, x_qubit_0,x_qubit_1,y_qubit):
    #Uniformly pick one of the four oracles above.
    rand=randint(0,3)
    if rand==3:
        SoQ_Grover_0(qc, x_qubit_0,x_qubit_1,y_qubit)
    elif rand==2:
        SoQ_Grover_1(qc, x_qubit_0,x_qubit_1,y_qubit)
    elif rand==1:
        SoQ_Grover_2(qc, x_qubit_0,x_qubit_1,y_qubit)
    else:
        SoQ_Grover_3(qc, x_qubit_0,x_qubit_1,y_qubit)
    return qc
def Grover_Iteration(qc, x_qubit_0,x_qubit_1):
    #Diffusion operator: H X (CZ as H-CX-H) X H on qubits 0-1.
    qc.h(range(2))
    qc.x(range(2))
    qc.h(x_qubit_1)
    qc.cx(x_qubit_0,x_qubit_1)
    qc.h(x_qubit_1)
    qc.x(range(2))
    qc.h(range(2))
    return qc
print('se ha ejecutado correctamente')
# Test simulator: FakeOpenPulse3Q (pulse-enabled 3-qubit fake backend).
backend = FakeOpenPulse3Q()
# Register layout: two search qubits |x> plus one oracle target |y>.
x_register = 2
y_register = 1
measure_register = 2
y_position = x_register + y_register - 1
circ = QuantumCircuit(x_register + y_register, measure_register)
# Put the oracle target in |1> so the following H yields |-> (phase kickback).
circ.x(y_position)
circ.barrier()
circ.h(range(x_register + y_register))
circ.barrier()
random_oracle(circ, 0, 1, 2)
circ.barrier()
Grover_Iteration(circ, 0, 1)
circ.measure(range(x_register), range(measure_register))
circ.draw('mpl')
# NOTE(review): `result` is never read; the plotted counts come from backend.run().
result = execute(circ, backend=FakeOpenPulse3Q()).result()
job = backend.run(circ)
counts = job.result().get_counts()
plot_histogram(counts)
backend = FakeOpenPulse3Q()
# First draft of a custom 'hadamard' pulse (sigma=3)...
with pulse.build(backend, name='hadamard') as h_q1:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=3), pulse.drive_channel(0))
h_q1.draw()
backend = FakeOpenPulse3Q()
# ...immediately rebuilt with sigma=33.33; this h_q1 is the schedule used below.
# (The name 'h_q1' is arbitrary; it only identifies the schedule.)
with pulse.build(backend, name='hadamard') as h_q1:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=33.33), pulse.drive_channel(0))
h_q1.draw()
# Attach the same custom schedule as the calibration for every basis gate on qubit 0.
circ.add_calibration('h', [0], h_q1)
circ.add_calibration('x', [0], h_q1)
circ.add_calibration('cx', [0], h_q1)
circ.add_calibration('sx', [0], h_q1)
circ.add_calibration('id', [0], h_q1)
circ.add_calibration('rz', [0], h_q1)
circ.add_calibration('reset', [0], h_q1)
backend = FakeOpenPulse3Q()
circ2 = transpile(circ, backend)
print(backend.configuration().basis_gates)
circ2.draw('mpl', idle_wires=False)
# FIX: this re-execution previously targeted FakeManila() even though the whole
# section runs on FakeOpenPulse3Q (compare the matching runs below, which use
# the section's own backend). Keep the backend consistent.
result = execute(circ2, backend=FakeOpenPulse3Q()).result()
job = backend.run(circ2)
counts = job.result().get_counts()
plot_histogram(counts)
# NOTE(review): duplicated notebook cell — identical helper definitions
# re-declared, then the Grover demo re-run on FakeManila with a sigma=10
# 'hadamard' calibration pulse.
def SoQ_Grover_0(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    return qc
def SoQ_Grover_1(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_0)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_0)
    return qc
def SoQ_Grover_2(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_1)
    return qc
def SoQ_Grover_3(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    return qc
def random_oracle(qc, x_qubit_0,x_qubit_1,y_qubit):
    rand=randint(0,3)
    if rand==3:
        SoQ_Grover_0(qc, x_qubit_0,x_qubit_1,y_qubit)
    elif rand==2:
        SoQ_Grover_1(qc, x_qubit_0,x_qubit_1,y_qubit)
    elif rand==1:
        SoQ_Grover_2(qc, x_qubit_0,x_qubit_1,y_qubit)
    else:
        SoQ_Grover_3(qc, x_qubit_0,x_qubit_1,y_qubit)
    return qc
def Grover_Iteration(qc, x_qubit_0,x_qubit_1):
    qc.h(range(2))
    qc.x(range(2))
    qc.h(x_qubit_1)
    qc.cx(x_qubit_0,x_qubit_1)
    qc.h(x_qubit_1)
    qc.x(range(2))
    qc.h(range(2))
    return qc
print('se ha ejecutado correctamente')
#Test simulator: FakeManila
backend = FakeManila()
x_register=2
y_register=1
measure_register=2
y_position=x_register+y_register-1
circ = QuantumCircuit(x_register+y_register,measure_register)
circ.x(y_position)
circ.barrier()
circ.h(range(x_register+y_register))
circ.barrier()
random_oracle(circ, 0,1,2)
circ.barrier()
Grover_Iteration(circ, 0,1)
circ.measure(range(x_register),range(measure_register))
circ.draw('mpl')
result = execute(circ, backend=FakeManila()).result();
job = backend.run(circ)
counts = job.result().get_counts()
plot_histogram(counts)
backend = FakeManila();
with pulse.build(backend, name='hadamard') as h_q0:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=3), pulse.drive_channel(0))
h_q0.draw()
backend = FakeManila();
# Rebuilt with sigma=10; this h_q0 is the schedule used for the calibrations below.
with pulse.build(backend, name='hadamard') as h_q0:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=10), pulse.drive_channel(0))
h_q0.draw()
circ.add_calibration( 'h', [0], h_q0)
circ.add_calibration( 'x', [0], h_q0)
circ.add_calibration( 'cx',[0], h_q0)
circ.add_calibration( 'sx',[0], h_q0)
circ.add_calibration( 'id',[0], h_q0)
circ.add_calibration( 'rz',[0], h_q0)
circ.add_calibration( 'reset',[0], h_q0)
backend = FakeManila()
circ1 = transpile(circ, backend)
print(backend.configuration().basis_gates)
circ1.draw('mpl', idle_wires=False)
result = execute(circ1, backend=FakeManila()).result();
job = backend.run(circ1)
counts = job.result().get_counts()
plot_histogram(counts)
# NOTE(review): duplicated notebook cell — identical helper definitions
# re-declared, then the Grover demo re-run on FakeOpenPulse3Q with a sigma=10
# 'hadamard' calibration pulse.
def SoQ_Grover_0(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    return qc
def SoQ_Grover_1(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_0)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_0)
    return qc
def SoQ_Grover_2(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_1)
    return qc
def SoQ_Grover_3(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    return qc
def random_oracle(qc, x_qubit_0,x_qubit_1,y_qubit):
    rand=randint(0,3)
    if rand==3:
        SoQ_Grover_0(qc, x_qubit_0,x_qubit_1,y_qubit)
    elif rand==2:
        SoQ_Grover_1(qc, x_qubit_0,x_qubit_1,y_qubit)
    elif rand==1:
        SoQ_Grover_2(qc, x_qubit_0,x_qubit_1,y_qubit)
    else:
        SoQ_Grover_3(qc, x_qubit_0,x_qubit_1,y_qubit)
    return qc
def Grover_Iteration(qc, x_qubit_0,x_qubit_1):
    qc.h(range(2))
    qc.x(range(2))
    qc.h(x_qubit_1)
    qc.cx(x_qubit_0,x_qubit_1)
    qc.h(x_qubit_1)
    qc.x(range(2))
    qc.h(range(2))
    return qc
print('se ha ejecutado correctamente')
#Test simulator: FakeOpenPulse3Q
backend = FakeOpenPulse3Q()
x_register=2
y_register=1
measure_register=2
y_position=x_register+y_register-1
circ = QuantumCircuit(x_register+y_register,measure_register)
circ.x(y_position)
circ.barrier()
circ.h(range(x_register+y_register))
circ.barrier()
random_oracle(circ, 0,1,2)
circ.barrier()
Grover_Iteration(circ, 0,1)
circ.measure(range(x_register),range(measure_register))
circ.draw('mpl')
result = execute(circ, backend=FakeOpenPulse3Q()).result();
job = backend.run(circ)
counts = job.result().get_counts()
plot_histogram(counts)
backend = FakeOpenPulse3Q();
with pulse.build(backend, name='hadamard') as h_q1:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=3), pulse.drive_channel(0))
h_q1.draw()
backend = FakeOpenPulse3Q();
# Rebuilt with sigma=10; this h_q1 is the schedule used for the calibrations below.
with pulse.build(backend, name='hadamard') as h_q1:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=10), pulse.drive_channel(0))
h_q1.draw()
circ.add_calibration( 'h', [0], h_q1)
circ.add_calibration( 'x', [0], h_q1)
circ.add_calibration( 'cx',[0], h_q1)
circ.add_calibration( 'sx',[0], h_q1)
circ.add_calibration( 'id',[0], h_q1)
circ.add_calibration( 'rz',[0], h_q1)
circ.add_calibration( 'reset',[0], h_q1)
backend = FakeOpenPulse3Q()
circ2 = transpile(circ, backend)
print(backend.configuration().basis_gates)
circ2.draw('mpl', idle_wires=False)
result = execute(circ2, backend=FakeOpenPulse3Q()).result();
job = backend.run(circ2)
counts = job.result().get_counts()
plot_histogram(counts)
# NOTE(review): duplicated notebook cell — identical helper definitions
# re-declared, then the Grover demo re-run on FakeManila with a sigma=5
# 'hadamard' calibration pulse.
def SoQ_Grover_0(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    return qc
def SoQ_Grover_1(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_0)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_0)
    return qc
def SoQ_Grover_2(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_1)
    return qc
def SoQ_Grover_3(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    return qc
def random_oracle(qc, x_qubit_0,x_qubit_1,y_qubit):
    rand=randint(0,3)
    if rand==3:
        SoQ_Grover_0(qc, x_qubit_0,x_qubit_1,y_qubit)
    elif rand==2:
        SoQ_Grover_1(qc, x_qubit_0,x_qubit_1,y_qubit)
    elif rand==1:
        SoQ_Grover_2(qc, x_qubit_0,x_qubit_1,y_qubit)
    else:
        SoQ_Grover_3(qc, x_qubit_0,x_qubit_1,y_qubit)
    return qc
def Grover_Iteration(qc, x_qubit_0,x_qubit_1):
    qc.h(range(2))
    qc.x(range(2))
    qc.h(x_qubit_1)
    qc.cx(x_qubit_0,x_qubit_1)
    qc.h(x_qubit_1)
    qc.x(range(2))
    qc.h(range(2))
    return qc
print('se ha ejecutado correctamente')
#Test simulator: FakeManila
backend = FakeManila()
x_register=2
y_register=1
measure_register=2
y_position=x_register+y_register-1
circ = QuantumCircuit(x_register+y_register,measure_register)
circ.x(y_position)
circ.barrier()
circ.h(range(x_register+y_register))
circ.barrier()
random_oracle(circ, 0,1,2)
circ.barrier()
Grover_Iteration(circ, 0,1)
circ.measure(range(x_register),range(measure_register))
circ.draw('mpl')
result = execute(circ, backend=FakeManila()).result();
job = backend.run(circ)
counts = job.result().get_counts()
plot_histogram(counts)
backend = FakeManila();
with pulse.build(backend, name='hadamard') as h_q0:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=3), pulse.drive_channel(0))
h_q0.draw()
backend = FakeManila();
# Rebuilt with sigma=5; this h_q0 is the schedule used for the calibrations below.
with pulse.build(backend, name='hadamard') as h_q0:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=5), pulse.drive_channel(0))
h_q0.draw()
circ.add_calibration( 'h', [0], h_q0)
circ.add_calibration( 'x', [0], h_q0)
circ.add_calibration( 'cx',[0], h_q0)
circ.add_calibration( 'sx',[0], h_q0)
circ.add_calibration( 'id',[0], h_q0)
circ.add_calibration( 'rz',[0], h_q0)
circ.add_calibration( 'reset',[0], h_q0)
backend = FakeManila()
circ1 = transpile(circ, backend)
print(backend.configuration().basis_gates)
circ1.draw('mpl', idle_wires=False)
result = execute(circ1, backend=FakeManila()).result();
job = backend.run(circ1)
counts = job.result().get_counts()
plot_histogram(counts)
# NOTE(review): duplicated notebook cell — identical helper definitions
# re-declared, then the Grover demo re-run on FakeOpenPulse3Q with a sigma=5
# 'hadamard' calibration pulse.
def SoQ_Grover_0(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    return qc
def SoQ_Grover_1(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_0)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_0)
    return qc
def SoQ_Grover_2(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_1)
    return qc
def SoQ_Grover_3(qc, x_qubit_0,x_qubit_1,y_qubit):
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    qc.ccx(x_qubit_0,x_qubit_1,y_qubit)
    qc.x(x_qubit_0)
    qc.x(x_qubit_1)
    return qc
def random_oracle(qc, x_qubit_0,x_qubit_1,y_qubit):
    rand=randint(0,3)
    if rand==3:
        SoQ_Grover_0(qc, x_qubit_0,x_qubit_1,y_qubit)
    elif rand==2:
        SoQ_Grover_1(qc, x_qubit_0,x_qubit_1,y_qubit)
    elif rand==1:
        SoQ_Grover_2(qc, x_qubit_0,x_qubit_1,y_qubit)
    else:
        SoQ_Grover_3(qc, x_qubit_0,x_qubit_1,y_qubit)
    return qc
def Grover_Iteration(qc, x_qubit_0,x_qubit_1):
    qc.h(range(2))
    qc.x(range(2))
    qc.h(x_qubit_1)
    qc.cx(x_qubit_0,x_qubit_1)
    qc.h(x_qubit_1)
    qc.x(range(2))
    qc.h(range(2))
    return qc
print('se ha ejecutado correctamente')
#Test simulator: FakeOpenPulse3Q
backend = FakeOpenPulse3Q()
x_register=2
y_register=1
measure_register=2
y_position=x_register+y_register-1
circ = QuantumCircuit(x_register+y_register,measure_register)
circ.x(y_position)
circ.barrier()
circ.h(range(x_register+y_register))
circ.barrier()
random_oracle(circ, 0,1,2)
circ.barrier()
Grover_Iteration(circ, 0,1)
circ.measure(range(x_register),range(measure_register))
circ.draw('mpl')
result = execute(circ, backend=FakeOpenPulse3Q()).result();
job = backend.run(circ)
counts = job.result().get_counts()
plot_histogram(counts)
backend = FakeOpenPulse3Q();
with pulse.build(backend, name='hadamard') as h_q1:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=3), pulse.drive_channel(0))
h_q1.draw()
backend = FakeOpenPulse3Q();
# Rebuilt with sigma=5; this h_q1 is the schedule used for the calibrations below.
with pulse.build(backend, name='hadamard') as h_q1:
    pulse.play(Gaussian(duration=100, amp=0.1, sigma=5), pulse.drive_channel(0))
h_q1.draw()
circ.add_calibration( 'h', [0], h_q1)
circ.add_calibration( 'x', [0], h_q1)
circ.add_calibration( 'cx',[0], h_q1)
circ.add_calibration( 'sx',[0], h_q1)
circ.add_calibration( 'id',[0], h_q1)
circ.add_calibration( 'rz',[0], h_q1)
circ.add_calibration( 'reset',[0], h_q1)
backend = FakeOpenPulse3Q()
circ2 = transpile(circ, backend)
print(backend.configuration().basis_gates)
circ2.draw('mpl', idle_wires=False)
result = execute(circ2, backend=FakeOpenPulse3Q()).result();
job = backend.run(circ2)
counts = job.result().get_counts()
plot_histogram(counts)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.