| repo | file | content |
|---|---|---|
| https://github.com/Qiskit/feedback | Qiskit |
import qiskit_alt
qiskit_alt.project.ensure_init(calljulia="juliacall", compile=False)
julia = qiskit_alt.project.julia
Main = julia.Main
julia.Main.zeros(3)
type(julia.Main.zeros(3))
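# Illustrative sketch (assuming the juliacall backend selected above): Julia
# expressions and functions can be called directly through Main.
print(Main.seval("sqrt(2.0)"))  # evaluate a Julia expression from Python
print(Main.sum(Main.zeros(3)))  # call a Julia function on a Julia array -> 0.0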
from qiskit_nature.drivers import UnitsType, Molecule
from qiskit_nature.drivers.second_quantization import ElectronicStructureDriverType, ElectronicStructureMoleculeDriver
# Specify the geometry of the H_2 molecule
geometry = [['H', [0., 0., 0.]],
['H', [0., 0., 0.735]]]
basis = 'sto3g'
molecule = Molecule(geometry=geometry,
charge=0, multiplicity=1)
driver = ElectronicStructureMoleculeDriver(molecule, basis=basis, driver_type=ElectronicStructureDriverType.PYSCF)
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
es_problem = ElectronicStructureProblem(driver)
second_q_op = es_problem.second_q_ops()
fermionic_hamiltonian = second_q_op[0]
qubit_converter = QubitConverter(mapper=JordanWignerMapper())
nature_qubit_op = qubit_converter.convert(fermionic_hamiltonian)
nature_qubit_op.primitive
import qiskit_alt.electronic_structure
fermi_op = qiskit_alt.electronic_structure.fermionic_hamiltonian(geometry, basis)
pauli_op = qiskit_alt.electronic_structure.jordan_wigner(fermi_op)
pauli_op.simplify() # The Julia Pauli operators use a different sorting convention; we sort again for comparison.
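# Optional cross-check (a sketch, not part of the original notebook): even if the
# term ordering differs, the two operators should agree as matrices.
import numpy as np
print(np.allclose(pauli_op.to_matrix(), nature_qubit_op.to_matrix()))  # expected: True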
%run ../bench/jordan_wigner_nature_time.py
nature_times
%run ../bench/jordan_wigner_alt_time.py
alt_times
[t_nature / t_qk_alt for t_nature, t_qk_alt in zip(nature_times, alt_times)]
%run ../bench/fermionic_nature_time.py
nature_times
%run ../bench/fermionic_alt_time.py
alt_times
[t_nature / t_qk_alt for t_nature, t_qk_alt in zip(nature_times, alt_times)]
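# The ../bench scripts are not reproduced here. A minimal sketch of how such a
# timing list could be produced (illustrative only; the actual scripts may differ):
import timeit
def time_fermionic_alt(geometry, basis, repeat=3):
    """Best-of-`repeat` wall time for building the fermionic Hamiltonian."""
    runs = timeit.repeat(
        lambda: qiskit_alt.electronic_structure.fermionic_hamiltonian(geometry, basis),
        number=1,
        repeat=repeat,
    )
    return min(runs)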
from qiskit_alt.pauli_operators import QuantumOps, PauliSum_to_SparsePauliOp
import numpy as np
m = np.random.rand(2**3, 2**3) # 3-qubit operator
m = Main.convert(Main.Matrix, m) # Convert PythonCall.PyArray to a native Julia type
pauli_sum = QuantumOps.PauliSum(m) # This is a wrapped Julia object
PauliSum_to_SparsePauliOp(pauli_sum) # Convert to qiskit.quantum_info.SparsePauliOp
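# Sanity check (a sketch with illustrative names): decomposing a matrix into a
# Pauli sum and converting back should reproduce the original matrix.
m_np = np.random.rand(2**3, 2**3)           # keep a NumPy copy this time
m_jl = Main.convert(Main.Matrix, m_np)      # Julia-side copy
spo = PauliSum_to_SparsePauliOp(QuantumOps.PauliSum(m_jl))
print(np.allclose(spo.to_matrix(), m_np))   # expected: True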
%run ../bench/from_matrix_quantum_info.py
%run ../bench/from_matrix_alt.py
[t_pyqk / t_qk_alt for t_pyqk, t_qk_alt in zip(pyqk_times, qk_alt_times)]
%run ../bench/pauli_from_list_qinfo.py
%run ../bench/pauli_from_list_alt.py
[x / y for x, y in zip(quantum_info_times, qkalt_times)]
import qiskit.tools.jupyter
d = qiskit.__qiskit_version__._version_dict
d['qiskit_alt'] = qiskit_alt.__version__
%qiskit_version_table
# %load_ext julia.magic
# %julia using Random: randstring
# %julia pauli_strings = [randstring("IXYZ", 10) for _ in 1:1000]
# None;
# %julia import Pkg; Pkg.add("BenchmarkTools")
# %julia using BenchmarkTools: @btime
# %julia @btime QuantumOps.PauliSum($pauli_strings)
# None;
# %julia pauli_sum = QuantumOps.PauliSum(pauli_strings);
# %julia println(length(pauli_sum))
# %julia println(pauli_sum[1])
# 6.9 * 2.29 / .343  # Ratio of time to construct PauliSum via qiskit_alt to time in pure Julia
| https://github.com/Qiskit/feedback | Qiskit |
from qiskit import IBMQ
IBMQ.save_account(token='MY_API_TOKEN')
provider = IBMQ.load_account() # loads saved account from disk
from qiskit_ibm_provider import IBMProvider
IBMProvider.save_account(token='MY_API_TOKEN')
provider = IBMProvider() # loads default saved account from disk
IBMProvider.save_account(token='MY_API_TOKEN_2', name="personal")
provider = IBMProvider(name="personal") # loads saved account from disk named "personal"
from qiskit import IBMQ
IBMQ.stored_account() # get saved account from qiskitrc file
from qiskit_ibm_provider import IBMProvider
IBMProvider.saved_accounts() # get saved accounts from qiskit-ibm.json file
# export QE_TOKEN='MY_API_TOKEN' (bash command)
from qiskit import IBMQ
provider = IBMQ.load_account() # loads account from env variables
# export QISKIT_IBM_TOKEN='MY_API_TOKEN' (bash command)
from qiskit_ibm_provider import IBMProvider
provider = IBMProvider() # loads account from env variables
from qiskit import IBMQ
provider = IBMQ.enable_account(token='MY_API_TOKEN') # enable account for current session
from qiskit_ibm_provider import IBMProvider
provider = IBMProvider(token='MY_API_TOKEN') # enable account for current session
from qiskit import IBMQ
provider = IBMQ.load_account() # load saved account
IBMQ.active_account() # check active account
from qiskit_ibm_provider import IBMProvider
provider = IBMProvider() # load saved account
provider.active_account() # check active account
from qiskit import IBMQ
IBMQ.delete_account() # delete saved account from qiskitrc file
from qiskit_ibm_provider import IBMProvider
IBMProvider.delete_account() # delete saved account from saved credentials
provider.backends()
backend = provider.get_backend("ibmq_manila")
print(backend)
from qiskit import QuantumCircuit
# Bell state circuit
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.measure_all()
qc.draw()
from qiskit import transpile
qc = transpile(qc, backend)
job = backend.run(circuits=qc, shots=1024)
result = job.result()
print(result)
from qiskit.visualization import plot_histogram
plot_histogram(result.get_counts())
from qiskit.providers.ibmq.managed import IBMQJobManager
job_manager = IBMQJobManager()
job_set = job_manager.run(long_list_of_circuits, backend=backend)
results = job_set.results()
job = backend.run(long_list_of_circuits) # returns IBMCompositeJob
result = job.result()
provider.jobs()
job = provider.job(job_id="627c7cb57c918115bb9a73fe")
print(job)
| https://github.com/Qiskit/feedback | Qiskit |
from qiskit_cold_atom.providers import ColdAtomProvider
provider = ColdAtomProvider()
for backend in provider.backends():
print(backend)
import numpy as np
from qiskit import QuantumCircuit
backend = provider.get_backend("collective_spin_simulator")
circ_x = QuantumCircuit(1)
circ_x.rlx(np.pi/2, 0)
circ_x.measure_all()
circ_x.draw("mpl")
from qiskit.visualization import plot_histogram
job_x = backend.run(circ_x, shots = 1000, spin=20, seed=14)
plot_histogram(job_x.result().get_counts())
squeez_circ = QuantumCircuit(1, 1)
squeez_circ.rlx(-np.pi/2, 0)
squeez_circ.rlz2(0.3, 0)
squeez_circ.rlz(-np.pi/2, 0)
squeez_circ.rlx(-0.15, 0)
squeez_circ.measure(0, 0)
squeez_circ.draw(output='mpl')
job_squeez = backend.run(squeez_circ, shots = 1000, spin=20, seed=14)
plot_histogram(job_squeez.result().get_counts())
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,5))
fig.tight_layout(pad=5.0)
ax1.set_title("spin squeezing circuit")
ax2.set_title("single rotation circuit")
plot_histogram(job_x.result().get_counts(), ax=ax2)
plot_histogram(job_squeez.result().get_counts(), ax=ax1, color="#9f1853")
from qiskit_cold_atom.applications import FermiHubbard1D
# defining the system
system = FermiHubbard1D(
num_sites = 3, # number of lattice sites
particles_up = 1, # number of spin up particles
particles_down = 1, # number of spin down particles
hop_strength = 2., # parameter J tuning the hopping
int_strength = 1, # parameter U tuning the interaction
potential = [1, 0, -1] # parameters mu tuning the local potential
)
from qiskit_cold_atom.fermions.fermion_circuit_solver import FermionicState
# One spin-up particle in the left site & one spin-down particle in the right site
initial_state = FermionicState([[1, 0, 0], [0, 0, 1]])
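# The occupation-number content of the state can be inspected directly; the
# `occupations` attribute is reused further below when building circuits.
print(initial_state.occupations)  # expected: [[1, 0, 0], [0, 0, 1]]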
from qiskit_nature.operators.second_quantization import FermionicOp
spin_density = FermionicOp([('NIIIII', 1), ('IIINII', -1)])
evolution_times = np.linspace(0.1, 5, 40) # times to simulate
from qiskit_cold_atom.applications import FermionicEvolutionProblem
spin_problem = FermionicEvolutionProblem(system, initial_state, evolution_times, spin_density)
from qiskit_cold_atom.providers.fermionic_tweezer_backend import FermionicTweezerSimulator
from qiskit_cold_atom.applications import TimeEvolutionSolver
fermionic_backend = FermionicTweezerSimulator(n_tweezers=3)
fermionic_solver = TimeEvolutionSolver(backend = fermionic_backend)
from qiskit import Aer
qubit_backend = Aer.get_backend('qasm_simulator')
mapping = 'bravyi_kitaev' # or 'jordan_wigner', 'parity'
shallow_qubit_solver = TimeEvolutionSolver(backend = qubit_backend, map_type = mapping, trotter_steps = 1)
deep_qubit_solver = TimeEvolutionSolver(backend = qubit_backend, map_type = mapping, trotter_steps = 6)
spin_vals_fermions = fermionic_solver.solve(spin_problem)
spin_vals_qubits_shallow = shallow_qubit_solver.solve(spin_problem)
spin_vals_qubits_deep = deep_qubit_solver.solve(spin_problem)
plt.xlabel('evolution time')
plt.ylabel('Spin at site 1')
plt.title('evolution of spin density')
plt.plot(evolution_times, spin_vals_qubits_shallow, '--o', color='#d02670', alpha=0.5, label='qubits, 1 trotter step')
plt.plot(evolution_times, spin_vals_qubits_deep, '-o', color='#08bdba', alpha=0.8, label='qubits, 6 trotter steps')
plt.plot(evolution_times, spin_vals_fermions, '-o', color='#0f62fe', alpha=0.8, label='fermionic backend')
plt.legend()
fermion_circuit = spin_problem.circuits(fermionic_backend.initialize_circuit(initial_state.occupations))[-1]
fermion_circuit.measure_all()
fermion_circuit.draw(output='mpl')
qubit_circuit = deep_qubit_solver.construct_qubit_circuits(spin_problem)[-1]
qubit_circuit.measure_all()
qubit_circuit.decompose().decompose().draw("mpl", idle_wires=False)
| https://github.com/Qiskit/feedback | Qiskit |
import os
print(f"Working directory: '{os.getcwd()}'")
from test.python.transpiler.aqc.sample_data import ORIGINAL_CIRCUIT, INITIAL_THETAS
import numpy as np
from scipy.stats import unitary_group
from time import perf_counter
from qiskit import QuantumCircuit
from qiskit.algorithms.optimizers import L_BFGS_B
from qiskit.quantum_info import Operator
from qiskit.transpiler.synthesis.aqc.aqc import AQC
from qiskit.transpiler.synthesis.aqc.cnot_structures import make_cnot_network
from qiskit.transpiler.synthesis.aqc.cnot_unit_circuit import CNOTUnitCircuit
from qiskit.transpiler.synthesis.aqc.cnot_unit_objective import DefaultCNOTUnitObjective
from qiskit.transpiler.synthesis.aqc.fast_gradient.fast_gradient import FastCNOTUnitObjective
def print_misfits(target_mat: np.ndarray, approx_circ: QuantumCircuit, exe_tm: float):
"""
Calculates and prints out misfit measures.
"""
dim = target_mat.shape[0]
approx_mat = Operator(approx_circ).data
diff = approx_mat - target_mat
hs = np.vdot(approx_mat, target_mat) # HS == Tr(V.H @ U)
fidelity = (1.0 + np.abs(hs) ** 2 / dim) / (dim + 1)
fro_err = 0.5 * (np.linalg.norm(diff, "fro") ** 2) / dim
sin_err = np.linalg.norm(diff, 2)
print("\nApproximation misfit:")
print(f"Cost function based on Frobenius norm: {fro_err:0.5f}")
print(f"Fidelity: {fidelity:0.5f}")
print(f"Max. singular value of (V - U): {sin_err:0.5f}")
print(f"Execution time: {exe_tm:0.3f} secs")
print()
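# Quick sanity check (illustrative): for a perfect approximation V == U the
# fidelity above is exactly 1, since then |Tr(V.H @ U)|**2 / dim == dim.
u = np.eye(4)
hs = np.vdot(u, u)                             # Tr(U.H @ U) = dim = 4
print((1.0 + np.abs(hs) ** 2 / 4) / (4 + 1))   # -> 1.0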
def make_target_matrix(num_qubits: int, target_name: str):
"""Creats a target matrix for testing."""
if target_name == "mcx":
# Define the target matrix: multi-control CNOT gate matrix.
target_matrix = np.eye(2**num_qubits, dtype=np.cfloat)
target_matrix[-2:, -2:] = [[0, 1], [1, 0]]
else:
# Define a random SU target matrix.
dim = 2 ** num_qubits
target_matrix = unitary_group.rvs(dim)
target_matrix /= (np.linalg.det(target_matrix) ** (1.0 / float(dim)))
return target_matrix
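# Quick check (illustrative): the random target should be unitary and, after the
# normalization above, have unit determinant.
tm = make_target_matrix(3, "random")
print(np.allclose(tm @ tm.conj().T, np.eye(tm.shape[0])))  # expected: True
print(np.isclose(np.linalg.det(tm), 1.0))                  # expected: True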
def create_circuit_and_objective(num_qubits: int, cnots: np.ndarray, fast: bool):
"""Instantiates AQC circuit and objective classes."""
circ = CNOTUnitCircuit(num_qubits, cnots)
if fast:
objv = FastCNOTUnitObjective(num_qubits, cnots)
else:
objv = DefaultCNOTUnitObjective(num_qubits, cnots)
return circ, objv
def aqc_example_hardcoded(fast: bool):
tic = perf_counter()
print(f"{'-' * 80}\nRunning \"{'accelerated' if fast else 'default'}\" "
f"AQC implementation on a hardcoded target matrix...\n{'-' * 80}")
# Define the number of qubits, circuit layout and other parameters.
seed = 12345
num_qubits = int(round(np.log2(ORIGINAL_CIRCUIT.shape[0])))
depth = 0 # depth=0 implies maximum depth
cnots = make_cnot_network(
num_qubits=num_qubits, network_layout="spin", connectivity_type="full", depth=depth
)
# Define the target matrix.
target_matrix = ORIGINAL_CIRCUIT
# Create instances of the optimizer, AQC, circuit and objective classes.
optimizer = L_BFGS_B(maxiter=200)
circ, objv = create_circuit_and_objective(num_qubits, cnots, fast=fast)
aqc = AQC(optimizer=optimizer, seed=seed)
# Optimize the circuit to approximate the target matrix.
aqc.compile_unitary(
target_matrix=target_matrix,
approximate_circuit=circ,
approximating_objective=objv,
initial_point=INITIAL_THETAS,
)
toc = perf_counter()
print_misfits(target_matrix, circ, toc - tic)
aqc_example_hardcoded(fast=False)
aqc_example_hardcoded(fast=True)
def aqc_example(target_name: str, depth: int, fast: bool):
tic = perf_counter()
print(f"{'-' * 80}\nRunning {'accelerated' if fast else 'default'} AQC on "
f"'{target_name}' target matrix with random initial guess...\n{'-' * 80}")
# Define the number of qubits, circuit layout and other parameters.
seed = 777
np.random.seed(seed)
num_qubits = 4
cnots = make_cnot_network(
num_qubits=num_qubits, network_layout="spin", connectivity_type="full", depth=depth
)
# Define a target matrix:
target_matrix = make_target_matrix(num_qubits, target_name)
# Create instances of the optimizer, AQC, circuit and objective classes.
optimizer = L_BFGS_B(maxiter=1500)
circ, objv = create_circuit_and_objective(num_qubits, cnots, fast=fast)
aqc = AQC(optimizer=optimizer, seed=seed)
# Optimize the circuit to approximate the target matrix.
aqc.compile_unitary(
target_matrix=target_matrix,
approximate_circuit=circ,
approximating_objective=objv,
initial_point=2 * np.pi * np.random.rand(objv.num_thetas),
)
toc = perf_counter()
print_misfits(target_matrix, circ, toc - tic)
depth = 64
print(f"\n\n******* Circuit depth = {depth}: *******\n")
aqc_example("random", depth, fast=True)
aqc_example("mcx", depth, fast=True)
depth = 40
print(f"\n\n******* Circuit depth = {depth}: *******\n")
aqc_example("random", depth, fast=True)
aqc_example("mcx", depth, fast=True)
aqc_example("mcx", depth, fast=False)
| https://github.com/Qiskit/feedback | Qiskit |
from qiskit_nature.problems.sampling.protein_folding.interactions.random_interaction import (
RandomInteraction,
)
from qiskit_nature.problems.sampling.protein_folding.interactions.miyazawa_jernigan_interaction import (
MiyazawaJerniganInteraction,
)
from qiskit_nature.problems.sampling.protein_folding.peptide.peptide import Peptide
from qiskit_nature.problems.sampling.protein_folding.protein_folding_problem import (
ProteinFoldingProblem,
)
from qiskit_nature.problems.sampling.protein_folding.penalty_parameters import PenaltyParameters
from qiskit.utils import algorithm_globals, QuantumInstance
algorithm_globals.random_seed = 23
main_chain = "APRLRFY"
side_chains = [""] * 7
random_interaction = RandomInteraction()
mj_interaction = MiyazawaJerniganInteraction()
penalty_back = 10
penalty_chiral = 10
penalty_1 = 10
penalty_terms = PenaltyParameters(penalty_chiral, penalty_back, penalty_1)
peptide = Peptide(main_chain, side_chains)
protein_folding_problem = ProteinFoldingProblem(peptide, mj_interaction, penalty_terms)
qubit_op = protein_folding_problem.qubit_op()
print(qubit_op)
from qiskit.circuit.library import RealAmplitudes
from qiskit.algorithms.optimizers import COBYLA
from qiskit.algorithms import NumPyMinimumEigensolver, VQE
from qiskit.opflow import PauliExpectation, CVaRExpectation
from qiskit import execute, Aer
# set classical optimizer
optimizer = COBYLA(maxiter=50)
# set variational ansatz
ansatz = RealAmplitudes(reps=1)
# set the backend
backend_name = "aer_simulator"
backend = QuantumInstance(
Aer.get_backend(backend_name),
shots=8192,
seed_transpiler=algorithm_globals.random_seed,
seed_simulator=algorithm_globals.random_seed,
)
counts = []
values = []
def store_intermediate_result(eval_count, parameters, mean, std):
counts.append(eval_count)
values.append(mean)
# initialize CVaR_alpha objective with alpha = 0.1
cvar_exp = CVaRExpectation(0.1, PauliExpectation())
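# Illustrative sketch of the CVaR_alpha idea (not the opflow internals): average
# only the best (lowest-energy) alpha-fraction of the sampled energies.
import numpy as np
sampled = np.array([0.8, -1.0, 0.2, -0.5])       # hypothetical sampled energies
alpha = 0.5
k = max(1, int(np.ceil(alpha * len(sampled))))
print(np.mean(np.sort(sampled)[:k]))             # CVaR_0.5 estimate -> -0.75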
# initialize VQE using CVaR
vqe = VQE(
expectation=cvar_exp,
optimizer=optimizer,
ansatz=ansatz,
quantum_instance=backend,
callback=store_intermediate_result,
)
raw_result = vqe.compute_minimum_eigenvalue(qubit_op)
print(raw_result)
import matplotlib.pyplot as plt
fig = plt.figure()
plt.plot(counts, values)
plt.ylabel("Conformation Energy")
plt.xlabel("VQE Iterations")
fig.add_axes([0.44, 0.51, 0.44, 0.32])
plt.plot(counts[40:], values[40:])
plt.ylabel("Conformation Energy")
plt.xlabel("VQE Iterations")
plt.show()
result = protein_folding_problem.interpret(raw_result=raw_result)
print(
"The bitstring representing the shape of the protein during optimization is: ",
result.turns_sequence,
)
print("The expanded expression is:", result.get_result_binary_vector())
print(
f"The folded protein will have a main turns sequence of: {result.protein_shape_decoder.main_turns}"
)
print(f"and the following side turns sequences: {result.protein_shape_decoder.side_turns}")
print(result.protein_shape_file_gen.get_xyz_file())
%matplotlib notebook
fig = result.get_figure(title="Protein Structure", ticks=False, grid=False)
fig.show()
peptide = Peptide("APRLR", ["", "", "F", "Y", ""])
protein_folding_problem = ProteinFoldingProblem(peptide, mj_interaction, penalty_terms)
qubit_op = protein_folding_problem.qubit_op()
raw_result = vqe.compute_minimum_eigenvalue(qubit_op)
result_2 = protein_folding_problem.interpret(raw_result=raw_result)
%matplotlib notebook
fig = result_2.get_figure(title="Protein Structure", ticks=False, grid=False)
fig.show()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
| https://github.com/Qiskit/feedback | Qiskit |
from qiskit import QuantumCircuit, transpile
from qiskit.opflow import PauliSumOp
from qiskit.circuit.library import PauliEvolutionGate
from qiskit.transpiler import Layout, CouplingMap, PassManager
from qiskit.transpiler.passes import FullAncillaAllocation
from qiskit.transpiler.passes import EnlargeWithAncilla
from qiskit.transpiler.passes import ApplyLayout
from qiskit.transpiler.passes import SetLayout
from qiskit.transpiler.passes.routing.commuting_2q_gate_routing import (
SwapStrategy,
FindCommutingPauliEvolutions,
Commuting2qGateRouter,
)
# Define the circuit on logical qubits
op = PauliSumOp.from_list([("IZZI", 1), ("ZIIZ", 2), ("ZIZI", 3)])
circ = QuantumCircuit(4)
circ.append(PauliEvolutionGate(op, 1), range(4))
circ.draw("mpl")
PassManager([FindCommutingPauliEvolutions()]).run(circ).draw("mpl")
swap_layers = (
((1, 3), ), # Layer 1: swap qubits 1 & 3
((0, 1), (3, 4)), # Layer 2: simultaneously swap qubits 0 & 1 and 3 & 4
((1, 3), ) # Layer 3: swap qubits 1 & 3
)
cmap = CouplingMap(couplinglist=[(0, 1), (1, 2), (1, 3), (3, 4)])
swap_strat = SwapStrategy(cmap, swap_layers)
print(f"Missing connectivity: {swap_strat.missing_couplings}")
backend_cmap = CouplingMap(couplinglist=[(0, 1), (1, 2), (1, 3), (3, 4)])
def make_passmanager(cmap, swap_strat, initial_layout):
"""Creates the passes needed."""
return PassManager(
[
FindCommutingPauliEvolutions(),
Commuting2qGateRouter(swap_strat),
SetLayout(initial_layout),
FullAncillaAllocation(backend_cmap),
EnlargeWithAncilla(),
ApplyLayout(),
]
)
# Define the swap strategy on qubits before the initial_layout is applied.
swap_cmap = CouplingMap(couplinglist=[(0, 1), (1, 2), (2, 3)])
swap_strat = SwapStrategy(swap_cmap, swap_layers=[[(1, 2)], [(0, 1), (2, 3)]])
# Choose qubits 0, 1, 3, and 4 from the backend coupling map shown above.
backend_cmap = CouplingMap(couplinglist=[(0, 1), (1, 2), (1, 3), (3, 4)])
initial_layout = Layout.from_intlist([0, 1, 3, 4], *circ.qregs)
pm = make_passmanager(backend_cmap, swap_strat, initial_layout)
# Insert swap gates, map to initial_layout and finally enlarge with ancilla.
pm.run(circ).draw("mpl")
pm.run(circ).decompose().decompose().draw("mpl")
op = PauliSumOp.from_list(
[
("IIIZZ", 1), ("IIZIZ", 1), ("IZIIZ", 1), ("ZIIIZ", 2),
("IIZZI", 3), ("IZIZI", 3), ("ZIIZI", 4),
("IZZII", 1), ("ZIZII", 2),
("ZZIII", 2),
]
)
circ = QuantumCircuit(5)
circ.append(PauliEvolutionGate(op, 1), range(5))
circ.draw("mpl")
swap_cmap = CouplingMap(couplinglist=[(0, 1), (1, 2), (1, 3), (3, 4)])
swap_strat = SwapStrategy(swap_cmap, swap_layers=[[(1, 3)], [(0, 1), (3, 4)], [(1, 3)]])
swap_strat.missing_couplings
initial_layout = Layout.from_intlist([0, 1, 2, 3, 4], *circ.qregs)
pm = make_passmanager(backend_cmap, swap_strat, initial_layout)
swapped_circ = pm.run(circ)
swapped_circ.draw("mpl")
tswapped = transpile(swapped_circ, basis_gates=["rz", "cx"], optimization_level=2)
n_cx = tswapped.count_ops()["cx"]
n_xsx = tswapped.count_ops().get("sx", 0) + tswapped.count_ops().get("x", 0)
print(f"Number of CNOTs: {n_cx}\nNumber of X and SX: {n_xsx}.")
print(f"Depth {tswapped.depth()}.")
tswapped.draw("mpl", fold=False)
opt3_circ = transpile(circ, basis_gates=["rz", "cx", "sx", "x"], optimization_level=3, coupling_map=backend_cmap)
n_cx = opt3_circ.count_ops()["cx"]
n_xsx = opt3_circ.count_ops().get("sx", 0) + opt3_circ.count_ops().get("x", 0)
print(f"Number of CNOTs: {n_cx}\nNumber of X and SX: {n_xsx}.")
print(f"Depth {opt3_circ.depth()}.")
opt3_circ.draw("mpl")
| https://github.com/Qiskit/feedback | Qiskit |
from qiskit import Aer
from qiskit.utils import QuantumInstance
from qiskit_nature.algorithms import VQEUCCFactory
from qiskit_nature.algorithms.initial_points.mp2_initial_point import MP2InitialPoint
from qiskit_nature.drivers import Molecule
from qiskit_nature.drivers.second_quantization import (
ElectronicStructureDriverType,
ElectronicStructureMoleculeDriver,
)
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
from qiskit_nature.algorithms import GroundStateEigensolver
from qiskit_nature.transformers.second_quantization.electronic import FreezeCoreTransformer
molecule = Molecule(
geometry=[["H", [0.0, 0.0, 0.0]], ["H", [0.0, 0.0, 0.735]]], charge=0, multiplicity=1
)
driver = ElectronicStructureMoleculeDriver(
molecule, basis="sto3g", driver_type=ElectronicStructureDriverType.PYSCF
)
freeze_core = FreezeCoreTransformer()
problem = ElectronicStructureProblem(driver, transformers=[freeze_core])
qubit_converter = QubitConverter(JordanWignerMapper())
initial_point = MP2InitialPoint()
quantum_instance = QuantumInstance(backend=Aer.get_backend("aer_simulator_statevector"))
vqe_ucc_factory = VQEUCCFactory(quantum_instance, initial_point=initial_point)
vqe_solver = GroundStateEigensolver(qubit_converter, vqe_ucc_factory)
res = vqe_solver.solve(problem)
print(res)
from qiskit.algorithms.optimizers import L_BFGS_B
from qiskit.algorithms import VQE
from qiskit_nature.circuit.library import UCCSD
from qiskit_nature.settings import settings
from qiskit_nature.drivers import Molecule
settings.dict_aux_operators = True
molecule = Molecule(
geometry=[["H", [0.0, 0.0, 0.0]], ["H", [0.0, 0.0, 0.735]]],
charge=0,
multiplicity=1,
)
driver = ElectronicStructureMoleculeDriver(
molecule, basis="sto3g", driver_type=ElectronicStructureDriverType.PYSCF
)
problem = ElectronicStructureProblem(driver)
# generate the second-quantized operators
second_q_ops = problem.second_q_ops()
main_op = second_q_ops["ElectronicEnergy"]
driver_result = problem.grouped_property_transformed
particle_number = driver_result.get_property("ParticleNumber")
electronic_energy = driver_result.get_property("ElectronicEnergy")
num_particles = (particle_number.num_alpha, particle_number.num_beta)
num_spin_orbitals = particle_number.num_spin_orbitals
# setup the classical optimizer for VQE
optimizer = L_BFGS_B()
# setup the qubit converter
converter = QubitConverter(JordanWignerMapper())
# map to qubit operators
qubit_op = converter.convert(main_op, num_particles=num_particles)
# setup the ansatz for VQE
ansatz = UCCSD(
qubit_converter=converter,
num_particles=num_particles,
num_spin_orbitals=num_spin_orbitals,
)
mp2_initial_point = MP2InitialPoint()
mp2_initial_point.grouped_property = driver_result
mp2_initial_point.ansatz = ansatz
initial_point = mp2_initial_point.to_numpy_array()
# set the backend for the quantum computation
backend = Aer.get_backend("aer_simulator_statevector")
# setup and run VQE
algorithm = VQE(
ansatz, optimizer=optimizer, quantum_instance=backend, initial_point=initial_point
)
result = algorithm.compute_minimum_eigenvalue(qubit_op)
print(result.eigenvalue.real)
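# Optional cross-check (a sketch): compare the VQE energy against exact
# diagonalization of the same qubit operator.
from qiskit.algorithms import NumPyMinimumEigensolver
exact = NumPyMinimumEigensolver().compute_minimum_eigenvalue(qubit_op)
print(exact.eigenvalue.real)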
electronic_structure_result = problem.interpret(result)
print(electronic_structure_result)
| https://github.com/Qiskit/feedback | Qiskit |
import pprint
import numpy as np
import qiskit
from qiskit.transpiler import StagedPassManager, PassManager
from qiskit.transpiler.passes import *
from qiskit.transpiler import CouplingMap
from qiskit.transpiler.preset_passmanagers import common
from qiskit.circuit import QuantumCircuit
# Only available starting with Qiskit 0.21.0
print(qiskit.__version__)
default_staged_pm = StagedPassManager()
print(default_staged_pm.stages)
staged_pm = StagedPassManager(stages=['layout', 'translate'])
print(staged_pm.stages)
cmap = CouplingMap.from_heavy_hex(11)
layout_stage = PassManager([VF2Layout(cmap, call_limit=int(5e9))])
layout_stage += common.generate_embed_passmanager(cmap)
translation_stage = common.generate_translation_passmanager(None, ['u', 'cx'])
staged_pm = StagedPassManager(stages=['layout', 'translate'], layout=layout_stage)
staged_pm.translate = translation_stage
staged_pm.draw()
staged_pm.pre_layout = common.generate_unroll_3q(None, ['u', 'cx'])
staged_pm.post_layout = common.generate_pre_op_passmanager(coupling_map=cmap)
staged_pm.pre_translate = PassManager(Depth())
staged_pm.translate.append(Size())
staged_pm.draw()
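# Illustrative: run the custom staged pass manager on a small example circuit to
# see the layout and translation stages applied.
qc = QuantumCircuit(3)
qc.h(0)
qc.cx(0, 1)
qc.cx(1, 2)
print(staged_pm.run(qc).count_ops())  # expect only 'u' and 'cx' operations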
from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager
from qiskit.providers.fake_provider import FakeGuadalupeV2
from qiskit.circuit.library import XGate, HGate, RXGate, PhaseGate, TGate, TdgGate
backend = FakeGuadalupeV2()
opt_level_1_pm = generate_preset_pass_manager(1, backend)
print(type(opt_level_1_pm))
opt_level_1_pm.draw()
circuit = QuantumCircuit(4)
circuit.h(0)
circuit.cx(0, 1)
circuit.cx(0, 1)
circuit.cx(0, 1)
circuit.cx(0, 2)
circuit.cx(0, 3)
circuit.measure_all()
circuit.draw('mpl')
opt_level_1_pm.run(circuit).draw('mpl')
backend_durations = backend.target.durations()
dd_sequence = [XGate(), XGate()]
scheduling_pm = PassManager([
ALAPScheduleAnalysis(backend_durations),
PadDynamicalDecoupling(backend_durations, dd_sequence),
])
inverse_gate_list = [
HGate(),
(RXGate(np.pi / 4), RXGate(-np.pi / 4)),
(PhaseGate(np.pi / 4), PhaseGate(-np.pi / 4)),
(TGate(), TdgGate()),
]
logical_opt = PassManager([
CXCancellation(),
InverseCancellation([HGate(), (RXGate(np.pi / 4), RXGate(-np.pi / 4))])
])
opt_level_1_pm.scheduling = scheduling_pm
opt_level_1_pm.pre_layout = logical_opt
opt_level_1_pm.draw()
opt_level_1_pm.run(circuit).draw('mpl')
from qiskit.converters import dag_to_circuit
def callback(**kwargs):
print(kwargs['pass_'])
intermediate_circuit = dag_to_circuit(kwargs['dag'])
intermediate_circuit.name = 'After: ' + str(kwargs['pass_'])
display(intermediate_circuit.draw('mpl'))
opt_level_1_pm.run(circuit, callback=callback)
| https://github.com/Qiskit/feedback | Qiskit |
# Install qiskit-terra 0.21 or later to use group_commuting()
# ! pip install "qiskit-terra>=0.21"
from collections import defaultdict
from numbers import Number
from typing import Dict
import numpy as np
import retworkx as rx
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators.custom_iterator import CustomIterator
from qiskit.quantum_info.operators.mixins import GroupMixin, LinearMixin
from qiskit.quantum_info.operators.symplectic.base_pauli import BasePauli
from qiskit.quantum_info.operators.symplectic.pauli import Pauli
from qiskit.quantum_info.operators.symplectic.pauli_table import PauliTable
from qiskit.quantum_info.operators.symplectic.stabilizer_table import StabilizerTable
from qiskit.quantum_info.operators.linear_op import LinearOp
from qiskit.utils.deprecation import deprecate_function
# If a version incompatibility occurs with terra 0.21, use the PauliList and SparsePauliOp classes defined below.
class PauliList(BasePauli, LinearMixin, GroupMixin):
r"""List of N-qubit Pauli operators.
This class is an efficient representation of a list of
:class:`Pauli` operators. It supports 1D numpy array indexing
returning a :class:`Pauli` for integer indexes or a
:class:`PauliList` for slice or list indices.
**Initialization**
A PauliList object can be initialized in several ways.
``PauliList(list[str])``
where strings are same representation with :class:`~qiskit.quantum_info.Pauli`.
``PauliList(Pauli) and PauliList(list[Pauli])``
where Pauli is :class:`~qiskit.quantum_info.Pauli`.
``PauliList.from_symplectic(z, x, phase)``
where ``z`` and ``x`` are 2 dimensional boolean ``numpy.ndarrays`` and ``phase`` is
an integer in ``[0, 1, 2, 3]``.
For example,
.. jupyter-execute::
import numpy as np
from qiskit.quantum_info import Pauli, PauliList
# 1. init from list[str]
pauli_list = PauliList(["II", "+ZI", "-iYY"])
print("1. ", pauli_list)
pauli1 = Pauli("iXI")
pauli2 = Pauli("iZZ")
# 2. init from Pauli
print("2. ", PauliList(pauli1))
# 3. init from list[Pauli]
print("3. ", PauliList([pauli1, pauli2]))
# 4. init from np.ndarray
z = np.array([[True, True], [False, False]])
x = np.array([[False, True], [True, False]])
phase = np.array([0, 1])
pauli_list = PauliList.from_symplectic(z, x, phase)
print("4. ", pauli_list)
**Data Access**
The individual Paulis can be accessed and updated using the ``[]``
operator, which accepts integers, lists, or slices for selecting subsets
of the PauliList. If an integer is given, it returns a Pauli, not a PauliList.
.. jupyter-execute::
pauli_list = PauliList(["XX", "ZZ", "IZ"])
print("Integer: ", repr(pauli_list[1]))
print("List: ", repr(pauli_list[[0, 2]]))
print("Slice: ", repr(pauli_list[0:2]))
**Iteration**
Rows in the Pauli table can be iterated over like a list. Iteration can
also be done using the label or matrix representation of each row using the
:meth:`label_iter` and :meth:`matrix_iter` methods.
"""
# Set the max number of qubits * paulis before string truncation
__truncate__ = 2000
def __init__(self, data):
"""Initialize the PauliList.
Args:
data (Pauli or list): input data for Paulis. If input is a list each item in the list
must be a Pauli object or Pauli str.
Raises:
QiskitError: if input array is invalid shape.
Additional Information:
The input array is not copied so multiple Pauli tables
can share the same underlying array.
"""
if isinstance(data, BasePauli):
base_z, base_x, base_phase = data._z, data._x, data._phase
elif isinstance(data, StabilizerTable):
# Conversion from legacy StabilizerTable
base_z, base_x, base_phase = self._from_array(data.Z, data.X, 2 * data.phase)
elif isinstance(data, PauliTable):
# Conversion from legacy PauliTable
base_z, base_x, base_phase = self._from_array(data.Z, data.X)
else:
# Conversion as iterable of Paulis
base_z, base_x, base_phase = self._from_paulis(data)
# Initialize BasePauli
super().__init__(base_z, base_x, base_phase)
# ---------------------------------------------------------------------
# Representation conversions
# ---------------------------------------------------------------------
@property
def settings(self):
"""Return settings."""
return {"data": self.to_labels()}
def __array__(self, dtype=None):
"""Convert to numpy array"""
# pylint: disable=unused-argument
shape = (len(self),) + 2 * (2**self.num_qubits,)
ret = np.zeros(shape, dtype=complex)
for i, mat in enumerate(self.matrix_iter()):
ret[i] = mat
return ret
@staticmethod
def _from_paulis(data):
"""Construct a PauliList from a list of Pauli data.
Args:
data (iterable): list of Pauli data.
Returns:
PauliList: the constructed PauliList.
Raises:
QiskitError: If the input list is empty or contains invalid
Pauli strings.
"""
if not isinstance(data, (list, tuple, set, np.ndarray)):
data = [data]
num_paulis = len(data)
if num_paulis == 0:
raise QiskitError("Input Pauli list is empty.")
paulis = []
for i in data:
if not isinstance(i, Pauli):
paulis.append(Pauli(i))
else:
paulis.append(i)
num_qubits = paulis[0].num_qubits
base_z = np.zeros((num_paulis, num_qubits), dtype=bool)
base_x = np.zeros((num_paulis, num_qubits), dtype=bool)
base_phase = np.zeros(num_paulis, dtype=int)
for i, pauli in enumerate(paulis):
base_z[i] = pauli._z
base_x[i] = pauli._x
base_phase[i] = pauli._phase
return base_z, base_x, base_phase
def __repr__(self):
"""Display representation."""
return self._truncated_str(True)
def __str__(self):
"""Print representation."""
return self._truncated_str(False)
def _truncated_str(self, show_class):
stop = self._num_paulis
if self.__truncate__:
max_paulis = self.__truncate__ // self.num_qubits
if self._num_paulis > max_paulis:
stop = max_paulis
labels = [str(self[i]) for i in range(stop)]
prefix = "PauliList(" if show_class else ""
tail = ")" if show_class else ""
if stop != self._num_paulis:
suffix = ", ...]" + tail
else:
suffix = "]" + tail
list_str = np.array2string(
np.array(labels), threshold=stop + 1, separator=", ", prefix=prefix, suffix=suffix
)
return prefix + list_str[:-1] + suffix
def __eq__(self, other):
"""Entrywise comparison of Pauli equality."""
if not isinstance(other, PauliList):
other = PauliList(other)
if not isinstance(other, BasePauli):
return False
return self._eq(other)
def equiv(self, other):
"""Entrywise comparison of Pauli equivalence up to global phase.
Args:
other (PauliList or Pauli): a comparison object.
Returns:
np.ndarray: An array of True or False for entrywise equivalence
of the current table.
"""
if not isinstance(other, PauliList):
other = PauliList(other)
return np.all(self.z == other.z, axis=1) & np.all(self.x == other.x, axis=1)
# ---------------------------------------------------------------------
# Direct array access
# ---------------------------------------------------------------------
@property
def phase(self):
"""Return the phase exponent of the PauliList."""
# Convert internal ZX-phase convention to group phase convention
return np.mod(self._phase - self._count_y(), 4)
@phase.setter
def phase(self, value):
# Convert group phase convention to internal ZX-phase convention
self._phase[:] = np.mod(value + self._count_y(), 4)
@property
def x(self):
"""The x array for the symplectic representation."""
return self._x
@x.setter
def x(self, val):
self._x[:] = val
@property
def z(self):
"""The z array for the symplectic representation."""
return self._z
@z.setter
def z(self, val):
self._z[:] = val
# ---------------------------------------------------------------------
# Size Properties
# ---------------------------------------------------------------------
@property
def shape(self):
"""The full shape of the :meth:`array`"""
return self._num_paulis, self.num_qubits
@property
def size(self):
"""The number of Pauli rows in the table."""
return self._num_paulis
def __len__(self):
"""Return the number of Pauli rows in the table."""
return self._num_paulis
# ---------------------------------------------------------------------
# Pauli Array methods
# ---------------------------------------------------------------------
def __getitem__(self, index):
"""Return a view of the PauliList."""
# Returns a view of specified rows of the PauliList
# This supports all slicing operations the underlying array supports.
if isinstance(index, tuple):
if len(index) == 1:
index = index[0]
elif len(index) > 2:
raise IndexError(f"Invalid PauliList index {index}")
# Row-only indexing
if isinstance(index, (int, np.integer)):
# Single Pauli
return Pauli(
BasePauli(
self._z[np.newaxis, index],
self._x[np.newaxis, index],
self._phase[np.newaxis, index],
)
)
elif isinstance(index, (slice, list, np.ndarray)):
# Sub-Table view
return PauliList(BasePauli(self._z[index], self._x[index], self._phase[index]))
# Row and Qubit indexing
return PauliList((self._z[index], self._x[index], 0))
def __setitem__(self, index, value):
"""Update PauliList."""
if isinstance(index, tuple):
if len(index) == 1:
index = index[0]
elif len(index) > 2:
raise IndexError(f"Invalid PauliList index {index}")
# Modify specified rows of the PauliList
if not isinstance(value, PauliList):
value = PauliList(value)
self._z[index] = value._z
self._x[index] = value._x
if not isinstance(index, tuple):
# Row-only indexing
self._phase[index] = value._phase
else:
# Row and Qubit indexing
self._phase[index[0]] += value._phase
self._phase %= 4
def delete(self, ind, qubit=False):
"""Return a copy with Pauli rows deleted from table.
When deleting qubits the qubit index is the same as the
column index of the underlying :attr:`X` and :attr:`Z` arrays.
Args:
ind (int or list): index(es) to delete.
qubit (bool): if True delete qubit columns, otherwise delete
Pauli rows (Default: False).
Returns:
PauliList: the resulting table with the entries removed.
Raises:
QiskitError: if ind is out of bounds for the array size or
number of qubits.
"""
if isinstance(ind, int):
ind = [ind]
# Row deletion
if not qubit:
if max(ind) >= len(self):
raise QiskitError(
"Indices {} are not all less than the size"
" of the PauliList ({})".format(ind, len(self))
)
z = np.delete(self._z, ind, axis=0)
x = np.delete(self._x, ind, axis=0)
phase = np.delete(self._phase, ind)
return PauliList(BasePauli(z, x, phase))
# Column (qubit) deletion
if max(ind) >= self.num_qubits:
raise QiskitError(
"Indices {} are not all less than the number of"
" qubits in the PauliList ({})".format(ind, self.num_qubits)
)
z = np.delete(self._z, ind, axis=1)
x = np.delete(self._x, ind, axis=1)
# Use self.phase, not self._phase as deleting qubits can change the
# ZX phase convention
return PauliList.from_symplectic(z, x, self.phase)
def insert(self, ind, value, qubit=False):
"""Insert Pauli's into the table.
When inserting qubits the qubit index is the same as the
column index of the underlying :attr:`X` and :attr:`Z` arrays.
Args:
ind (int): index to insert at.
value (PauliList): values to insert.
qubit (bool): if True insert qubit columns, otherwise insert
Pauli rows (Default: False).
Returns:
PauliList: the resulting table with the entries inserted.
Raises:
QiskitError: if the insertion index is invalid.
"""
if not isinstance(ind, int):
raise QiskitError("Insert index must be an integer.")
if not isinstance(value, PauliList):
value = PauliList(value)
# Row insertion
size = self._num_paulis
if not qubit:
if ind > size:
raise QiskitError(
"Index {} is larger than the number of rows in the"
" PauliList ({}).".format(ind, size)
)
base_z = np.insert(self._z, ind, value._z, axis=0)
base_x = np.insert(self._x, ind, value._x, axis=0)
base_phase = np.insert(self._phase, ind, value._phase)
return PauliList(BasePauli(base_z, base_x, base_phase))
# Column insertion
if ind > self.num_qubits:
raise QiskitError(
"Index {} is greater than number of qubits"
" in the PauliList ({})".format(ind, self.num_qubits)
)
if len(value) == 1:
# Pad blocks to correct size
value_x = np.vstack(size * [value.x])
value_z = np.vstack(size * [value.z])
value_phase = np.vstack(size * [value.phase])
elif len(value) == size:
# Blocks are already correct size
value_x = value.x
value_z = value.z
value_phase = value.phase
else:
# Blocks are incorrect size
raise QiskitError(
"Input PauliList must have a single row, or"
" the same number of rows as the Pauli Table"
" ({}).".format(size)
)
# Build new array by blocks
z = np.hstack([self.z[:, :ind], value_z, self.z[:, ind:]])
x = np.hstack([self.x[:, :ind], value_x, self.x[:, ind:]])
phase = self.phase + value_phase
return PauliList.from_symplectic(z, x, phase)
def argsort(self, weight=False, phase=False):
"""Return indices for sorting the rows of the table.
The default sort method is lexicographic sorting by qubit number.
By using the `weight` kwarg the output can additionally be sorted
by the number of non-identity terms in the Pauli, where the set of
all Paulis of a given weight are still ordered lexicographically.
Args:
weight (bool): Optionally sort by weight if True (Default: False).
phase (bool): Optionally sort by phase before weight or order
(Default: False).
Returns:
array: the indices for sorting the table.
"""
# Get order of each Pauli using
# I => 0, X => 1, Y => 2, Z => 3
x = self.x
z = self.z
order = 1 * (x & ~z) + 2 * (x & z) + 3 * (~x & z)
phases = self.phase
# Optionally get the weight of Pauli
# This is the number of non identity terms
if weight:
weights = np.sum(x | z, axis=1)
# To preserve ordering between successive sorts we
# use the 'stable' sort method
indices = np.arange(self._num_paulis)
# Initial sort by phases
sort_inds = phases.argsort(kind="stable")
indices = indices[sort_inds]
order = order[sort_inds]
if phase:
phases = phases[sort_inds]
if weight:
weights = weights[sort_inds]
# Sort by order
for i in range(self.num_qubits):
sort_inds = order[:, i].argsort(kind="stable")
order = order[sort_inds]
indices = indices[sort_inds]
if weight:
weights = weights[sort_inds]
if phase:
phases = phases[sort_inds]
# If using weights we implement a sort by total number
# of non-identity Paulis
if weight:
sort_inds = weights.argsort(kind="stable")
indices = indices[sort_inds]
phases = phases[sort_inds]
# If sorting by phase we perform a final sort by the phase value
# of each pauli
if phase:
indices = indices[phases.argsort(kind="stable")]
return indices
def sort(self, weight=False, phase=False):
"""Sort the rows of the table.
The default sort method is lexicographic sorting by qubit number.
By using the `weight` kwarg the output can additionally be sorted
by the number of non-identity terms in the Pauli, where the set of
all Paulis of a given weight are still ordered lexicographically.
**Example**
Consider sorting a random ordering of all 2-qubit Paulis
.. jupyter-execute::
from numpy.random import shuffle
from qiskit.quantum_info.operators import PauliList
# 2-qubit labels
labels = ['II', 'IX', 'IY', 'IZ', 'XI', 'XX', 'XY', 'XZ',
'YI', 'YX', 'YY', 'YZ', 'ZI', 'ZX', 'ZY', 'ZZ']
# Shuffle Labels
shuffle(labels)
pt = PauliList(labels)
print('Initial Ordering')
print(pt)
# Lexicographic Ordering
srt = pt.sort()
print('Lexicographically sorted')
print(srt)
# Weight Ordering
srt = pt.sort(weight=True)
print('Weight sorted')
print(srt)
Args:
weight (bool): optionally sort by weight if True (Default: False).
phase (bool): Optionally sort by phase before weight or order
(Default: False).
Returns:
PauliList: a sorted copy of the original table.
"""
return self[self.argsort(weight=weight, phase=phase)]
def unique(self, return_index=False, return_counts=False):
"""Return unique Paulis from the table.
**Example**
.. jupyter-execute::
from qiskit.quantum_info.operators import PauliList
pt = PauliList(['X', 'Y', '-X', 'I', 'I', 'Z', 'X', 'iZ'])
unique = pt.unique()
print(unique)
Args:
return_index (bool): If True, also return the indices that
result in the unique array.
(Default: False)
return_counts (bool): If True, also return the number of times
each unique item appears in the table.
Returns:
PauliList: unique
the table of the unique rows.
unique_indices: np.ndarray, optional
The indices of the first occurrences of the unique values in
the original array. Only provided if ``return_index`` is True.
unique_counts: np.array, optional
The number of times each of the unique values comes up in the
original array. Only provided if ``return_counts`` is True.
"""
# Check if we need to stack the phase array
if np.any(self._phase != self._phase[0]):
# Create a single array of Paulis and phases for calling np.unique on
# so that we treat differently phased Paulis as unique
array = np.hstack([self._z, self._x, self.phase.reshape((self.phase.shape[0], 1))])
else:
# All Paulis have the same phase so we only need to sort the array
array = np.hstack([self._z, self._x])
# Get indexes of unique entries
if return_counts:
_, index, counts = np.unique(array, return_index=True, return_counts=True, axis=0)
else:
_, index = np.unique(array, return_index=True, axis=0)
# Sort the index so we return unique rows in the original array order
sort_inds = index.argsort()
index = index[sort_inds]
unique = PauliList(BasePauli(self._z[index], self._x[index], self._phase[index]))
# Concatenate return tuples
ret = (unique,)
if return_index:
ret += (index,)
if return_counts:
ret += (counts[sort_inds],)
if len(ret) == 1:
return ret[0]
return ret
# ---------------------------------------------------------------------
# BaseOperator methods
# ---------------------------------------------------------------------
def tensor(self, other):
"""Return the tensor product with each Pauli in the list.
Args:
other (PauliList): another PauliList.
Returns:
PauliList: the list of tensor product Paulis.
Raises:
QiskitError: if other cannot be converted to a PauliList, does
not have either 1 or the same number of Paulis as
the current list.
"""
if not isinstance(other, PauliList):
other = PauliList(other)
return PauliList(super().tensor(other))
def expand(self, other):
"""Return the expand product of each Pauli in the list.
Args:
other (PauliList): another PauliList.
Returns:
PauliList: the list of tensor product Paulis.
Raises:
QiskitError: if other cannot be converted to a PauliList, does
not have either 1 or the same number of Paulis as
the current list.
"""
if not isinstance(other, PauliList):
other = PauliList(other)
if len(other) not in [1, len(self)]:
raise QiskitError(
"Incompatible PauliLists. Other list must "
"have either 1 or the same number of Paulis."
)
return PauliList(super().expand(other))
def compose(self, other, qargs=None, front=False, inplace=False):
"""Return the composition self∘other for each Pauli in the list.
Args:
other (PauliList): another PauliList.
qargs (None or list): qubits to apply dot product on (Default: None).
front (bool): If True use `dot` composition method [default: False].
inplace (bool): If True update in-place (default: False).
Returns:
PauliList: the list of composed Paulis.
Raises:
QiskitError: if other cannot be converted to a PauliList, does
not have either 1 or the same number of Paulis as
the current list, or has the wrong number of qubits
for the specified qargs.
"""
if qargs is None:
qargs = getattr(other, "qargs", None)
if not isinstance(other, PauliList):
other = PauliList(other)
if len(other) not in [1, len(self)]:
raise QiskitError(
"Incompatible PauliLists. Other list must "
"have either 1 or the same number of Paulis."
)
return PauliList(super().compose(other, qargs=qargs, front=front, inplace=inplace))
# pylint: disable=arguments-differ
def dot(self, other, qargs=None, inplace=False):
"""Return the composition other∘self for each Pauli in the list.
Args:
other (PauliList): another PauliList.
qargs (None or list): qubits to apply dot product on (Default: None).
inplace (bool): If True update in-place (default: False).
Returns:
PauliList: the list of composed Paulis.
Raises:
QiskitError: if other cannot be converted to a PauliList, does
not have either 1 or the same number of Paulis as
the current list, or has the wrong number of qubits
for the specified qargs.
"""
return self.compose(other, qargs=qargs, front=True, inplace=inplace)
def _add(self, other, qargs=None):
"""Append two PauliLists.
If ``qargs`` are specified the other operator will be added
assuming it is identity on all other subsystems.
Args:
other (PauliList): another table.
qargs (None or list): optional subsystems to add on
(Default: None)
Returns:
PauliList: the concatenated list self + other.
"""
if qargs is None:
qargs = getattr(other, "qargs", None)
if not isinstance(other, PauliList):
other = PauliList(other)
self._op_shape._validate_add(other._op_shape, qargs)
base_phase = np.hstack((self._phase, other._phase))
if qargs is None or (sorted(qargs) == qargs and len(qargs) == self.num_qubits):
base_z = np.vstack([self._z, other._z])
base_x = np.vstack([self._x, other._x])
else:
# Pad other with identity and then add
padded = BasePauli(
np.zeros((other.size, self.num_qubits), dtype=bool),
np.zeros((other.size, self.num_qubits), dtype=bool),
np.zeros(other.size, dtype=int),
)
padded = padded.compose(other, qargs=qargs, inplace=True)
base_z = np.vstack([self._z, padded._z])
base_x = np.vstack([self._x, padded._x])
return PauliList(BasePauli(base_z, base_x, base_phase))
def _multiply(self, other):
"""Multiply each Pauli in the list by a phase.
Args:
other (complex or array): a complex number in [1, -1j, -1, 1j]
Returns:
PauliList: the list of Paulis other * self.
Raises:
QiskitError: if the phase is not in the set [1, -1j, -1, 1j].
"""
return PauliList(super()._multiply(other))
def conjugate(self):
"""Return the conjugate of each Pauli in the list."""
return PauliList(super().conjugate())
def transpose(self):
"""Return the transpose of each Pauli in the list."""
return PauliList(super().transpose())
def adjoint(self):
"""Return the adjoint of each Pauli in the list."""
return PauliList(super().adjoint())
def inverse(self):
"""Return the inverse of each Pauli in the list."""
return PauliList(super().adjoint())
# ---------------------------------------------------------------------
# Utility methods
# ---------------------------------------------------------------------
def commutes(self, other, qargs=None):
"""Return True for each Pauli that commutes with other.
Args:
other (PauliList): another PauliList operator.
qargs (list): qubits to apply dot product on (default: None).
Returns:
bool: True if Paulis commute, False if they anti-commute.
"""
if qargs is None:
qargs = getattr(other, "qargs", None)
if not isinstance(other, BasePauli):
other = PauliList(other)
return super().commutes(other, qargs=qargs)
def anticommutes(self, other, qargs=None):
"""Return True if other Pauli that anticommutes with other.
Args:
other (PauliList): another PauliList operator.
qargs (list): qubits to apply dot product on (default: None).
Returns:
bool: True if Paulis anti-commute, False if they commute.
"""
return np.logical_not(self.commutes(other, qargs=qargs))
def commutes_with_all(self, other):
"""Return indexes of rows that commute other.
If other is a multi-row Pauli list the returned vector indexes rows
of the current PauliList that commute with *all* Pauli's in other.
If no rows satisfy the condition the returned array will be empty.
Args:
other (PauliList): a single Pauli or multi-row PauliList.
Returns:
array: index array of the commuting rows.
"""
return self._commutes_with_all(other)
def anticommutes_with_all(self, other):
"""Return indexes of rows that commute other.
If other is a multi-row Pauli list the returned vector indexes rows
of the current PauliList that anti-commute with *all* Pauli's in other.
If no rows satisfy the condition the returned array will be empty.
Args:
other (PauliList): a single Pauli or multi-row PauliList.
Returns:
array: index array of the anti-commuting rows.
"""
return self._commutes_with_all(other, anti=True)
def _commutes_with_all(self, other, anti=False):
"""Return row indexes that commute with all rows in another PauliList.
Args:
other (PauliList): a PauliList.
anti (bool): if True return rows that anti-commute, otherwise
return rows that commute (Default: False).
Returns:
array: index array of commuting or anti-commuting row.
"""
if not isinstance(other, PauliList):
other = PauliList(other)
comms = self.commutes(other[0])
(inds,) = np.where(comms == int(not anti))
for pauli in other[1:]:
comms = self[inds].commutes(pauli)
(new_inds,) = np.where(comms == int(not anti))
if new_inds.size == 0:
# No commuting rows
return new_inds
inds = inds[new_inds]
return inds
def evolve(self, other, qargs=None, frame="h"):
r"""Evolve the Pauli by a Clifford.
This returns the Pauli :math:`P^\prime = C^\dagger.P.C`.
By choosing the parameter frame='s', this function returns the Schrödinger evolution of the Pauli
:math:`P^\prime = C.P.C^\dagger`. This option yields a faster calculation.
Args:
other (Pauli or Clifford or QuantumCircuit): The Clifford operator to evolve by.
qargs (list): a list of qubits to apply the Clifford to.
frame (string): 'h' for Heisenberg or 's' for Schrödinger framework.
Returns:
Pauli: the Pauli :math:`C^\dagger.P.C`.
Raises:
QiskitError: if the Clifford number of qubits and qargs don't match.
"""
from qiskit.circuit import Instruction, QuantumCircuit
from qiskit.quantum_info.operators.symplectic.clifford import Clifford
if qargs is None:
qargs = getattr(other, "qargs", None)
if not isinstance(other, (BasePauli, Instruction, QuantumCircuit, Clifford)):
# Convert to a PauliList
other = PauliList(other)
return PauliList(super().evolve(other, qargs=qargs, frame=frame))
def to_labels(self, array=False):
r"""Convert a PauliList to a list Pauli string labels.
For large PauliLists converting using the ``array=True``
kwarg will be more efficient since it allocates memory for
the full Numpy array of labels in advance.
.. list-table:: Pauli Representations
:header-rows: 1
* - Label
- Symplectic
- Matrix
* - ``"I"``
- :math:`[0, 0]`
- :math:`\begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}`
* - ``"X"``
- :math:`[1, 0]`
- :math:`\begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}`
* - ``"Y"``
- :math:`[1, 1]`
- :math:`\begin{bmatrix} 0 & -i \\ i & 0 \end{bmatrix}`
* - ``"Z"``
- :math:`[0, 1]`
- :math:`\begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}`
Args:
array (bool): return a Numpy array if True, otherwise
return a list (Default: False).
Returns:
list or array: The rows of the PauliList in label form.
"""
if (self.phase == 1).any():
prefix_len = 2
elif (self.phase > 0).any():
prefix_len = 1
else:
prefix_len = 0
str_len = self.num_qubits + prefix_len
ret = np.zeros(self.size, dtype=f"<U{str_len}")
iterator = self.label_iter()
for i in range(self.size):
ret[i] = next(iterator)
if array:
return ret
return ret.tolist()
def to_matrix(self, sparse=False, array=False):
r"""Convert to a list or array of Pauli matrices.
For large PauliLists converting using the ``array=True``
kwarg will be more efficient since it allocates memory for a full
rank-3 Numpy array of matrices in advance.
.. list-table:: Pauli Representations
:header-rows: 1
* - Label
- Symplectic
- Matrix
* - ``"I"``
- :math:`[0, 0]`
- :math:`\begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}`
* - ``"X"``
- :math:`[1, 0]`
- :math:`\begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}`
* - ``"Y"``
- :math:`[1, 1]`
- :math:`\begin{bmatrix} 0 & -i \\ i & 0 \end{bmatrix}`
* - ``"Z"``
- :math:`[0, 1]`
- :math:`\begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}`
Args:
sparse (bool): if True return sparse CSR matrices, otherwise
return dense Numpy arrays (Default: False).
array (bool): return as rank-3 numpy array if True, otherwise
return a list of Numpy arrays (Default: False).
Returns:
list: A list of dense Pauli matrices if `array=False` and `sparse=False`.
list: A list of sparse Pauli matrices if `array=False` and `sparse=True`.
array: A dense rank-3 array of Pauli matrices if `array=True`.
"""
if not array:
# We return a list of Numpy array matrices
return list(self.matrix_iter(sparse=sparse))
# For efficiency we also allow returning a single rank-3
# array where first index is the Pauli row, and second two
# indices are the matrix indices
dim = 2**self.num_qubits
ret = np.zeros((self.size, dim, dim), dtype=complex)
iterator = self.matrix_iter(sparse=sparse)
for i in range(self.size):
ret[i] = next(iterator)
return ret
# ---------------------------------------------------------------------
# Custom Iterators
# ---------------------------------------------------------------------
def label_iter(self):
"""Return a label representation iterator.
This is a lazy iterator that converts each row into the string
label only as it is used. To convert the entire table to labels use
the :meth:`to_labels` method.
Returns:
LabelIterator: label iterator object for the PauliList.
"""
class LabelIterator(CustomIterator):
"""Label representation iteration and item access."""
def __repr__(self):
return f"<PauliList_label_iterator at {hex(id(self))}>"
def __getitem__(self, key):
return self.obj._to_label(self.obj._z[key], self.obj._x[key], self.obj._phase[key])
return LabelIterator(self)
def matrix_iter(self, sparse=False):
"""Return a matrix representation iterator.
This is a lazy iterator that converts each row into the Pauli matrix
representation only as it is used. To convert the entire table to
matrices use the :meth:`to_matrix` method.
Args:
sparse (bool): optionally return sparse CSR matrices if True,
otherwise return Numpy array matrices
(Default: False)
Returns:
MatrixIterator: matrix iterator object for the PauliList.
"""
class MatrixIterator(CustomIterator):
"""Matrix representation iteration and item access."""
def __repr__(self):
return f"<PauliList_matrix_iterator at {hex(id(self))}>"
def __getitem__(self, key):
return self.obj._to_matrix(
self.obj._z[key], self.obj._x[key], self.obj._phase[key], sparse=sparse
)
return MatrixIterator(self)
# ---------------------------------------------------------------------
# Class methods
# ---------------------------------------------------------------------
@classmethod
def from_symplectic(cls, z, x, phase=0):
"""Construct a PauliList from a symplectic data.
Args:
z (np.ndarray): 2D boolean Numpy array.
x (np.ndarray): 2D boolean Numpy array.
phase (np.ndarray or None): Optional, 1D integer array from Z_4.
Returns:
PauliList: the constructed PauliList.
"""
base_z, base_x, base_phase = cls._from_array(z, x, phase)
return cls(BasePauli(base_z, base_x, base_phase))
def _noncommutation_graph(self, qubit_wise):
"""Create an edge list representing the non-commutation graph (Pauli Graph).
An edge (i, j) is present if operators i and j do not commute.
Args:
qubit_wise (bool): whether the commutation rule is applied to the whole operator,
or on a per-qubit basis.
Returns:
List[Tuple[int, int]]: A list of pairs of indices of the PauliList that do not commute.
"""
# convert a Pauli operator into int vector where {I: 0, X: 2, Y: 3, Z: 1}
mat1 = np.array(
[op.z + 2 * op.x for op in self],
dtype=np.int8,
)
mat2 = mat1[:, None]
qubit_commutation_mat = (mat1 * mat2) * (mat1 - mat2)
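# Over this encoding, a * b * (a - b) is nonzero exactly when the two
# single-qubit factors differ and neither is the identity, i.e. when the
# Paulis anticommute on that qubit.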
# adjacency_mat[i, j] is True if an edge (i, j) is present, i.e. i and j are non-commuting
if qubit_wise:
adjacency_mat = np.logical_or.reduce(qubit_commutation_mat, axis=2)
else:
adjacency_mat = np.logical_xor.reduce(qubit_commutation_mat, axis=2)
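# Whole-operator case: two Paulis anticommute iff they anticommute on an odd
# number of qubit positions, hence the XOR (parity) reduction instead of OR.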
# convert into list where tuple elements are non-commuting operators
return list(zip(*np.where(np.triu(adjacency_mat, k=1))))
def _create_graph(self, qubit_wise):
"""Transform measurement operator grouping problem into graph coloring problem
Args:
qubit_wise (bool): whether the commutation rule is applied to the whole operator,
or on a per-qubit basis.
Returns:
retworkx.PyGraph: An undirected graph whose nodes index the Pauli terms
and whose edges connect non-commuting pairs.
"""
edges = self._noncommutation_graph(qubit_wise)
graph = rx.PyGraph()
graph.add_nodes_from(range(self.size))
graph.add_edges_from_no_data(edges)
return graph
def group_qubit_wise_commuting(self):
"""Partition a PauliList into sets of mutually qubit-wise commuting Pauli strings.
Returns:
List[PauliList]: List of PauliLists where each PauliList contains qubit-wise commuting Pauli operators.
"""
return self.group_commuting(qubit_wise=True)
def group_commuting(self, qubit_wise=False):
"""Partition a PauliList into sets of commuting Pauli strings.
Args:
qubit_wise (bool): whether the commutation rule is applied to the whole operator,
or on a per-qubit basis. For example:
.. code-block:: python
>>> op = PauliList(["XX", "YY", "IZ", "ZZ"])
>>> op.group_commuting()
[PauliList(['XX', 'YY']), PauliList(['IZ', 'ZZ'])]
>>> op.group_commuting(qubit_wise=True)
[PauliList(['XX']), PauliList(['YY']), PauliList(['IZ', 'ZZ'])]
Returns:
List[PauliList]: List of PauliLists where each PauliList contains commuting Pauli operators.
"""
graph = self._create_graph(qubit_wise)
# Keys in coloring_dict are nodes, values are colors
coloring_dict = rx.graph_greedy_color(graph)
groups = defaultdict(list)
for idx, color in coloring_dict.items():
groups[color].append(idx)
return [self[group] for group in groups.values()]
class SparsePauliOp(LinearOp):
"""Sparse N-qubit operator in a Pauli basis representation.
This is a sparse representation of an N-qubit matrix
:class:`~qiskit.quantum_info.Operator` in terms of N-qubit
:class:`~qiskit.quantum_info.PauliList` and complex coefficients.
It can be used for performing operator arithmetic for hundreds of qubits
if the number of non-zero Pauli basis terms is sufficiently small.
The Pauli basis components are stored as a
:class:`~qiskit.quantum_info.PauliList` object and can be accessed
using the :attr:`~SparsePauliOp.paulis` attribute. The coefficients
are stored as a complex Numpy array vector and can be accessed using
the :attr:`~SparsePauliOp.coeffs` attribute.
"""
def __init__(self, data, coeffs=None, *, ignore_pauli_phase=False, copy=True):
"""Initialize an operator object.
Args:
data (PauliList or SparsePauliOp or PauliTable or Pauli or list or str): Pauli list of
terms. A list of Pauli strings or a Pauli string is also allowed.
coeffs (np.ndarray): complex coefficients for Pauli terms.
.. note::
If ``data`` is a :obj:`~SparsePauliOp` and ``coeffs`` is not ``None``, the value
of the ``SparsePauliOp.coeffs`` will be ignored, and only the passed keyword
argument ``coeffs`` will be used.
ignore_pauli_phase (bool): if true, any ``phase`` component of a given :obj:`~PauliList`
will be assumed to be zero. This is more efficient in cases where a
:obj:`~PauliList` has been constructed purely for this object, and it is already
known that the phases in the ZX-convention are zero. It only makes sense to pass
this option when giving :obj:`~PauliList` data. (Default: False)
copy (bool): copy the input data if True, otherwise assign it directly, if possible.
(Default: True)
Raises:
QiskitError: If the input data or coeffs are invalid.
"""
if ignore_pauli_phase and not isinstance(data, PauliList):
raise QiskitError("ignore_pauli_list=True is only valid with PauliList data")
if isinstance(data, SparsePauliOp):
if coeffs is None:
coeffs = data.coeffs
data = data._pauli_list
# `SparsePauliOp._pauli_list` is already compatible with the internal ZX-phase
# convention. See `BasePauli._from_array` for the internal ZX-phase convention.
ignore_pauli_phase = True
pauli_list = PauliList(data.copy() if copy and hasattr(data, "copy") else data)
if coeffs is None:
coeffs = np.ones(pauli_list.size, dtype=complex)
else:
coeffs = np.array(coeffs, copy=copy, dtype=complex)
if ignore_pauli_phase:
# Fast path used in copy operations, where the phase of the PauliList is already known
# to be zero. This path only works if the input data is compatible with the internal
# ZX-phase convention.
self._pauli_list = pauli_list
self._coeffs = coeffs
else:
# move the phase of `pauli_list` to `self._coeffs`
phase = pauli_list.phase
self._coeffs = np.asarray((-1j) ** phase * coeffs, dtype=complex)
pauli_list._phase = np.mod(pauli_list._phase - phase, 4)
self._pauli_list = pauli_list
if self._coeffs.shape != (self._pauli_list.size,):
raise QiskitError(
"coeff vector is incorrect shape for number"
" of Paulis {} != {}".format(self._coeffs.shape, self._pauli_list.size)
)
# Initialize LinearOp
super().__init__(num_qubits=self._pauli_list.num_qubits)
def __array__(self, dtype=None):
if dtype:
return np.asarray(self.to_matrix(), dtype=dtype)
return self.to_matrix()
def __repr__(self):
prefix = "SparsePauliOp("
pad = len(prefix) * " "
return "{}{},\n{}coeffs={})".format(
prefix,
self.paulis.to_labels(),
pad,
np.array2string(self.coeffs, separator=", "),
)
def __eq__(self, other):
"""Entrywise comparison of two SparsePauliOp operators"""
return (
super().__eq__(other)
and self.coeffs.shape == other.coeffs.shape
and np.allclose(self.coeffs, other.coeffs)
and self.paulis == other.paulis
)
def equiv(self, other):
"""Check if two SparsePauliOp operators are equivalent.
Args:
other (SparsePauliOp): an operator object.
Returns:
bool: True if the operator is equivalent to ``self``.
"""
if not super().__eq__(other):
return False
return np.allclose((self - other).simplify().coeffs, [0])
@property
def settings(self) -> Dict:
"""Return settings."""
return {"data": self._pauli_list, "coeffs": self._coeffs}
# ---------------------------------------------------------------------
# Data accessors
# ---------------------------------------------------------------------
@property
def size(self):
"""The number of Pauli of Pauli terms in the operator."""
return self._pauli_list.size
def __len__(self):
"""Return the size."""
return self.size
# pylint: disable=bad-docstring-quotes
@property
@deprecate_function(
"The SparsePauliOp.table method is deprecated as of Qiskit Terra 0.19.0 "
"and will be removed no sooner than 3 months after the releasedate. "
"Use SparsePauliOp.paulis method instead.",
)
def table(self):
"""DEPRECATED - Return the the PauliTable."""
return PauliTable(np.column_stack((self.paulis.x, self.paulis.z)))
@table.setter
@deprecate_function(
"The SparsePauliOp.table method is deprecated as of Qiskit Terra 0.19.0 "
"and will be removed no sooner than 3 months after the releasedate. "
"Use SparsePauliOp.paulis method instead.",
)
def table(self, value):
if not isinstance(value, PauliTable):
value = PauliTable(value)
self._pauli_list = PauliList(value)
@property
def paulis(self):
"""Return the the PauliList."""
return self._pauli_list
@paulis.setter
def paulis(self, value):
if not isinstance(value, PauliList):
value = PauliList(value)
self._pauli_list = value
@property
def coeffs(self):
"""Return the Pauli coefficients."""
return self._coeffs
@coeffs.setter
def coeffs(self, value):
"""Set Pauli coefficients."""
self._coeffs[:] = value
def __getitem__(self, key):
"""Return a view of the SparsePauliOp."""
# Returns a view of specified rows of the PauliList
# This supports all slicing operations the underlying array supports.
if isinstance(key, (int, np.integer)):
key = [key]
return SparsePauliOp(self.paulis[key], self.coeffs[key])
def __setitem__(self, key, value):
"""Update SparsePauliOp."""
# Modify specified rows of the PauliList
if not isinstance(value, SparsePauliOp):
value = SparsePauliOp(value)
self.paulis[key] = value.paulis
self.coeffs[key] = value.coeffs
# ---------------------------------------------------------------------
# LinearOp Methods
# ---------------------------------------------------------------------
def conjugate(self):
# Conjugation conjugates phases and also Y.conj() = -Y
# Hence we need to multiply conjugated coeffs by -1
# for rows with an odd number of Y terms.
# Find rows with Ys
ret = self.transpose()
ret._coeffs = ret._coeffs.conj()
return ret
def transpose(self):
# The only effect transposition has is Y.T = -Y
# Hence we need to multiply coeffs by -1 for rows with an
# odd number of Y terms.
ret = self.copy()
minus = (-1) ** np.mod(np.sum(ret.paulis.x & ret.paulis.z, axis=1), 2)
ret._coeffs *= minus
return ret
def adjoint(self):
# Pauli's are self adjoint, so we only need to
# conjugate the phases
ret = self.copy()
ret._coeffs = ret._coeffs.conj()
return ret
def compose(self, other, qargs=None, front=False):
if qargs is None:
qargs = getattr(other, "qargs", None)
if not isinstance(other, SparsePauliOp):
other = SparsePauliOp(other)
# Validate composition dimensions and qargs match
self._op_shape.compose(other._op_shape, qargs, front)
if qargs is not None:
x1, z1 = self.paulis.x[:, qargs], self.paulis.z[:, qargs]
else:
x1, z1 = self.paulis.x, self.paulis.z
x2, z2 = other.paulis.x, other.paulis.z
num_qubits = other.num_qubits
# This method is the outer version of `BasePauli.compose`.
# `x1` and `z1` have shape `(self.size, num_qubits)`.
# `x2` and `z2` have shape `(other.size, num_qubits)`.
# `x1[:, np.newaxis]` results in shape `(self.size, 1, num_qubits)`.
# `ar = ufunc(x1[:, np.newaxis], x2)` will be in shape `(self.size, other.size, num_qubits)`.
# So, `ar.reshape((-1, num_qubits))` will be in shape `(self.size * other.size, num_qubits)`.
# Ref: https://numpy.org/doc/stable/user/theory.broadcasting.html
phase = np.add.outer(self.paulis._phase, other.paulis._phase).reshape(-1)
if front:
q = np.logical_and(x1[:, np.newaxis], z2).reshape((-1, num_qubits))
else:
q = np.logical_and(z1[:, np.newaxis], x2).reshape((-1, num_qubits))
# `np.mod` will be applied to `phase` in `SparsePauliOp.__init__`
phase = phase + 2 * np.sum(q, axis=1)
x3 = np.logical_xor(x1[:, np.newaxis], x2).reshape((-1, num_qubits))
z3 = np.logical_xor(z1[:, np.newaxis], z2).reshape((-1, num_qubits))
if qargs is None:
pauli_list = PauliList(BasePauli(z3, x3, phase))
else:
x4 = np.repeat(self.paulis.x, other.size, axis=0)
z4 = np.repeat(self.paulis.z, other.size, axis=0)
x4[:, qargs] = x3
z4[:, qargs] = z3
pauli_list = PauliList(BasePauli(z4, x4, phase))
# note: the following is a faster code equivalent to
# `coeffs = np.kron(self.coeffs, other.coeffs)`
# since `self.coeffs` and `other.coeffs` are both 1d arrays.
coeffs = np.multiply.outer(self.coeffs, other.coeffs).ravel()
return SparsePauliOp(pauli_list, coeffs, copy=False)
def tensor(self, other):
if not isinstance(other, SparsePauliOp):
other = SparsePauliOp(other)
return self._tensor(self, other)
def expand(self, other):
if not isinstance(other, SparsePauliOp):
other = SparsePauliOp(other)
return self._tensor(other, self)
@classmethod
def _tensor(cls, a, b):
paulis = a.paulis.tensor(b.paulis)
coeffs = np.kron(a.coeffs, b.coeffs)
return SparsePauliOp(paulis, coeffs, copy=False)
def _add(self, other, qargs=None):
if qargs is None:
qargs = getattr(other, "qargs", None)
if not isinstance(other, SparsePauliOp):
other = SparsePauliOp(other)
self._op_shape._validate_add(other._op_shape, qargs)
paulis = self.paulis._add(other.paulis, qargs=qargs)
coeffs = np.hstack((self.coeffs, other.coeffs))
return SparsePauliOp(paulis, coeffs, ignore_pauli_phase=True, copy=False)
def _multiply(self, other):
if not isinstance(other, Number):
raise QiskitError("other is not a number")
if other == 0:
# Check edge case that we deleted all Paulis
# In this case we return an identity Pauli with a zero coefficient
paulis = PauliList.from_symplectic(
np.zeros((1, self.num_qubits), dtype=bool),
np.zeros((1, self.num_qubits), dtype=bool),
)
coeffs = np.array([0j])
return SparsePauliOp(paulis, coeffs, ignore_pauli_phase=True, copy=False)
# Otherwise we just update the phases
return SparsePauliOp(
self.paulis.copy(), other * self.coeffs, ignore_pauli_phase=True, copy=False
)
# ---------------------------------------------------------------------
# Utility Methods
# ---------------------------------------------------------------------
def is_unitary(self, atol=None, rtol=None):
"""Return True if operator is a unitary matrix.
Args:
atol (float): Optional. Absolute tolerance for checking if
coefficients are zero (Default: 1e-8).
rtol (float): Optional. Relative tolerance for checking if
coefficients are zero (Default: 1e-5).
Returns:
bool: True if the operator is unitary, False otherwise.
"""
# Get default atol and rtol
if atol is None:
atol = self.atol
if rtol is None:
rtol = self.rtol
# Compose with adjoint
val = self.compose(self.adjoint()).simplify()
# See if the result is an identity
return (
val.size == 1
and np.isclose(val.coeffs[0], 1.0, atol=atol, rtol=rtol)
and not np.any(val.paulis.x)
and not np.any(val.paulis.z)
)
def simplify(self, atol=None, rtol=None):
"""Simplify PauliList by combining duplicates and removing zeros.
Args:
atol (float): Optional. Absolute tolerance for checking if
coefficients are zero (Default: 1e-8).
rtol (float): Optional. Relative tolerance for checking if
coefficients are zero (Default: 1e-5).
Returns:
SparsePauliOp: the simplified SparsePauliOp operator.
"""
# Get default atol and rtol
if atol is None:
atol = self.atol
if rtol is None:
rtol = self.rtol
# Filter non-zero coefficients
non_zero = np.logical_not(np.isclose(self.coeffs, 0, atol=atol, rtol=rtol))
paulis_x = self.paulis.x[non_zero]
paulis_z = self.paulis.z[non_zero]
nz_coeffs = self.coeffs[non_zero]
# Pack bool vectors into np.uint8 vectors by np.packbits
array = np.packbits(paulis_x, axis=1) * 256 + np.packbits(paulis_z, axis=1)
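# Each packed row is now a compact integer encoding of one Pauli (x bits in
# the high byte, z bits in the low byte), so unordered_unique can detect
# duplicate Paulis without comparing the bool arrays column by column.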
indexes, inverses = unordered_unique(array)
if np.all(non_zero) and indexes.shape[0] == array.shape[0]:
# No zero operator or duplicate operator
return self.copy()
coeffs = np.zeros(indexes.shape[0], dtype=complex)
np.add.at(coeffs, inverses, nz_coeffs)
# Delete zero coefficient rows
is_zero = np.isclose(coeffs, 0, atol=atol, rtol=rtol)
# Check edge case that we deleted all Paulis
# In this case we return an identity Pauli with a zero coefficient
if np.all(is_zero):
x = np.zeros((1, self.num_qubits), dtype=bool)
z = np.zeros((1, self.num_qubits), dtype=bool)
coeffs = np.array([0j], dtype=complex)
else:
non_zero = np.logical_not(is_zero)
non_zero_indexes = indexes[non_zero]
x = paulis_x[non_zero_indexes]
z = paulis_z[non_zero_indexes]
coeffs = coeffs[non_zero]
return SparsePauliOp(
PauliList.from_symplectic(z, x), coeffs, ignore_pauli_phase=True, copy=False
)
def chop(self, tol=1e-14):
"""Set real and imaginary parts of the coefficients to 0 if ``< tol`` in magnitude.
For example, the operator representing ``1+1e-17j X + 1e-17 Y`` with a tolerance larger
than ``1e-17`` will be reduced to ``1 X`` whereas :meth:`.SparsePauliOp.simplify` would
return ``1+1e-17j X``.
If both the real and imaginary parts of a coefficient are 0 after chopping, the
corresponding Pauli is removed from the operator.
Args:
tol (float): The absolute tolerance to check whether a real or imaginary part should
be set to 0.
Returns:
SparsePauliOp: This operator with chopped coefficients.
"""
realpart_nonzero = np.abs(self.coeffs.real) > tol
imagpart_nonzero = np.abs(self.coeffs.imag) > tol
remaining_indices = np.logical_or(realpart_nonzero, imagpart_nonzero)
remaining_real = realpart_nonzero[remaining_indices]
remaining_imag = imagpart_nonzero[remaining_indices]
if len(remaining_real) == 0: # if no Paulis are left
x = np.zeros((1, self.num_qubits), dtype=bool)
z = np.zeros((1, self.num_qubits), dtype=bool)
coeffs = np.array([0j], dtype=complex)
else:
coeffs = np.zeros(np.count_nonzero(remaining_indices), dtype=complex)
coeffs.real[remaining_real] = self.coeffs.real[realpart_nonzero]
coeffs.imag[remaining_imag] = self.coeffs.imag[imagpart_nonzero]
x = self.paulis.x[remaining_indices]
z = self.paulis.z[remaining_indices]
return SparsePauliOp(
PauliList.from_symplectic(z, x), coeffs, ignore_pauli_phase=True, copy=False
)
@staticmethod
def sum(ops):
"""Sum of SparsePauliOps.
This is a specialized version of the builtin ``sum`` function for SparsePauliOp
with smaller overhead.
Args:
ops (list[SparsePauliOp]): a list of SparsePauliOps.
Returns:
SparsePauliOp: the SparsePauliOp representing the sum of the input list.
Raises:
QiskitError: if the input list is empty.
QiskitError: if the input list includes an object that is not SparsePauliOp.
QiskitError: if the numbers of qubits of the objects in the input list do not match.
"""
if len(ops) == 0:
raise QiskitError("Input list is empty")
if not all(isinstance(op, SparsePauliOp) for op in ops):
raise QiskitError("Input list includes an object that is not SparsePauliOp")
for other in ops[1:]:
ops[0]._op_shape._validate_add(other._op_shape)
z = np.vstack([op.paulis.z for op in ops])
x = np.vstack([op.paulis.x for op in ops])
phase = np.hstack([op.paulis._phase for op in ops])
pauli_list = PauliList(BasePauli(z, x, phase))
coeffs = np.hstack([op.coeffs for op in ops])
return SparsePauliOp(pauli_list, coeffs, ignore_pauli_phase=True, copy=False)
# ---------------------------------------------------------------------
# Additional conversions
# ---------------------------------------------------------------------
@staticmethod
def from_operator(obj, atol=None, rtol=None):
"""Construct from an Operator objector.
Note that the cost of this construction is exponential as it involves
taking inner products with every element of the N-qubit Pauli basis.
Args:
obj (Operator): an N-qubit operator.
atol (float): Optional. Absolute tolerance for checking if
coefficients are zero (Default: 1e-8).
rtol (float): Optional. Relative tolerance for checking if
coefficients are zero (Default: 1e-5).
Returns:
SparsePauliOp: the SparsePauliOp representation of the operator.
Raises:
QiskitError: if the input operator is not an N-qubit operator.
"""
# Get default atol and rtol
if atol is None:
atol = SparsePauliOp.atol
if rtol is None:
rtol = SparsePauliOp.rtol
if not isinstance(obj, Operator):
obj = Operator(obj)
# Check dimension is N-qubit operator
num_qubits = obj.num_qubits
if num_qubits is None:
raise QiskitError("Input Operator is not an N-qubit operator.")
data = obj.data
# Index of non-zero basis elements
inds = []
# Non-zero coefficients
coeffs = []
# Non-normalized basis factor
denom = 2**num_qubits
# Compute coefficients from basis
basis = pauli_basis(num_qubits, pauli_list=True)
for i, mat in enumerate(basis.matrix_iter()):
coeff = np.trace(mat.dot(data)) / denom
if not np.isclose(coeff, 0, atol=atol, rtol=rtol):
inds.append(i)
coeffs.append(coeff)
# Get Non-zero coeff PauliList terms
paulis = basis[inds]
return SparsePauliOp(paulis, coeffs, copy=False)
@staticmethod
def from_list(obj):
"""Construct from a list of Pauli strings and coefficients.
For example, the 5-qubit Hamiltonian
.. math::
H = Z_1 X_4 + 2 Y_0 Y_3
can be constructed as
.. code-block:: python
# via tuples and the full Pauli string
op = SparsePauliOp.from_list([("XIIZI", 1), ("IYIIY", 2)])
Args:
obj (Iterable[Tuple[str, complex]]): The list of 2-tuples specifying the Pauli terms.
Returns:
SparsePauliOp: The SparsePauliOp representation of the Pauli terms.
Raises:
QiskitError: If the list of Paulis is empty.
"""
obj = list(obj) # To convert zip or other iterable
size = len(obj) # number of Pauli terms
if size == 0:
raise QiskitError("Input Pauli list is empty.")
# determine the number of qubits
num_qubits = len(obj[0][0])
coeffs = np.zeros(size, dtype=complex)
labels = np.zeros(size, dtype=f"<U{num_qubits}")
for i, item in enumerate(obj):
labels[i] = item[0]
coeffs[i] = item[1]
paulis = PauliList(labels)
return SparsePauliOp(paulis, coeffs, copy=False)
@staticmethod
def from_sparse_list(obj, num_qubits):
"""Construct from a list of local Pauli strings and coefficients.
Each list element is a 3-tuple of a local Pauli string, the qubit indices
it acts on, and a coefficient.
For example, the 5-qubit Hamiltonian
.. math::
H = Z_1 X_4 + 2 Y_0 Y_3
can be constructed as
.. code-block:: python
# via triples and local Paulis with indices
op = SparsePauliOp.from_sparse_list([("ZX", [1, 4], 1), ("YY", [0, 3], 2)], num_qubits=5)
# equals the following construction from "dense" Paulis
op = SparsePauliOp.from_list([("XIIZI", 1), ("IYIIY", 2)])
Args:
obj (Iterable[Tuple[str, List[int], complex]]): The list of 3-tuples specifying the Paulis.
num_qubits (int): The number of qubits of the operator.
Returns:
SparsePauliOp: The SparsePauliOp representation of the Pauli terms.
Raises:
QiskitError: If the list of Paulis is empty.
QiskitError: If the number of qubits is incompatible with the indices of the Pauli terms.
"""
obj = list(obj) # To convert zip or other iterable
size = len(obj) # number of Pauli terms
if size == 0:
raise QiskitError("Input Pauli list is empty.")
coeffs = np.zeros(size, dtype=complex)
labels = np.zeros(size, dtype=f"<U{num_qubits}")
for i, (paulis, indices, coeff) in enumerate(obj):
# construct the full label based on the non-trivial Paulis and indices
label = ["I"] * num_qubits
for pauli, index in zip(paulis, indices):
if index >= num_qubits:
raise QiskitError(
f"The number of qubits ({num_qubits}) is smaller than a required index {index}."
)
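# ~index == -(index + 1): string labels are big-endian while qubit indices
# are little-endian, so qubit 0 maps to the rightmost label character.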
label[~index] = pauli
labels[i] = "".join(label)
coeffs[i] = coeff
paulis = PauliList(labels)
return SparsePauliOp(paulis, coeffs, copy=False)
def to_list(self, array=False):
"""Convert to a list Pauli string labels and coefficients.
For operators with a lot of terms converting using the ``array=True``
kwarg will be more efficient since it allocates memory for
the full Numpy array of labels in advance.
Args:
array (bool): return a Numpy array if True, otherwise
return a list (Default: False).
Returns:
list or array: List of pairs (label, coeff) for rows of the PauliList.
"""
# Dtype for a structured array with string labels and complex coeffs
pauli_labels = self.paulis.to_labels(array=True)
labels = np.zeros(self.size, dtype=[("labels", pauli_labels.dtype), ("coeffs", "c16")])
labels["labels"] = pauli_labels
labels["coeffs"] = self.coeffs
if array:
return labels
return labels.tolist()
def to_matrix(self, sparse=False):
"""Convert to a dense or sparse matrix.
Args:
sparse (bool): if True return a sparse CSR matrix, otherwise
return dense Numpy array (Default: False).
Returns:
array: A dense matrix if `sparse=False`.
csr_matrix: A sparse matrix in CSR format if `sparse=True`.
"""
mat = None
for i in self.matrix_iter(sparse=sparse):
if mat is None:
mat = i
else:
mat += i
return mat
def to_operator(self):
"""Convert to a matrix Operator object"""
return Operator(self.to_matrix())
# ---------------------------------------------------------------------
# Custom Iterators
# ---------------------------------------------------------------------
def label_iter(self):
"""Return a label representation iterator.
This is a lazy iterator that converts each term in the SparsePauliOp
into a tuple (label, coeff). To convert all terms at once
use the :meth:`to_list` method.
Returns:
LabelIterator: label iterator object for the SparsePauliOp.
"""
class LabelIterator(CustomIterator):
"""Label representation iteration and item access."""
def __repr__(self):
return f"<SparsePauliOp_label_iterator at {hex(id(self))}>"
def __getitem__(self, key):
coeff = self.obj.coeffs[key]
pauli = self.obj.paulis.label_iter()[key]
return (pauli, coeff)
return LabelIterator(self)
def matrix_iter(self, sparse=False):
"""Return a matrix representation iterator.
This is a lazy iterator that converts each term in the SparsePauliOp
into a matrix as it is used. To convert to a single matrix use the
:meth:`to_matrix` method.
Args:
sparse (bool): optionally return sparse CSR matrices if True,
otherwise return Numpy array matrices
(Default: False)
Returns:
MatrixIterator: matrix iterator object for the SparsePauliOp.
"""
class MatrixIterator(CustomIterator):
"""Matrix representation iteration and item access."""
def __repr__(self):
return f"<SparsePauliOp_matrix_iterator at {hex(id(self))}>"
def __getitem__(self, key):
coeff = self.obj.coeffs[key]
mat = self.obj.paulis[key].to_matrix(sparse)
return coeff * mat
return MatrixIterator(self)
def _create_graph(self, qubit_wise):
"""Transform measurement operator grouping problem into graph coloring problem
Args:
qubit_wise (bool): whether the commutation rule is applied to the whole operator,
or on a per-qubit basis.
Returns:
retworkx.PyGraph: An undirected graph whose nodes index the Pauli terms
and whose edges connect non-commuting pairs.
"""
edges = self.paulis._noncommutation_graph(qubit_wise)
graph = rx.PyGraph()
graph.add_nodes_from(range(self.size))
graph.add_edges_from_no_data(edges)
return graph
def group_commuting(self, qubit_wise=False):
"""Partition a SparsePauliOp into sets of commuting Pauli strings.
Args:
qubit_wise (bool): whether the commutation rule is applied to the whole operator,
or on a per-qubit basis. For example:
.. code-block:: python
>>> op = SparsePauliOp.from_list([("XX", 2), ("YY", 1), ("IZ",2j), ("ZZ",1j)])
>>> op.group_commuting()
[SparsePauliOp(["IZ", "ZZ"], coeffs=[0.+2.j, 0.+1j]),
SparsePauliOp(["XX", "YY"], coeffs=[2.+0.j, 1.+0.j])]
>>> op.group_commuting(qubit_wise=True)
[SparsePauliOp(['XX'], coeffs=[2.+0.j]),
SparsePauliOp(['YY'], coeffs=[1.+0.j]),
SparsePauliOp(['IZ', 'ZZ'], coeffs=[0.+2.j, 0.+1.j])]
Returns:
List[SparsePauliOp]: List of SparsePauliOp where each SparsePauliOp contains
commuting Pauli operators.
"""
graph = self._create_graph(qubit_wise)
# Keys in coloring_dict are nodes, values are colors
coloring_dict = rx.graph_greedy_color(graph)
groups = defaultdict(list)
for idx, color in coloring_dict.items():
groups[color].append(idx)
return [self[group] for group in groups.values()]
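# Illustrative usage sketch (not part of the library source): from_list builds
# the operator, simplify() merges duplicate terms, and group_commuting()
# partitions the terms into mutually commuting sets.
demo_op = SparsePauliOp.from_list([("XI", 1.0), ("XI", 1.0), ("ZZ", 0.5)])
print(demo_op.simplify())  # the duplicate "XI" terms merge into coefficient 2
print(demo_op.group_commuting(qubit_wise=True))  # "XI" terms split from "ZZ"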
# ibm-quantum-spring-challenge-2022/exercise4/04.quantum_chemistry.ipynb
from qiskit_nature.drivers import Molecule
from qiskit_nature.drivers.second_quantization import ElectronicStructureMoleculeDriver, ElectronicStructureDriverType
molecule = Molecule(
# coordinates are given in Angstrom
geometry=[
["O", [0.0, 0.0, 0.0]],
["H", [0.758602, 0.0, 0.504284]],
["H", [0.758602, 0.0, -0.504284]]
],
multiplicity=1, # = 2*spin + 1
charge=0,
)
driver = ElectronicStructureMoleculeDriver(
molecule=molecule,
basis="sto3g",
driver_type=ElectronicStructureDriverType.PYSCF,
)
properties = driver.run()
from qiskit_nature.transformers.second_quantization.electronic import ActiveSpaceTransformer
# Check the occupation of the spin orbitals
PN_property = properties.get_property("ParticleNumber")
print(PN_property)
# Define the active space around the Fermi level
# (selected automatically around the HOMO and LUMO, ordered by energy)
transformer = ActiveSpaceTransformer(
num_electrons=2, #how many electrons we have in our active space
num_molecular_orbitals=2, #how many orbitals we have in our active space
)
# We can hand-pick the MOs to be included in the AS
# (in case they are not exactly around the Fermi level)
# transformer = ActiveSpaceTransformer(
# num_electrons=2, #how many electrons we have in our active space
# num_molecular_orbitals=2, #how many orbitals we have in our active space
# active_orbitals=[4,7], #We are specifically choosing MO number 4 (occupied with two electrons) and 7 (empty)
# )
from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem
problem = ElectronicStructureProblem(driver, [transformer])
second_q_ops = problem.second_q_ops() # this calls driver.run() internally
hamiltonian = second_q_ops[0]
print(hamiltonian)
from qiskit_nature.mappers.second_quantization import ParityMapper, BravyiKitaevMapper, JordanWignerMapper
from qiskit_nature.converters.second_quantization import QubitConverter
# Setup the mapper and qubit converter
mapper_type = 'JordanWignerMapper'
if mapper_type == 'ParityMapper':
mapper = ParityMapper()
elif mapper_type == 'JordanWignerMapper':
mapper = JordanWignerMapper()
elif mapper_type == 'BravyiKitaevMapper':
mapper = BravyiKitaevMapper()
converter = QubitConverter(mapper)
qubit_op = converter.convert(hamiltonian)
print(qubit_op)
print("No-grouping (Active Space)")
print("\n", f"Number of Measurement is {len(qubit_op)}")
print("Qubit-wise grouping")
measure_group = PauliList(qubit_op.primitive.paulis).group_commuting(qubit_wise=True)
for group in measure_group:
print(group)
print("\n", f"Number of Measurement is {len(measure_group)}")
print("General grouping")
measure_group = PauliList(qubit_op.primitive.paulis).group_commuting()
for group in measure_group:
print(group)
print("\n", f"Number of Measurement is {len(measure_group)}")
problem = ElectronicStructureProblem(driver, [])
second_q_ops = problem.second_q_ops() # this calls driver.run() internally
hamiltonian = second_q_ops[0]
# print(hamiltonian)
converter = QubitConverter(mapper)
qubit_op = converter.convert(hamiltonian)
print(qubit_op)
print("No-grouping (Active Space)")
print("\n", f"Number of Measurement is {len(qubit_op)}")
print("Qubit-wise grouping")
measure_group = PauliList(qubit_op.primitive.paulis).group_commuting(qubit_wise=True)
print("\n", f"Number of Measurement is {len(measure_group)}\n")
for group in measure_group:
print(group)
print("General grouping")
measure_group = PauliList(qubit_op.primitive.paulis).group_commuting()
print("\n", f"Number of Measurement is {len(measure_group)}\n")
for group in measure_group:
print(group)
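# Hedged summary cell (not in the original notebook): contrast how many
# measurement settings each grouping strategy needs for this Hamiltonian.
full_paulis = PauliList(qubit_op.primitive.paulis)
print(f"Pauli terms: {len(qubit_op)}")
print(f"Qubit-wise commuting groups: {len(full_paulis.group_commuting(qubit_wise=True))}")
print(f"Generally commuting groups: {len(full_paulis.group_commuting())}")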
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import sys
from io import BytesIO
from qiskit import pulse, circuit, qpy
my_pulse = pulse.Gaussian(
circuit.Parameter("duration"),
circuit.Parameter("amp"),
circuit.Parameter("sigma"),
)
with pulse.build(name="my_gate") as my_gate:
pulse.shift_phase(1.57, pulse.DriveChannel(0))
pulse.play(my_pulse, pulse.DriveChannel(0))
file_like = BytesIO()
qpy.dump(my_gate, file_like)
file_like.seek(0)
load_gate = qpy.load(file_like)[0]
assert my_gate == load_gate
print(sys.getsizeof(file_like.getvalue()))
my_gate_op = circuit.Gate("my_gate", 1, my_gate.parameters)
my_circ = circuit.QuantumCircuit(1)
my_circ.append(my_gate_op, [0])
my_circ.measure_active()
my_circ.add_calibration(my_gate_op, (0,), my_gate)
file_like = BytesIO()
qpy.dump(my_circ, file_like)
file_like.seek(0)
load_circ = qpy.load(file_like)[0]
assert my_circ == load_circ
print(sys.getsizeof(file_like.getvalue()))
from qiskit.pulse.schedule import ScheduleBlock
from qiskit.pulse.transforms import AlignLeft
from qiskit.pulse.instructions import ShiftPhase, Play
from qiskit.pulse.channels import DriveChannel
context = AlignLeft()
channel = DriveChannel(0)
inst1 = ShiftPhase(1.57, channel)
inst2 = Play(my_pulse, channel)
sched_obj = ScheduleBlock(alignment_context=context)
sched_obj.append(inst1)
sched_obj.append(inst2)
import numpy as np
from qiskit.pulse.library.samplers.decorators import functional_pulse
@functional_pulse
def my_pulse(duration, amp, sigma):
center = duration / 2
return amp * np.exp(-((np.arange(duration)-center) / sigma) ** 2 / 2)
my_pulse(16, 0.1, 4)
from qiskit.pulse.library.parametric_pulses import Gaussian as ParametricGaussian
my_pulse_parametric = ParametricGaussian(16, 0.1, 4)
my_pulse_parametric
my_pulse_parametric_unassigned = ParametricGaussian(circuit.Parameter("duration"), 0.1, 4)
my_pulse_parametric_unassigned
from qiskit.pulse.library.discrete import gaussian
gaussian(16, 0.1, 4) == my_pulse_parametric.get_waveform()
from qiskit.pulse.library.symbolic_pulses import Gaussian as SymbolicGaussian
my_pulse_symbolic = SymbolicGaussian(16, 0.1, 4)
my_pulse_symbolic
my_pulse_parametric.get_waveform() == my_pulse_symbolic.get_waveform()
my_pulse_symbolic.envelope
my_pulse_symbolic._envelope_lam
from qiskit.pulse.library.symbolic_pulses import SymbolicPulse
from sympy import symbols, tanh
t, duration, sigma, amp = symbols("t duration sigma amp")
expr = amp * (tanh(t/sigma) + tanh((duration-t)/sigma) - 1)
constraints = duration > sigma
expr, constraints
my_tan_pulse = SymbolicPulse(
pulse_type="Tangential",
duration=160,
parameters={"amp": 0.1, "sigma": 40},
envelope=expr,
constraints=constraints,
valid_amp_conditions=amp < 1,
name="my_tan_pulse",
)
my_tan_pulse.draw()
%%timeit
SymbolicGaussian(160, 0.1, 40).get_waveform()
%%timeit
ParametricGaussian(160, 0.1, 40).get_waveform()
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit_experiments.library.randomized_benchmarking import RBUtils
# Run old function with num_qubits = 2
num_qubits = 2
t1s = [100 for _ in range(num_qubits)]
t2s = [100 for _ in range(num_qubits)]
gate_length = 5
RBUtils.coherence_limit(
nQ=num_qubits,
T1_list=t1s,
T2_list=t2s,
gatelen=gate_length
)
# Run old function with num_qubits = 3
num_qubits = 3
t1s = [100 for _ in range(num_qubits)]
t2s = [100 for _ in range(num_qubits)]
gate_length = 5
RBUtils.coherence_limit(
nQ=num_qubits,
T1_list=t1s,
T2_list=t2s,
gatelen=gate_length
)
# Run new function with num_qubits = 2
num_qubits = 2
t1s = [100 for _ in range(num_qubits)]
t2s = [100 for _ in range(num_qubits)]
gate_length = 5
RBUtils.coherence_limit_error(
num_qubits=num_qubits,
gate_length=gate_length,
t1s=t1s,
t2s=t2s,
)
import numpy as np
np.isclose(
RBUtils.coherence_limit(
nQ=num_qubits,
T1_list=t1s,
T2_list=t2s,
gatelen=gate_length
),
RBUtils.coherence_limit_error(
num_qubits=num_qubits,
gate_length=gate_length,
t1s=t1s,
t2s=t2s,
),
rtol=1e-15,
)
# Run new function with num_qubits = 3
num_qubits = 3
t1s = [100 for _ in range(num_qubits)]
t2s = [100 for _ in range(num_qubits)]
gate_length = 5
RBUtils.coherence_limit_error(
num_qubits=num_qubits,
gate_length=gate_length,
t1s=t1s,
t2s=t2s,
)
# Run new function with num_qubits = 9
num_qubits = 9
t1s = [100 for _ in range(num_qubits)]
t2s = [100 for _ in range(num_qubits)]
gate_length = 5
RBUtils.coherence_limit_error(
num_qubits=num_qubits,
gate_length=gate_length,
t1s=t1s,
t2s=t2s,
)
def coherence_limit(nQ=2, T1_list=None, T2_list=None, gatelen=0.1):
T1 = np.array(T1_list)
if T2_list is None:
T2 = 2 * T1
else:
T2 = np.array(T2_list)
if len(T1) != nQ or len(T2) != nQ:
raise ValueError("T1 and/or T2 not the right length")
coherence_limit_err = 0
if nQ == 1:
coherence_limit_err = 0.5 * (
1.0 - 2.0 / 3.0 * np.exp(-gatelen / T2[0]) - 1.0 / 3.0 * np.exp(-gatelen / T1[0])
)
elif nQ == 2:
T1factor = 0
T2factor = 0
for i in range(2):
T1factor += 1.0 / 15.0 * np.exp(-gatelen / T1[i])
T2factor += (
2.0
/ 15.0
* (
np.exp(-gatelen / T2[i])
+ np.exp(-gatelen * (1.0 / T2[i] + 1.0 / T1[1 - i]))
)
)
T1factor += 1.0 / 15.0 * np.exp(-gatelen * np.sum(1 / T1))
T2factor += 4.0 / 15.0 * np.exp(-gatelen * np.sum(1 / T2))
coherence_limit_err = 0.75 * (1.0 - T1factor - T2factor)
else:
raise ValueError("Not a valid number of qubits")
return coherence_limit_err
# Imports required by the snippet below (numpy is already imported above)
import functools
import operator
from typing import Optional, Sequence

import qiskit.quantum_info as qi

def coherence_limit_error(
    num_qubits: int, gate_length: float, t1s: Sequence, t2s: Optional[Sequence] = None
):
t1s = np.array(t1s)
if t2s is None:
t2s = 2 * t1s
else:
t2s = np.array([min(t2, 2 * t1) for t1, t2 in zip(t1s, t2s)])
if len(t1s) != num_qubits or len(t2s) != num_qubits:
raise ValueError("Length of t1s/t2s must equal num_qubits")
def thermal_relaxation_choi(t1, t2, time): # without excitation
return qi.Choi(
np.array(
[
[1, 0, 0, np.exp(-time / t2)],
[0, 0, 0, 0],
[0, 0, 1 - np.exp(-time / t1), 0],
[np.exp(-time / t2), 0, 0, np.exp(-time / t1)],
]
)
)
chois = [thermal_relaxation_choi(t1, t2, gate_length) for t1, t2 in zip(t1s, t2s)]
traces = [np.real(np.trace(np.array(qi.SuperOp(choi)))) for choi in chois]
d = 2**num_qubits
return d / (d + 1) * (1 - functools.reduce(operator.mul, traces) / (d * d))
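# Hedged consistency check (not from the original snippet): for a single qubit
# the Choi-based formula should reduce to the analytic coherence limit
# 0.5 * (1 - 2/3 * exp(-t/T2) - 1/3 * exp(-t/T1)).
t1, t2, gate_len = 100.0, 80.0, 5.0
analytic = coherence_limit(nQ=1, T1_list=[t1], T2_list=[t2], gatelen=gate_len)
choi_based = coherence_limit_error(num_qubits=1, gate_length=gate_len, t1s=[t1], t2s=[t2])
print(analytic, choi_based, np.isclose(analytic, choi_based))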
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit_nature.drivers.second_quantization import PySCFDriver
from qiskit_nature.transformers.second_quantization.electronic import FreezeCoreTransformer
from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem
driver = PySCFDriver(atom="O 0.0 0.0 0.115; H 0.0 0.754 -0.459; H 0.0 -0.754 -0.459")
transformer = FreezeCoreTransformer()
problem = ElectronicStructureProblem(driver, [transformer])
second_q_ops = problem.second_q_ops()
# implicitly calls driver.run() EVERY time!
for name, op in second_q_ops.items():
print(f"{name}\n{op}\n")
hamiltonian = second_q_ops[problem.main_property_name]
print(problem.main_property_name)
print(hamiltonian)
from qiskit_nature.second_q.drivers import PySCFDriver
from qiskit_nature.second_q.transformers import FreezeCoreTransformer
driver = PySCFDriver(atom="O 0.0 0.0 0.115; H 0.0 0.754 -0.459; H 0.0 -0.754 -0.459")
transformer = FreezeCoreTransformer()
problem = driver.run()
transformed_problem = transformer.transform(problem)
hamiltonian, aux_ops = transformed_problem.second_q_ops()
print(hamiltonian)
for name, op in aux_ops.items():
print(f"{name}\n{op}\n")
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Qiskit/feedback
|
Qiskit
|
%load_ext autoreload
%autoreload 2
import qiskit_metal as metal
from qiskit_metal import designs, draw
from qiskit_metal import MetalGUI, Dict, open_docs
%metal_heading Welcome to Qiskit Metal!
from qiskit_metal.qlibrary.qubits.transmon_pocket_6 import TransmonPocket6
from qiskit_metal.qlibrary.tlines.meandered import RouteMeander
from qiskit_metal.qlibrary.tlines.pathfinder import RoutePathfinder
from qiskit_metal.qlibrary.tlines.straight_path import RouteStraight
from qiskit_metal.qlibrary.lumped.cap_n_interdigital import CapNInterdigital
from qiskit_metal.qlibrary.couplers.cap_n_interdigital_tee import CapNInterdigitalTee
from qiskit_metal.qlibrary.terminations.launchpad_wb import LaunchpadWirebond
design = metal.designs.DesignPlanar()
gui = metal.MetalGUI(design)
design.overwrite_enabled = True
design.chips.main
design.chips.main.size.size_x = '6mm'
design.chips.main.size.size_y = '6mm'
TransmonPocket6.get_template_options(design)
q_0 = TransmonPocket6(design,'Q_0', options = dict(
pos_x='-1.25mm',
pos_y='0.5mm',
gds_cell_name ='FakeJunction_01',
hfss_inductance ='14nH',
pad_width = '425 um',
pocket_height = '650um',
connection_pads=dict(
readout = dict(loc_W=0, loc_H=1, pad_width = '80um', pad_gap = '50um'),
bus_01 = dict(loc_W=1, loc_H=-1, pad_width = '60um', pad_gap = '10um'),
bus_02 = dict(loc_W=-1, loc_H=-1, pad_width = '60um', pad_gap = '10um')
)))
q_1 = TransmonPocket6(design,'Q_1', options = dict(
pos_x='1.25mm',
pos_y='0.5mm',
gds_cell_name ='FakeJunction_01',
hfss_inductance ='14nH',
pad_width = '425 um',
pocket_height = '650um',
connection_pads=dict(
readout = dict(loc_W=0, loc_H=1, pad_width = '80um', pad_gap = '50um'),
bus_01 = dict(loc_W=-1, loc_H=-1, pad_width = '60um', pad_gap = '10um'),
bus_12 = dict(loc_W=1, loc_H=-1, pad_width = '60um', pad_gap = '10um')
)))
q_2 = TransmonPocket6(design,'Q_2', options = dict(
pos_x='0mm',
pos_y='-1.35mm',
gds_cell_name ='FakeJunction_01',
hfss_inductance ='14nH',
pad_width = '425 um',
pocket_height = '650um',
connection_pads=dict(
readout = dict(loc_W=0, loc_H=-1, pad_width = '80um', pad_gap = '50um'),
bus_02 = dict(loc_W=-1, loc_H=1, pad_width = '60um', pad_gap = '10um'),
bus_12 = dict(loc_W=1, loc_H=1, pad_width = '60um', pad_gap = '10um')
)))
gui.rebuild()
gui.autoscale()
from qiskit_metal.analyses.em.cpw_calculations import guided_wavelength
def find_resonator_length(frequency, line_width, line_gap, N):
#frequency in GHz
#line_width/line_gap in um
#N -> 2 for lambda/2, 4 for lambda/4
#substrate dimensions and properties already set
[lambdaG, etfSqrt, q] = guided_wavelength(frequency*10**9, line_width*10**-6,
line_gap*10**-6, 750*10**-6, 200*10**-9, 11.9)
return str(lambdaG/N*10**3)+" mm"
?guided_wavelength
q_0.pins.keys()
find_resonator_length(5.8,10,6,2) #Note: What might be wrong with this length?
bus_01 = RouteMeander(design,'Bus_01', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Q_0',
pin='bus_01'),
end_pin=Dict(
component='Q_1',
pin='bus_01')
),
lead=Dict(
start_straight='125um',
end_straight = '125um'
),
meander=Dict(
asymmetry = '50um'),
fillet = "99um",
total_length = '10.3mm'))
gui.rebuild()
gui.autoscale()
bus_02 = RouteMeander(design,'Bus_02', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Q_0',
pin='bus_02'),
end_pin=Dict(
component='Q_2',
pin='bus_02')
),
lead=Dict(
start_straight='125um',
end_straight = '125um'
),
meander=Dict(
asymmetry = '50um'),
fillet = "99um",
total_length = '10mm'))
bus_12 = RouteMeander(design,'Bus_12', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Q_1',
pin='bus_12'),
end_pin=Dict(
component='Q_2',
pin='bus_12')
),
lead=Dict(
start_straight='125um',
end_straight = '125um'
),
meander=Dict(
asymmetry = '50um'),
fillet = "99um",
total_length = '9.7mm'))
gui.rebuild()
gui.autoscale()
from collections import OrderedDict
bus_01 = RouteMeander(design,'Bus_01', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Q_0',
pin='bus_01'),
end_pin=Dict(
component='Q_1',
pin='bus_01')
),
lead=Dict(
start_straight='125um',
end_straight = '125um'
),
meander=Dict(
asymmetry = '-350um'),
fillet = "99um",
total_length = '10.3mm'))
jogs_start = OrderedDict()
jogs_start[0] = ["L", '350um']
jogs_start[1] = ["L", '500um']
bus_02 = RouteMeander(design,'Bus_02', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Q_0',
pin='bus_02'),
end_pin=Dict(
component='Q_2',
pin='bus_02')
),
lead=Dict(
start_straight='125um',
end_straight = '225um',
start_jogged_extension=jogs_start,
#end_jogged_extension = jogs_end
),
meander=Dict(
asymmetry = '-50um'),
fillet = "99um",
total_length = '10mm'))
jogs_start = OrderedDict()
jogs_start[0] = ["R", '350um']
jogs_start[1] = ["R", '700um']
bus_12 = RouteMeander(design,'Bus_12', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Q_1',
pin='bus_12'),
end_pin=Dict(
component='Q_2',
pin='bus_12')
),
lead=Dict(
start_straight='325um',
end_straight = '25um',
start_jogged_extension=jogs_start
),
meander=Dict(
asymmetry = '50um'),
fillet = "99um",
total_length = '9.7mm'))
gui.rebuild()
gui.autoscale()
launch_readout_q_0 = LaunchpadWirebond(design, 'Launch_Readout_Q_0',
options = dict(pos_x = '-2mm',
pos_y ='2.5mm',
orientation = '-90'))
launch_readout_q_1 = LaunchpadWirebond(design, 'Launch_Readout_Q_1',
options = dict(pos_x = '2mm',
pos_y ='2.5mm',
orientation = '-90'))
launch_readout_q_2 = LaunchpadWirebond(design, 'Launch_Readout_Q_2',
options = dict(pos_x = '-2mm',
pos_y ='-2.5mm',
orientation = '90'))
gui.rebuild()
gui.autoscale()
cap_readout_q_2 = CapNInterdigital(design,'Cap_Readout_Q_2', options = dict(pos_x = '-2mm', pos_y ='-2.25mm', orientation = '0'))
cap_readout_q_0 = CapNInterdigitalTee(design,'Cap_Readout_Q_0', options=dict(pos_x = '-1.5mm', pos_y = '2.25mm',
orientation = '0'))
cap_readout_q_1 = CapNInterdigitalTee(design,'Cap_Readout_Q_1', options=dict(pos_x = '1.5mm', pos_y = '2.25mm',
orientation = '0'))
gui.rebuild()
gui.autoscale()
find_resonator_length(7.2,10,6,2)
jogs_start = OrderedDict()
jogs_start[0] = ["L", '350um']
#jogs_start[1] = ["R", '700um']
readout_0 = RouteMeander(design,'Readout_0', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Q_0',
pin='readout'),
end_pin=Dict(
component='Cap_Readout_Q_0',
pin='second_end')
),
lead=Dict(
start_straight='125um',
end_straight = '25um',
start_jogged_extension=jogs_start,
#end_jogged_extension = jogs_end
),
meander=Dict(
asymmetry = '50um'),
fillet = "99um",
total_length = '8.8mm'))
jogs_start = OrderedDict()
jogs_start[0] = ["R", '350um']
readout_1 = RouteMeander(design,'Readout_1', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Q_1',
pin='readout'),
end_pin=Dict(
component='Cap_Readout_Q_1',
pin='second_end')
),
lead=Dict(
start_straight='125um',
end_straight = '25um',
start_jogged_extension=jogs_start,
#end_jogged_extension = jogs_end
),
meander=Dict(
asymmetry = '50um'),
fillet = "99um",
total_length = '8.5mm'))
readout_2 = RouteMeander(design,'Readout_2', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Q_2',
pin='readout'),
end_pin=Dict(
component='Cap_Readout_Q_2',
pin='north_end')
),
lead=Dict(
start_straight='125um',
end_straight = '25um',
),
meander=Dict(
asymmetry = '200um'),
fillet = "99um",
total_length = '8.3mm'))
gui.rebuild()
gui.autoscale()
tl_readout_q_2= RouteStraight(design,'TL_Readout_Q_2', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Cap_Readout_Q_2',
pin='south_end'),
end_pin=Dict(
component='Launch_Readout_Q_2',
pin='tie')),
trace_width = '10um',
trace_gap = '6um'
))
tl_readout_q_0 = RoutePathfinder(design,'TL_Readout_Q_0', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Cap_Readout_Q_0',
pin='prime_start'),
end_pin=Dict(
component='Launch_Readout_Q_0',
pin='tie')),
fillet = '99um',
trace_width = '10um',
trace_gap = '6um'
))
tl_readout_q_1 = RoutePathfinder(design,'TL_Readout_Q_1', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Cap_Readout_Q_1',
pin='prime_end'),
end_pin=Dict(
component='Launch_Readout_Q_1',
pin='tie')),
fillet = '99um',
trace_width = '10um',
trace_gap = '6um'
))
tl_readout_q_01= RouteStraight(design,'TL_Readout_Q_01', options = dict(hfss_wire_bonds = True,
pin_inputs=Dict(
start_pin=Dict(
component='Cap_Readout_Q_0',
pin='prime_end'),
end_pin=Dict(
component='Cap_Readout_Q_1',
pin='prime_start')),
trace_width = '10um',
trace_gap = '6um'
))
gui.rebuild()
gui.autoscale()
from qiskit_metal.analyses.quantization import LOManalysis
q_0_LOM = LOManalysis(design, "q3d")
q_0_LOM.sim.setup
q_0_LOM.setup
q_0_LOM.setup.freq_readout = 6.8
q_0_LOM.setup.freq_bus = [5.8,6.0]
q_0_LOM.setup
q_0.pins.keys()
q_0_LOM.run(components=['Q_0'], open_terminations=[('Q_0', 'readout'), ('Q_0', 'bus_01'), ('Q_0', 'bus_02')])
q_0_LOM.plot_convergence();
q_0_LOM.plot_convergence_chi()
q_0_LOM.sim.setup.percent_error = 0.1
q_0_LOM.sim.setup.max_passes = 20
q_0_LOM.run()
q_0_LOM.plot_convergence();
q_0_LOM.plot_convergence_chi()
q_0_LOM.sim.capacitance_matrix
q_0_LOM.lumped_oscillator
q_0.options.pad_gap = '28um'
q_0.options.connection_pads.readout.pad_gap = '22um'
q_0.options.connection_pads.readout.pad_width = '100um'
q_0.options.connection_pads.bus_01.pad_width = '135um'
q_0.options.connection_pads.bus_02.pad_width = '135um'
q_0_LOM.setup.junctions.Lj = 14.9
gui.rebuild()
q_0_LOM.run(name="Q_0_LOM_2",components=['Q_0'], open_terminations=[('Q_0', 'readout'), ('Q_0', 'bus_01'), ('Q_0', 'bus_02')])
q_0_LOM.lumped_oscillator
q_0.options.hfss_inductance = '14.9nH'
gui.rebuild()
q_0_LOM.sim.close()
from qiskit_metal.analyses.quantization import EPRanalysis
bus_02_EPR = EPRanalysis(design, "hfss")
bus_02_EPR.sim.setup
bus_02_EPR.sim.setup.n_modes = 2
bus_02_EPR.sim.run_sim(name="Bus_02_Sim", components=['Q_0','Q_2','Bus_02'],
ignored_jjs = [('Q_0','rect_jj'),('Q_2','rect_jj')],
box_plus_buffer = False)
bus_02_EPR.sim.plot_convergences()
bus_02_EPR.get_frequencies()
bus_02_EPR.sim.plot_fields('main')
#bus_02_EPR.sim.save_screenshot()
from qiskit_metal.analyses.sweep_and_optimize.sweeping import Sweeping
sweep = Sweeping(design)
?sweep.sweep_one_option_get_eigenmode_solution_data
em_setup_args = Dict(min_freq_ghz=3,
n_modes=1,
max_passes=12,
max_delta_f = 0.1)
all_sweeps, return_code = sweep.sweep_one_option_get_eigenmode_solution_data(
'Bus_02',
'total_length',
['9.4mm', '9.3mm', '9.2mm'],
['Bus_02','Q_0','Q_2'],
[],
[('Q_0','rect_jj'),('Q_2','rect_jj')],
design_name="GetEigenModeSolution",
setup_args=em_setup_args)
frequency = {}
for key in all_sweeps.keys():
frequency[key] = all_sweeps[key]['frequency']
frequency
bus_02.options.total_length = '9.35mm'
bus_02_EPR.sim.close()
q_1.options.pad_gap = '28um'
q_1.options.connection_pads.readout.pad_gap = '20um'
q_1.options.connection_pads.readout.pad_width = '100um'
q_1.options.connection_pads.bus_12.pad_width = '130um'
q_1.options.hfss_inductance = '13.9nH'
bus_01.options.total_length = '9.55mm'
gui.rebuild()
three_mode_EPR = EPRanalysis(design, "hfss")
three_mode_EPR.sim.setup.max_passes = 18
three_mode_EPR.sim.setup.max_delta_f = 0.025
three_mode_EPR.sim.setup.n_modes = 3
three_mode_EPR.sim.setup.vars = Dict(Lj0= '14.9 nH', Cj0= '0 fF',
Lj1= '13.9 nH', Cj1= '0 fF')
three_mode_EPR.sim.run_sim(name="Three_Mode",
components=['Q_0', 'Q_1', 'Bus_01'])
three_mode_EPR.sim.plot_convergences()
del three_mode_EPR.setup.junctions['jj']
three_mode_EPR.setup.junctions.jj0 = Dict(rect='JJ_rect_Lj_Q_0_rect_jj', line='JJ_Lj_Q_0_rect_jj_',
Lj_variable='Lj0', Cj_variable='Cj0')
three_mode_EPR.setup.junctions.jj1 = Dict(rect='JJ_rect_Lj_Q_1_rect_jj', line='JJ_Lj_Q_1_rect_jj_',
Lj_variable='Lj1', Cj_variable='Cj1')
three_mode_EPR.setup.sweep_variable = 'Lj0'
three_mode_EPR.setup
three_mode_EPR.setup
three_mode_EPR.run_epr()
three_mode_EPR.sim.close()
mt_gds = design.renderers.gds
mt_gds.options
mt_gds.options.path_filename = '../resources/Fake_Junctions.GDS'
mt_gds.options.no_cheese.buffer = '40um'
mt_gds.options.max_points = 2555
mt_gds.options.cheese.cheese_0_x = '3um'
mt_gds.options.cheese.cheese_0_y = '3um'
mt_gds.options.cheese.delta_x = '50um'
mt_gds.options.cheese.delta_y = '50um'
mt_gds.options.cheese.view_in_file= {'main': {1: True}}
mt_gds.options.fabricate = True
mt_gds.export_to_gds("MetalTutorial.gds")
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import clear_output
from qiskit import QuantumCircuit, Aer
from qiskit.algorithms.optimizers import COBYLA
from qiskit.circuit import ParameterVector
from qiskit.circuit.library import ZFeatureMap
from qiskit.opflow import AerPauliExpectation, PauliSumOp
from qiskit.utils import QuantumInstance, algorithm_globals
from qiskit_machine_learning.algorithms.classifiers import NeuralNetworkClassifier
from qiskit_machine_learning.neural_networks import TwoLayerQNN
from sklearn.model_selection import train_test_split
algorithm_globals.random_seed = 12345
# We now define a two qubit unitary as defined in [3]
def conv_circuit(params):
target = QuantumCircuit(2)
target.rz(-np.pi / 2, 1)
target.cx(1, 0)
target.rz(params[0], 0)
target.ry(params[1], 1)
target.cx(0, 1)
target.ry(params[2], 1)
target.cx(1, 0)
target.rz(np.pi / 2, 0)
return target
# Let's draw this circuit and see what it looks like
params = ParameterVector("θ", length=3)
circuit = conv_circuit(params)
circuit.draw("mpl")
def conv_layer(num_qubits, param_prefix):
qc = QuantumCircuit(num_qubits, name="Convolutional Layer")
qubits = list(range(num_qubits))
param_index = 0
params = ParameterVector(param_prefix, length=num_qubits * 3)
for q1, q2 in zip(qubits[0::2], qubits[1::2]):
qc = qc.compose(conv_circuit(params[param_index : (param_index + 3)]), [q1, q2])
qc.barrier()
param_index += 3
for q1, q2 in zip(qubits[1::2], qubits[2::2] + [0]):
qc = qc.compose(conv_circuit(params[param_index : (param_index + 3)]), [q1, q2])
qc.barrier()
param_index += 3
qc_inst = qc.to_instruction()
qc = QuantumCircuit(num_qubits)
qc.append(qc_inst, qubits)
return qc
circuit = conv_layer(4, "θ")
circuit.decompose().draw("mpl")
def pool_circuit(params):
target = QuantumCircuit(2)
target.rz(-np.pi / 2, 1)
target.cx(1, 0)
target.rz(params[0], 0)
target.ry(params[1], 1)
target.cx(0, 1)
target.ry(params[2], 1)
return target
params = ParameterVector("θ", length=3)
circuit = pool_circuit(params)
circuit.draw("mpl")
def pool_layer(sources, sinks, param_prefix):
num_qubits = len(sources) + len(sinks)
qc = QuantumCircuit(num_qubits, name="Pooling Layer")
param_index = 0
params = ParameterVector(param_prefix, length=num_qubits // 2 * 3)
for source, sink in zip(sources, sinks):
qc = qc.compose(pool_circuit(params[param_index : (param_index + 3)]), [source, sink])
qc.barrier()
param_index += 3
qc_inst = qc.to_instruction()
qc = QuantumCircuit(num_qubits)
qc.append(qc_inst, range(num_qubits))
return qc
sources = [0, 1]
sinks = [2, 3]
circuit = pool_layer(sources, sinks, "θ")
circuit.decompose().draw("mpl")
def generate_dataset(num_images):
images = []
labels = []
hor_array = np.zeros((6, 8))
ver_array = np.zeros((4, 8))
j = 0
for i in range(0, 7):
if i != 3:
hor_array[j][i] = np.pi / 2
hor_array[j][i + 1] = np.pi / 2
j += 1
j = 0
for i in range(0, 4):
ver_array[j][i] = np.pi / 2
ver_array[j][i + 4] = np.pi / 2
j += 1
for n in range(num_images):
rng = algorithm_globals.random.integers(0, 2)
if rng == 0:
labels.append(-1)
random_image = algorithm_globals.random.integers(0, 6)
images.append(np.array(hor_array[random_image]))
elif rng == 1:
labels.append(1)
random_image = algorithm_globals.random.integers(0, 4)
images.append(np.array(ver_array[random_image]))
# Create noise
for i in range(8):
if images[-1][i] == 0:
images[-1][i] = algorithm_globals.random.uniform(0, np.pi / 4)
return images, labels
images, labels = generate_dataset(50)
train_images, test_images, train_labels, test_labels = train_test_split(
images, labels, test_size=0.3
)
fig, ax = plt.subplots(2, 2, figsize=(10, 6), subplot_kw={"xticks": [], "yticks": []})
for i in range(4):
ax[i // 2, i % 2].imshow(
train_images[i].reshape(2, 4), # Change back to 2 by 4
aspect="equal",
)
plt.subplots_adjust(wspace=0.1, hspace=0.025)
feature_map = ZFeatureMap(8)
feature_map.decompose().draw("mpl")
quantum_instance = QuantumInstance(Aer.get_backend("aer_simulator"), shots=1024)
feature_map = ZFeatureMap(8)
ansatz = QuantumCircuit(8, name="Ansatz")
# First Convolutional Layer
ansatz.compose(conv_layer(8, "c1"), list(range(8)), inplace=True)
# First Pooling Layer
ansatz.compose(pool_layer([0, 1, 2, 3], [4, 5, 6, 7], "p1"), list(range(8)), inplace=True)
# Second Convolutional Layer
ansatz.compose(conv_layer(4, "c2"), list(range(4, 8)), inplace=True)
# Second Pooling Layer
ansatz.compose(pool_layer([0, 1], [2, 3], "p2"), list(range(4, 8)), inplace=True)
# Third Convolutional Layer
ansatz.compose(conv_layer(2, "c3"), list(range(6, 8)), inplace=True)
# Third Pooling Layer
ansatz.compose(pool_layer([0], [1], "p3"), list(range(6, 8)), inplace=True)
# Combining the feature map and ansatz
circuit = QuantumCircuit(8)
circuit.compose(feature_map, range(8), inplace=True)
circuit.compose(ansatz, range(8), inplace=True)
observable = PauliSumOp.from_list([("Z" + "I" * 7, 1)])
qnn = TwoLayerQNN(
num_qubits=8,
feature_map=feature_map,
ansatz=ansatz,
observable=observable,
exp_val=AerPauliExpectation(),
quantum_instance=quantum_instance,
)
circuit.draw("mpl")
def callback_graph(weights, obj_func_eval):
clear_output(wait=True)
objective_func_vals.append(obj_func_eval)
plt.title("Objective function value against iteration")
plt.xlabel("Iteration")
plt.ylabel("Objective function value")
plt.plot(range(len(objective_func_vals)), objective_func_vals)
plt.show()
opflow_classifier = NeuralNetworkClassifier(
qnn,
optimizer=COBYLA(maxiter=400), # Set max iterations here
callback=callback_graph,
initial_point=algorithm_globals.random.random(qnn.num_weights),
)
x = np.asarray(train_images)
y = np.asarray(train_labels)
objective_func_vals = []
plt.rcParams["figure.figsize"] = (12, 6)
opflow_classifier.fit(x, y)
# score classifier
print(f"Accuracy from the train data : {np.round(100 * opflow_classifier.score(x, y), 2)}%")
test_images, test_labels = generate_dataset(10)
y_predict = opflow_classifier.predict(test_images)
x = np.asarray(test_images)
y = np.asarray(test_labels)
print(f"Accuracy from the test data : {np.round(100 * opflow_classifier.score(x, y), 2)}%")
# Let's see some examples in our dataset
fig, ax = plt.subplots(1, 2, figsize=(10, 6), subplot_kw={"xticks": [], "yticks": []})
for i in range(0, 2):
ax[i % 2].imshow(test_images[i].reshape(2, 4), aspect="equal")
if y_predict[i] == -1:
ax[i % 2].set_title("The QCNN predicts this is a Horizontal Line")
if y_predict[i] == +1:
ax[i % 2].set_title("The QCNN predicts this is a Vertical Line")
plt.subplots_adjust(wspace=0.1, hspace=0.5)
!jupyter nbconvert 'QCNN_presentation.ipynb' --to slides --post serve
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Qiskit/feedback
|
Qiskit
|
class BaseEstimatorGradient(ABC):
def __init__(
self,
estimator: BaseEstimator,
options: Options | None = None,
):
...
def run(
self,
circuits: Sequence[QuantumCircuit],
observables: Sequence[BaseOperator | PauliSumOp],
parameter_values: Sequence[Sequence[float]],
parameters: Sequence[Sequence[Parameter] | None] | None = None,
**options,
) -> AlgorithmJob:
...
@dataclass(frozen=True)
class EstimatorGradientResult:
"""Result of EstimatorGradient."""
gradients: list[np.ndarray]
"""The gradients of the expectation values."""
metadata: list[dict[str, Any]]
"""Additional information about the job."""
options: Options
"""Primitive runtime options for the execution of the job."""
from qiskit.algorithms.gradients import ParamShiftEstimatorGradient
from qiskit.circuit import Parameter
from qiskit.circuit.library import RealAmplitudes
from qiskit.primitives import Estimator
from qiskit.quantum_info import SparsePauliOp
qc = RealAmplitudes(num_qubits=2, reps=1)
op = SparsePauliOp.from_list([("ZI", 1)])
qc.decompose().draw('mpl')
# How to use the Estimator
estimator = Estimator()
values = [1, 2, 3, 4]
# run a job
job = estimator.run([qc], [op], [values])
# get results
result = job.result().values
result
# How to use the ParamShiftEstimatorGradient
param_shift_grad = ParamShiftEstimatorGradient(estimator)
# run a job
job = param_shift_grad.run([qc], [op], [values])
# get results
result = job.result().gradients
result
estimator = Estimator()
# Instantiate a gradient class
param_shift_grad = ParamShiftEstimatorGradient(estimator)
values1 = [1, 2, 3, 4]
values2 = [5, 6, 7, 8]
# run a job
job = param_shift_grad.run([qc]*2, [op]*2, [values1, values2])
# get results
result = job.result().gradients
result
# EstimatorGradient
param_shift_grad = ParamShiftEstimatorGradient(estimator)
param = [p for p in qc.parameters]
# run a job
# param = list of parameters in the quantum circuit
job = param_shift_grad.run([qc]*2, [op]*2, [values, values], [param, param[:2]])
# get results
result = job.result()
result.gradients
# metadata in EstimatorGradientResult stores which parameters' gradients were calculated
print("result.metadata")
print(result.metadata[0])
print(result.metadata[1])
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit.quantum_info.operators import SparsePauliOp
H2_op = SparsePauliOp(
["II", "IZ", "ZI", "ZZ", "XX"],
coeffs=[
-1.052373245772859,
0.39793742484318045,
-0.39793742484318045,
-0.01128010425623538,
0.18093119978423156,
],
)
aux_op1 = SparsePauliOp(["ZZ"], coeffs=[2.0])
aux_op2 = SparsePauliOp(["XX"], coeffs=[-2.0])
from IPython.display import clear_output
import matplotlib.pyplot as plt
counts = []
values = []
def plot_energy(eval_count, parameters, mean, metadata):
counts.append(eval_count)
values.append(mean)
clear_output(wait=True)
plt.plot(counts, values)
plt.xlabel("Evaluation count")
plt.ylabel("Energy")
plt.title("Energy convergence")
plt.show()
from qiskit.circuit.library import TwoLocal
from qiskit.primitives import Estimator
from qiskit.algorithms.minimum_eigensolvers import VQE, VQEResult
from qiskit.algorithms.optimizers import SLSQP
# Reset plotting arrays.
counts.clear()
values.clear()
# Define the central primitive.
estimator = Estimator()
# Define our ansatz
ansatz = TwoLocal(rotation_blocks="ry", entanglement_blocks="cz")
slsqp = SLSQP()
# Instantiate VQE. Note that all arguments after the optimizer must be specified by keyword.
vqe = VQE(estimator, ansatz, slsqp, callback=plot_energy)
# Compute the minimum eigenvalue. Note that the primary operator is H2_op; the auxiliary operators are just variants for illustration.
# Note that None values are no longer supported. Zero values will be interpreted as a zero operator.
result = vqe.compute_minimum_eigenvalue(H2_op, [aux_op1, aux_op2, 0])
print(result)
# Reset plotting arrays.
counts.clear()
values.clear()
result = vqe.compute_minimum_eigenvalue(H2_op, {"aux_op1": aux_op1, "aux_op2": aux_op2})
print(result)
from IPython.display import clear_output
import matplotlib.pyplot as plt
import numpy as np
counts = []
values = []
std_dev = []
def plot_energy_error(eval_count, parameters, mean, metadata):
counts.append(eval_count)
values.append(mean)
variance = metadata.get("variance", 0.0)
shots = metadata.get("shots", 0.0)
std_dev.append(np.sqrt(variance / shots) if shots > 0 else 0.0)
clear_output(wait=True)
plt.errorbar(counts, values, std_dev)
plt.xlabel("Evaluation count")
plt.ylabel("Energy")
plt.title("Energy convergence")
plt.show()
counts.clear()
values.clear()
from qiskit.algorithms.optimizers import COBYLA
cobyla = COBYLA(maxiter=1000)
estimator = Estimator(options={"shots": 1024})
vqe = VQE(estimator, ansatz, cobyla, callback=plot_energy_error)
result = vqe.compute_minimum_eigenvalue(H2_op, [aux_op1, aux_op2])
print(result)
counts.clear()
values.clear()
from qiskit.algorithms.gradients import FiniteDiffEstimatorGradient
estimator = Estimator()
gradient = FiniteDiffEstimatorGradient(estimator, epsilon=0.001)
vqe = VQE(estimator, ansatz, slsqp, gradient=gradient, callback=plot_energy)
result = vqe.compute_minimum_eigenvalue(H2_op, [H2_op ** 2, H2_op / 2])
print(result)
counts.clear()
values.clear()
from qiskit.algorithms.gradients import ParamShiftEstimatorGradient
from qiskit.algorithms.optimizers import GradientDescent
estimator = Estimator()
gradient = FiniteDiffEstimatorGradient(estimator, epsilon=0.001)
vqe = VQE(estimator,
ansatz,
GradientDescent(maxiter=200, learning_rate=0.1),
gradient=ParamShiftEstimatorGradient(estimator),
)
result = vqe.compute_minimum_eigenvalue(H2_op, [H2_op ** 2, H2_op / 2])
print(result)
from qiskit.circuit.library import TwoLocal
from qiskit.primitives import Sampler
from qiskit.algorithms.minimum_eigensolvers import SamplingVQE
from qiskit.algorithms.optimizers import SLSQP
# Reset plotting arrays.
counts.clear()
values.clear()
# Define the central primitive.
sampler = Sampler()
# Instantiate SamplingVQE. Note that all arguments after the optimizer must be specified by keyword.
sampling_vqe = SamplingVQE(sampler, ansatz, slsqp, callback=plot_energy)
result = sampling_vqe.compute_minimum_eigenvalue(H2_op)
# Reset plotting arrays.
counts.clear()
values.clear()
diag_op = SparsePauliOp(["ZZ", "IZ", "II"], coeffs=[1, -0.5, 0.12])
sampler = Sampler()
from qiskit.circuit.library import RealAmplitudes
sampling_vqe = SamplingVQE(
sampler,
RealAmplitudes(2, reps=1),
SLSQP(),
aggregation=0.1,
callback=plot_energy,
)
sampling_vqe.compute_minimum_eigenvalue(operator=diag_op)
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit.utils.deprecation import deprecate_function
from qiskit.utils.deprecation import deprecate_arguments
class DummyClass:
"""This is short description. Let's make it
multiline"""
def __init__(self, arg1: int = None, arg2: list = None):
self.arg1 = arg1
self.arg2 = arg2
@deprecate_function(
"The DummyClass.foo() method is being deprecated. Use the DummyClass.some_othermethod()",
since="1.2.3",
)
def foo_deprecated(self, index_arg2: int):
"""A multi-line
docstring.
Here are more details.
Args:
index_arg2: `index_arg2` description
Returns:
int: returns `arg2[index_arg2]`
Raises:
QiskitError: if `len(self.arg2) < index_arg2`
"""
if len(self.arg2) < index_arg2:
raise QiskitError("there is an error")
return self.arg2[index_arg2]
@deprecate_arguments({"if_arg1": "other_if_arg1"}, since="1.2.3")
def bar_with_deprecated_arg(
self, if_arg1: int = None, index_arg2: int = None, other_if_arg1: int = None
):
"""
A multi-line short
docstring.
This is the long description
Args:
if_arg1: `if_arg1` description with
multi-line
index_arg2: `index_arg2` description
other_if_arg1: `other_if_arg1` description
Returns:
int or None: if `if_arg1 == self.arg1`, returns `arg2[index_arg2]`
"""
if other_if_arg1 == self.arg1 or if_arg1 == self.arg1:
return self.arg2[index_arg2]
return None
d = DummyClass(1, [1,2])
d.foo_deprecated(0)
print(d.foo_deprecated.__doc__)
d.bar_with_deprecated_arg(if_arg1=0)
d.bar_with_deprecated_arg(other_if_arg1=0)
print(d.bar_with_deprecated_arg.__doc__)
def f():
"""Normal docstring"""
pass
deprecate_function("deprecation message", since="1.2.3")(f)
print(f.__doc__)
def f():
"""Normal docstring"""
pass
deprecate_function("deprecation message", modify_docstring=False, since="1.2.3")(f)
print(f.__doc__)
def f():
"""Normal docstring"""
pass
deprecate_function("deprecation message", modify_docstring=True)(f)
print(f.__doc__)
from qiskit import __version__
def f():
"""Normal docstring"""
pass
deprecate_function("deprecation message", modify_docstring=True, since=__version__)(f)
print(f.__doc__)
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import os
from typing import Any, Dict, List, Optional, Union
import numpy as np
import matplotlib.pyplot as plt
from qiskit import IBMQ, QuantumCircuit, QuantumRegister, ClassicalRegister, quantum_info as qi
from qiskit.providers.ibmq import RunnerResult
from qiskit.result import marginal_counts
import qiskit.tools.jupyter
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
%load_ext autoreload
%autoreload 2
# Note: This can be any hub/group/project that has access to the required device and the Qiskit runtime.
# Verify that ``qasm3`` is present in ``backend.configuration().supported_features``.
hub = "ibm-q-community"
group = "ieee-demos"
project = "main"
backend_name = "ibm_peekskill"
hgp = f"{hub}/{group}/{project}"
from qiskit.circuit import Delay, Parameter
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService(instance=hgp)
backend = service.backend(backend_name, instance=hgp)
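# Sanity check (a sketch, not part of the original flow): confirm the backend
# advertises OpenQASM 3 support before submitting dynamic circuits. Some
# providers do not expose ``supported_features``, hence the defensive getattr.
if "qasm3" not in getattr(backend.configuration(), "supported_features", []):
print(f"Warning: {backend_name} does not report OpenQASM 3 support")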
# Addresses a bug in the current Qiskit runtime package
# that will be fixed on the next Qiskit release
target = backend.target
if "delay" not in target:
target.add_instruction(
Delay(Parameter("t")), {(bit,): None for bit in range(target.num_qubits)}
)
from qiskit.providers.aer import AerSimulator, Aer
from qiskit.providers.aer.noise import NoiseModel
backend_noise_model = NoiseModel.from_backend(backend)
#backend_sim = AerSimulator(noise_model=backend_noise_model)
backend_sim = AerSimulator()
ideal_sim = Aer.get_backend('qasm_simulator')
import utils
backend
shots: int = 1024 # Number of shots to run each circuit for
init_num_resets: int = 3 # Set the number of resets to initialize qubits between circuits
init_delay: float = 0. # Set the initialization idle time for qubits between circuits
sim: bool = False # Set True to simulate our experiments and False to run in hardware
dynamical_decoupling: bool = False # Set True to enable dynamical decoupling
favourite_qubits: List[int] = [5, 3, 8, 2, 9]
# Some general helper routines that will be used throughout the notebook
import mapomatic as mm
from qiskit.circuit.library import XGate
from qiskit.transpiler import PassManager
from qiskit.transpiler.instruction_durations import InstructionDurations
from qiskit_ibm_runtime import RuntimeJob, IBMBackend
def convert_cycles(time_in_seconds: float, backend: IBMBackend) -> int:
cycles = time_in_seconds / (backend.configuration().dt)
return int(cycles + (16 - (cycles % 16)))
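# Quick illustration of convert_cycles (a sketch): a 500 ns interval is divided
# by the backend cycle time ``dt`` and rounded up to the next multiple of 16,
# the pulse-alignment granularity assumed by this helper.
print(f"500 ns -> {convert_cycles(0.5e-6, backend)} cycles of dt")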
def execute(circuits: List[QuantumCircuit], verbose: bool = True, **kwargs) -> RuntimeJob:
"""A helper method to execute circuits with common settings in the notebook.
Args:
circuits: To execute.
verbose: Emit execution information.
Note: reads the global variable ``sim`` to choose between simulator and hardware.
"""
if isinstance(circuits, QuantumCircuit):
circuits = [circuits]
if sim:
return backend_sim.run(circuits, shots=shots, **kwargs)
else:
job = run_openqasm3(circuits, backend, verbose=False, shots=shots, init_num_resets=init_num_resets, init_delay=init_delay, **kwargs)
if verbose:
print(f"Running on qasm3, job id: {job.job_id}")
return job
def calculate_initial_layout(
circuit: QuantumCircuit,
mapomatic: Optional[bool] = None,
favourite_qubits: Optional[List[int]] = None,
blacklist_qubits: Optional[List[int]] = None
) -> List[int]:
"""Routine to help choose the ideal qubit layout given an input circuit.
Args:
circuit: To evaluate layout for.
mapomatic: Use mapomatic to choose ideal qubits, otherwise naively choose ``favourite_qubits``
favourite_qubits: A list of favourite qubits to ensure are in the layout
blacklist_qubits: Qubits that are not allowed in the layout.
"""
if favourite_qubits is None:
favourite_qubits = []
if blacklist_qubits is None:
blacklist_qubits = []
deflated = mm.deflate_circuit(circuit)
if mapomatic:
available_layouts = [layout for layout in mm.matching_layouts(deflated, backend.configuration().coupling_map, strict_direction=False) if layout[:len(favourite_qubits)] == favourite_qubits and not set(layout).intersection(set(blacklist_qubits))]
scores = mm.evaluate_layouts(deflated, available_layouts, backend)
return scores[0][0]
return (favourite_qubits + list(set(range(backend.num_qubits)) - set(favourite_qubits)))[:deflated.num_qubits]
def apply_dynamical_decoupling(
circuits: List[QuantumCircuit],
backend: IBMBackend,
initial_layout: List[int],
) -> List[QuantumCircuit]:
"""Apply dynamic circuit dynamical decoupling."""
from qiskit_ibm_provider.transpiler.passes.scheduling import DynamicCircuitScheduleAnalysis, PadDynamicalDecoupling
dd_sequence = [XGate(), XGate()]
durations = InstructionDurations.from_backend(backend)
pm = PassManager(
[
DynamicCircuitScheduleAnalysis(durations),
PadDynamicalDecoupling(durations, dd_sequence, qubits=initial_layout),
]
)
return pm.run(circuits)
def apply_transpile(
circuits: List[QuantumCircuit],
backend: IBMBackend,
initial_layout: List[int],
dynamical_decoupling: bool = dynamical_decoupling,
**transpile_kwargs,
) -> List[QuantumCircuit]:
"""Fixed transpile routine to be used for consistency throughout the notebook."""
transpiled_circuits = transpile(circuits, backend, initial_layout=initial_layout, optimization_level=3, **transpile_kwargs)
if dynamical_decoupling:
return apply_dynamical_decoupling(transpiled_circuits, backend, initial_layout)
return transpiled_circuits
qubit = favourite_qubits[0]
print(f"We will use qubit {qubit}")
# Exercise
from utils import run_openqasm3
from qiskit import QuantumRegister, ClassicalRegister, transpile
qr = QuantumRegister(1)
crx = ClassicalRegister(1, name="xresult")
crm = ClassicalRegister(1, name="measureresult")
qc_reset = QuantumCircuit(qr, crx, crm, name="Reset")
# Complete the circuit to measure and conditionally reset the target
# qubit.
qc_reset = transpile(qc_reset, backend)
qc_reset.draw(output="mpl", idle_wires=False)
# Solution
from utils import run_openqasm3
from qiskit import QuantumRegister, ClassicalRegister, transpile
qr = QuantumRegister(1)
crx = ClassicalRegister(1, name="xresult")
crm = ClassicalRegister(1, name="measureresult")
qc_reset = QuantumCircuit(qr, crx, crm, name="Reset")
qc_reset.x(0)
qc_reset.measure(0, crx)
qc_reset.x(0).c_if(crx, 1)
qc_reset.measure(0, crm)
qc_reset = transpile(qc_reset, backend)
qc_reset.draw(output="mpl", idle_wires=False)
reset_job = run_openqasm3(qc_reset, backend, verbose=True, shots=shots, init_num_resets=0, init_delay=0) # Turn off automatic init
from qiskit.result import marginal_counts
reset_result = reset_job.result()
reset_counts = reset_result.get_counts(0)
# Look at only the final measurement result
initial_measurement_result = marginal_counts(reset_counts, indices=[0])
mitigated_reset_results = marginal_counts(reset_counts, indices=[1])
print(f"Full counts including reset: {reset_counts}")
print(f"Conditional X gate applied on: {initial_measurement_result['1']} shots")
print(f"Results from our reset - |0\rangles prepared: {mitigated_reset_results.get('0')}, |1\rangles prepared: {mitigated_reset_results.get('1', 0)}"
)
from qiskit import qasm3
basis_gates = backend.configuration().basis_gates
def dump_qasm3(circuit: QuantumCircuit, backend: IBMBackend = backend) -> str:
return qasm3.Exporter(includes=[],
basis_gates=backend.configuration().basis_gates,
disable_constants=True).dumps(circuit)
qasm3_reset = dump_qasm3(qc_reset)
print(qasm3_reset)
runtime_job = run_openqasm3(qasm3_reset, backend, verbose=True, shots=shots, init_num_resets=0, init_delay=0)
classical_compute_qasm3 = """
OPENQASM 3.0;
bit a;
bit b;
bit c;
x $0;
x $2;
a = measure $0; // expected "1"
b = measure $1; // expected "0"
c = measure $2; // expected "1"
bit[3] d = "100";
if (((a | b) & c) == 1) {
// Path will nearly always execute
d[0] = 1;
} else {
// Path will rarely execute outside of SPAM errors
d[0] = 0;
}
bit[3] e = "101";
// Conditionally execute based on classical bit array comparison
// Should always execute
if (d == e) {
x $1;
}
bit final;
final = measure $1; // expected "1"
"""
classical_compute_job = run_openqasm3(classical_compute_qasm3, backend, verbose=False, shots=shots)
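# Inspect the counts (sketch): per the inline comments in the program above, the
# dominant outcome should have a = 1, b = 0, c = 1 and final = 1, up to SPAM errors.
print(classical_compute_job.result().get_counts(0))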
qr = QuantumRegister(3)
cr = ClassicalRegister(3, name="result")
crz = cr[0]
crx = cr[1]
result = cr[2]
qc_teleport = QuantumCircuit(qr, cr, name="Teleport")
# Apply teleportation circuit
qc_teleport.h(qr[1])
qc_teleport.cx(qr[1], qr[2])
qc_teleport.cx(qr[0], qr[1])
qc_teleport.h(qr[0])
qc_teleport.measure(qr[0], crz)
qc_teleport.measure(qr[1], crx)
qc_teleport.z(qr[2]).c_if(crz, 1)
qc_teleport.x(qr[2]).c_if(crx, 1)
qc_teleport.draw(output="mpl")
# Prepare |1> for teleportation
qc_state_prep = QuantumCircuit(1)
qc_state_prep.x(0)
print(qc_state_prep)
target_state_prep = qi.Statevector.from_instruction(qc_state_prep)
target_state_prep.draw(output="bloch")
teleport_qubits = favourite_qubits[:3]
qc_teleport_state = QuantumCircuit(qr, cr, name="Teleport Hadamard")
# Prepare state to teleport
qc_teleport_state.compose(qc_state_prep, [qr[0]], inplace=True)
qc_teleport_state.barrier(qr)
# Compose with teleportation circuit
qc_teleport_state.compose(qc_teleport, inplace=True)
qc_teleport_state.draw(output="mpl")
qc_teleport_experiment = qc_teleport_state.copy()
qc_teleport_experiment.measure(qr[2], result)
qc_teleport_experiment = apply_transpile(qc_teleport_experiment, backend, initial_layout=teleport_qubits)
qc_teleport_experiment.draw(output="mpl", idle_wires=True)
teleport_job = execute(qc_teleport_experiment, verbose=False)
teleport_result = teleport_job.result()
print(f"All teleportation counts: {teleport_result.get_counts(0)}")
marginal_teleport_counts = marginal_counts(teleport_result.get_counts(0), indices=[2])
print(f"Marginalized teleportation counts: {marginal_teleport_counts}")
p1 = 0.01
p3 = 3 * p1**2 * (1-p1) + p1**3 # probability of 2 or 3 errors
print('Probability of a single reply being garbled: {}'.format(p1))
print('Probability of a majority of the three replies being garbled: {:.4f}'.format(p3))
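# Quick Monte Carlo sanity check of p3 (a sketch; the seed is arbitrary): sample
# three independent flips per trial and count the trials where a majority vote fails.
rng = np.random.default_rng(1234)
flips = rng.random((100_000, 3)) < p1
print(f"Empirical majority-failure rate: {(flips.sum(axis=1) >= 2).mean():.4f}")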
# Approximate duration of the measurement processing / conditional latency
block_branch_duration = 0.5e-6
block_branch_cycles = convert_cycles(block_branch_duration, backend)
# Setup a base quantum circuit for our experiments
qreg_data = QuantumRegister(3)
qreg_measure = QuantumRegister(2)
creg_data = ClassicalRegister(3)
creg_syndrome = ClassicalRegister(2)
state_data = qreg_data[0]
ancillas_data = qreg_data[1:]
def build_qc() -> QuantumCircuit:
return QuantumCircuit(qreg_data, qreg_measure, creg_data, creg_syndrome)
# Exercise
qc_init = build_qc()
# Complete to initialize the |1> state
qc_init.barrier(qreg_data)
qc_init.draw(output="mpl")
# Solution
qc_init = build_qc()
qc_init.x(qreg_data[0])
qc_init.barrier(qreg_data)
qc_init.draw(output="mpl")
# Exercise
# Complete the method below to encode the bit-flip code
def encode_bit_flip(qc, state, ancillas):
# Complete
qc.barrier(state, *ancillas)
return qc
qc_encode_bit = build_qc()
encode_bit_flip(qc_encode_bit, state_data, ancillas_data)
qc_encode_bit.draw(output="mpl")
# Solution
def encode_bit_flip(qc, state, ancillas):
control = state
for ancilla in ancillas:
qc.cx(control, ancilla)
qc.barrier(state, *ancillas)
return qc
qc_encode_bit = build_qc()
encode_bit_flip(qc_encode_bit, state_data, ancillas_data)
qc_encode_bit.draw(output="mpl")
# Exercise
# Complete the routine below to decode from the bit-flip codespace
def decode_bit_flip(qc, state, ancillas):
#complete
return qc
qc_decode_bit = build_qc()
qc_decode_bit = decode_bit_flip(qc_decode_bit, state_data, ancillas_data)
qc_decode_bit.draw(output="mpl")
qc_encoded_state_bit = qc_init.compose(qc_encode_bit)
qc_encoded_state_bit.draw(output="mpl")
# Solution
def decode_bit_flip(qc, state, ancillas):
inv = qc_encode_bit.inverse()
return qc.compose(inv)
qc_decode_bit = build_qc()
qc_decode_bit = decode_bit_flip(qc_decode_bit, state_data, ancillas_data)
qc_decode_bit.draw(output="mpl")
# Exercise
# Complete the syndrome measurement routine below
def measure_syndrome_bit(qc, qreg_data, qreg_measure, creg_measure):
# Complete
qc.barrier(*qreg_data, *qreg_measure)
return qc
qc_syndrome_bit = measure_syndrome_bit(build_qc(), qreg_data, qreg_measure, creg_syndrome)
qc_syndrome_bit.draw(output="mpl")
qc_measure_syndrome_bit = qc_encoded_state_bit.compose(qc_syndrome_bit)
qc_measure_syndrome_bit.draw(output="mpl")
# Solution
def measure_syndrome_bit(qc, qreg_data, qreg_measure, creg_measure):
def branch_delay():
if sim:
qc.barrier(qreg_data)
qc.delay(block_branch_cycles, qreg_data)
qc.barrier(qreg_data)
qc.cx(qreg_data[0], qreg_measure[0])
qc.cx(qreg_data[1], qreg_measure[0])
qc.cx(qreg_data[0], qreg_measure[1])
qc.cx(qreg_data[2], qreg_measure[1])
qc.measure(qreg_measure, creg_measure)
branch_delay() # grouped in hardware
qc.x(qreg_measure[0]).c_if(creg_measure[0], 1)
qc.x(qreg_measure[1]).c_if(creg_measure[1], 1)
qc.barrier(*qreg_data, *qreg_measure)
return qc
qc_syndrome_bit = measure_syndrome_bit(build_qc(), qreg_data, qreg_measure, creg_syndrome)
qc_syndrome_bit.draw(output="mpl")
# Exercise
# Complete the routine below to apply our decoding and correction sequence
def apply_correction_bit(qc, qreg_data, creg_syndrome):
# Complete
qc.barrier(qreg_data)
return qc
qc_correction_bit = apply_correction_bit(build_qc(), qreg_data, creg_syndrome)
qc_correction_bit.draw(output="mpl")
def apply_final_readout(qc, qreg_data, creg_data):
"""Apply inverse mapping so that we always try and measure |1> in the computational basis.
TODO: The above is just a stand in for proper measurement basis measurement
"""
qc.barrier(qreg_data)
qc.measure(qreg_data, creg_data)
return qc
qc_final_measure = apply_final_readout(build_qc(), qreg_data, creg_data)
qc_final_measure.draw(output="mpl")
bit_code_circuit = qc_measure_syndrome_bit.compose(qc_correction_bit).compose(qc_final_measure)
bit_code_circuit.draw(output="mpl")
# Solution
def apply_correction_bit(qc, qreg_data, creg_syndrome):
# If simulating we need to insert a delay to mirror the hardware
def branch_delay():
if sim:
qc.barrier(qreg_data)
qc.delay(block_branch_cycles, qreg_data)
qc.barrier(qreg_data)
branch_delay()
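# Syndrome decoding for the checks built in measure_syndrome_bit:
# value 3 (both parity checks fire) -> flip data[0],
# value 1 (only the d0 XOR d1 check fires) -> flip data[1],
# value 2 (only the d0 XOR d2 check fires) -> flip data[2].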
qc.x(qreg_data[0]).c_if(creg_syndrome, 3)
branch_delay()
qc.x(qreg_data[1]).c_if(creg_syndrome, 1)
branch_delay()
qc.x(qreg_data[2]).c_if(creg_syndrome, 2)
qc.barrier(qreg_data)
return qc
qc_correction_bit = apply_correction_bit(build_qc(), qreg_data, creg_syndrome)
qc_correction_bit.draw(output="mpl")
def build_error_correction_sequence(
qc_base: QuantumCircuit,
qc_init: Optional[QuantumCircuit],
qc_encode: QuantumCircuit,
qc_channels: List[QuantumCircuit],
qc_syndrome: QuantumCircuit,
qc_correct: QuantumCircuit,
qc_decode: Optional[QuantumCircuit] = None,
qc_final: Optional[QuantumCircuit] = None,
name=None,
) -> QuantumCircuit:
"""Build a typical error correction circuit"""
qc = qc_base
if qc_init:
qc = qc.compose(
qc_init
)
qc = qc.compose(
qc_encode
)
if name is not None:
qc.name = name
if not qc_channels:
qc_channels = [QuantumCircuit(*qc.qregs)]
for qc_channel in qc_channels:
qc = qc.compose(
qc_channel
).compose(
qc_syndrome
).compose(
qc_correct
)
if qc_decode:
qc = qc.compose(qc_decode)
if qc_final:
qc = qc.compose(qc_final)
return qc
bit_code_circuit = build_error_correction_sequence(
build_qc(),
qc_init,
qc_encode_bit,
[],
qc_syndrome_bit,
qc_correction_bit,
None,
qc_final_measure,
)
bit_code_circuit.draw(output="mpl")
layout_circuit = transpile(bit_code_circuit, backend, optimization_level=3)
print(initial_layout := calculate_initial_layout(layout_circuit, False, favourite_qubits))
transpiled_bit_code_circuit = apply_transpile(bit_code_circuit, backend, initial_layout=initial_layout)
transpiled_bit_code_circuit.draw(output="mpl")
result = execute(transpiled_bit_code_circuit).result()
def decode_result(data_counts, syndrome_counts, verbose=True, indent=0):
shots = sum(data_counts.values())
success_trials = data_counts.get('000', 0) + data_counts.get('111', 0)
failed_trials = shots-success_trials
error_correction_events = shots-syndrome_counts.get('00', 0)
if verbose:
print(f"{' ' * indent}Bit flip errors were corrected on {error_correction_events}/{shots} trials")
print(f"{' ' * indent}A final parity error was detected on {failed_trials}/{shots} trials")
print(f"{' ' * indent}No error was detected on {success_trials}/{shots} trials")
return error_correction_events, failed_trials
data_indices = list(range(len(qreg_data)))
syndrome_indices = list(range(data_indices[-1]+1, len(qreg_data) + len(qreg_measure) ))
marginalized_data_result = marginal_counts(result, data_indices)
marginalized_syndrome_result = marginal_counts(result, syndrome_indices)
print(f'Completed bit code experiment data measurement counts {marginalized_data_result.get_counts(0)}')
print(f'Completed bit code experiment syndrome measurement counts {marginalized_syndrome_result.get_counts(0)}')
decode_result(marginalized_data_result.get_counts(0), marginalized_syndrome_result.get_counts(0));
from qiskit.circuit.library import IGate, XGate, ZGate
qreg_error_ancilla = QuantumRegister(1)
creg_error_ancilla = ClassicalRegister(1)
def build_random_error_channel(gate, ancilla, creg_ancilla, error_qubit):
"""Build an error channel that randomly applies a single-qubit gate based on an ancilla qubit measurement result"""
qc = build_qc()
qc.add_register(qreg_error_ancilla)
qc.add_register(creg_error_ancilla)
qc.barrier(ancilla, error_qubit.register)
# 50-50 chance of applying a bit-flip
qc.h(ancilla)
qc.measure(ancilla, creg_ancilla)
qc.append(gate, [error_qubit]).c_if(creg_ancilla, 1)
qc.barrier(ancilla, error_qubit.register)
return qc
qc_id_error_channel = build_random_error_channel(IGate(), qreg_error_ancilla, creg_error_ancilla, qreg_data[0])
print("Identity error channel")
print(qc_id_error_channel.draw(idle_wires=False,output='text',fold=-1))
qc_bit_flip_error_channel = build_random_error_channel(XGate(), qreg_error_ancilla, creg_error_ancilla, qreg_data[0])
print("Bit flip error channel")
print(qc_bit_flip_error_channel.draw(idle_wires=False,output='text',fold=-1))
qc_phase_flip_error_channel = build_random_error_channel(ZGate(), qreg_error_ancilla, creg_error_ancilla, qreg_data[0])
print("Phase flip error channel")
print(qc_phase_flip_error_channel.draw(idle_wires=False,output='text',fold=-1))
def build_error_channel_base():
qc = build_qc()
qc.add_register(qreg_error_ancilla)
qc.add_register(creg_error_ancilla)
return qc
qc_id_error_bit_flip_code = build_error_correction_sequence(
build_error_channel_base(),
qc_init,
qc_encode_bit,
[qc_id_error_channel],
qc_syndrome_bit,
qc_correction_bit,
None,
qc_final_measure,
"Identity error channel"
)
qc_bit_flip_error_bit_flip_code = build_error_correction_sequence(
build_error_channel_base(),
qc_init,
qc_encode_bit,
[qc_bit_flip_error_channel],
qc_syndrome_bit,
qc_correction_bit,
None,
qc_final_measure,
"Bit flip error channel"
)
qc_phase_flip_error_bit_flip_code = build_error_correction_sequence(
build_error_channel_base(),
qc_init,
qc_encode_bit,
[qc_phase_flip_error_channel],
qc_syndrome_bit,
qc_correction_bit,
None,
qc_final_measure,
"Phase flip error channel"
)
circuits_error_channels_bit_flip_code = [qc_id_error_bit_flip_code, qc_bit_flip_error_bit_flip_code, qc_phase_flip_error_bit_flip_code]
qc_bit_flip_error_bit_flip_code.draw(output="mpl")
# We need to add an extra ancilla qubit to our layout
# It doesn't matter which qubit for the most part, as we are using it as a
# source of random information
error_channel_layout = initial_layout + list(set(range(circuits_error_channels_bit_flip_code[0].num_qubits)) - set(initial_layout))[:1]
transpiled_circuits_error_channels_bit_flip_code = apply_transpile(circuits_error_channels_bit_flip_code, backend, initial_layout=error_channel_layout)
result_error_channels_bit_flip_code = execute(transpiled_circuits_error_channels_bit_flip_code).result()
from qiskit.quantum_info.analysis import hellinger_fidelity
def decode_error_channel_result(qc_init, data_counts, syndrome_counts, verbose=True, indent=0):
shots = sum(data_counts.values())
logical_zero = data_counts.get('000', 0)
logical_one = data_counts.get('111', 0)
success_trials = logical_zero + logical_one
failed_trials = shots-success_trials
logical_counts = {"0": logical_zero, "1": logical_one}
ideal_transpiled = apply_transpile(qc_init, backend, initial_layout)
counts_ideal = marginal_counts(ideal_sim.run(ideal_transpiled, shots=success_trials).result().get_counts(0), indices=[0])
fidelity = hellinger_fidelity(counts_ideal, logical_counts)
error_correction_events = shots-syndrome_counts.get('00', 0)
if verbose:
print(f"{' ' * indent}Bit flip errors were corrected on {error_correction_events}/{shots} trials")
print(f"{' ' * indent}A final parity error was detected on {failed_trials}/{shots} trials")
print(f"{' ' * indent}For the successful trials the Hellinger fidelity is {fidelity}")
return error_correction_events, failed_trials
qc_init_outcome = qc_init.copy()
qc_init_outcome.measure(qreg_data[0], 0)
qreg_indices = list(range(len(qreg_data)))
data_indices = qreg_indices[:1]
syndrome_indices = list(range(qreg_indices[-1]+1, len(qreg_data) + len(qreg_measure) ))
result_decoded_data_qubit_marginal_err_ch = marginal_counts(result_error_channels_bit_flip_code, data_indices)
result_data_marginal_err_ch = marginal_counts(result_error_channels_bit_flip_code, qreg_indices)
result_syndrome_marginal_err_ch = marginal_counts(result_error_channels_bit_flip_code, syndrome_indices)
for i, qc in enumerate(transpiled_circuits_error_channels_bit_flip_code):
print(f"For {qc.name} with bit flip code")
print(f' Completed bit code experiment decoded data qubit measurement counts {result_decoded_data_qubit_marginal_err_ch.get_counts(i)}')
print(f' Completed bit code experiment data qubits measurement counts {result_data_marginal_err_ch.get_counts(i)}')
print(f'    Completed bit code experiment syndrome measurement counts {result_syndrome_marginal_err_ch.get_counts(i)}')
decode_error_channel_result(qc_init_outcome, result_data_marginal_err_ch.get_counts(i), result_syndrome_marginal_err_ch.get_counts(i), indent=4);
print("")
# Exercise
# Complete the routine below to apply an "identity" correction rather than
# the standard bit-flip correction sequence
def apply_no_correction_bit(qc, qreg_data, creg_syndrome):
"""Complete the method below to apply an "identity correction"
Make sure to still apply the conditional gates so that the comparison to the bit-flip
code is faithful.
"""
# Complete
qc.barrier(qreg_data)
return qc
qc_no_correction_bit = apply_no_correction_bit(build_qc(), qreg_data, creg_syndrome)
qc_no_correction_bit.draw(output="mpl")
# Exercise
# Complete the sequence using `build_error_correction_sequence` to evaluate the
# performance of the identity correction sequence
# Complete the following routines
qc_id_error_no_correct = QuantumCircuit() # build_error_correction_sequence(...)
qc_bit_flip_error_no_correct = QuantumCircuit() # build_error_correction_sequence(...)
qc_phase_flip_error_no_correct = QuantumCircuit() # build_error_correction_sequence(...)
circuits_error_channels_no_correct = [qc_id_error_no_correct, qc_bit_flip_error_no_correct, qc_phase_flip_error_no_correct]
qc_id_error_no_correct.draw(output="mpl")
# We need to add an extra ancilla qubit to our layout
# It doesn't matter which qubit for the most part, as we are using it as a
# source of random information
error_channel_layout = initial_layout + list(set(range(circuits_error_channels_bit_flip_code[0].num_qubits)) - set(initial_layout))[:1]
transpiled_circuits_error_channels_no_correct = apply_transpile(circuits_error_channels_no_correct, backend, initial_layout=error_channel_layout)
result_error_channels_no_correct = execute(transpiled_circuits_error_channels_no_correct).result()
qc_init_outcome = qc_init.copy()
qc_init_outcome.measure(qreg_data[0], 0)
qreg_indices = list(range(len(qreg_data)))
data_indices = qreg_indices[:1]
syndrome_indices = list(range(qreg_indices[-1]+1, len(qreg_data) + len(qreg_measure) ))
result_decoded_data_qubit_marginal_no_correct = marginal_counts(result_error_channels_no_correct, data_indices)
result_data_marginal_no_correct = marginal_counts(result_error_channels_no_correct, qreg_indices)
result_syndrome_marginal_no_correct = marginal_counts(result_error_channels_no_correct, syndrome_indices)
for i, qc in enumerate(transpiled_circuits_error_channels_no_correct):
print(f"For {qc.name} with bit flip code")
print(f' Completed bit code experiment decoded data qubit measurement counts {result_decoded_data_qubit_marginal_no_correct.get_counts(i)}')
print(f' Completed bit code experiment data qubits measurement counts {result_data_marginal_no_correct.get_counts(i)}')
print(f'    Completed bit code experiment syndrome measurement counts {result_syndrome_marginal_no_correct.get_counts(i)}')
decode_error_channel_result(qc_init_outcome, result_data_marginal_no_correct.get_counts(i), result_syndrome_marginal_no_correct.get_counts(i), indent=4);
print("")
def apply_final_readout_invert(qc, qc_init, qreg_data, creg_data):
"""Apply inverse mapping so that we always try and measure |1> in the computational basis."""
qc = qc.compose(qc_init.inverse())
qc.x(qreg_data[0])
qc.barrier(qreg_data)
qc.measure(qreg_data, creg_data)
return qc
qc_final_measure_invert = apply_final_readout_invert(build_qc(), qc_init, qreg_data, creg_data)
qc_final_measure_invert.draw(output="mpl")
# Solution
qc_id_error_no_correct = build_error_correction_sequence(
build_error_channel_base(),
qc_init,
qc_encode_bit,
[qc_id_error_channel],
qc_syndrome_bit,
qc_no_correction_bit,
None,
qc_final_measure,
"Identity error channel"
)
qc_bit_flip_error_no_correct = build_error_correction_sequence(
build_error_channel_base(),
qc_init,
qc_encode_bit,
[qc_bit_flip_error_channel],
qc_syndrome_bit,
qc_no_correction_bit,
None,
qc_final_measure,
"Bit flip error channel"
)
qc_phase_flip_error_no_correct = build_error_correction_sequence(
build_error_channel_base(),
qc_init,
qc_encode_bit,
[qc_phase_flip_error_channel],
qc_syndrome_bit,
qc_no_correction_bit,
None,
qc_final_measure,
"Phase flip error channel"
)
circuits_error_channels_no_correct = [qc_id_error_no_correct, qc_bit_flip_error_no_correct, qc_phase_flip_error_no_correct]
qc_id_error_no_correct.draw(output="mpl")
from collections import defaultdict
from qiskit.transpiler import PassManager, InstructionDurations
from qiskit_ibm_provider.transpiler.passes.scheduling import DynamicCircuitScheduleAnalysis, PadDelay
def get_circuit_duration_(qc: QuantumCircuit) -> int:
"""Get duration of circuit in hardware cycles."""
durations = InstructionDurations.from_backend(backend)
pm = PassManager([DynamicCircuitScheduleAnalysis(durations)])
pm.run(qc)
node_start_times = pm.property_set["node_start_time"]
block_durations = defaultdict(int)
for inst, (block, t0) in node_start_times.items():
block_durations[block] = max(block_durations[block], t0+inst.op.duration)
duration = sum(block_durations.values())
# If we are running on real hardware the delays have not been appended to the
# circuit directly and are instead built into the conditional operations.
# We account for them manually here
if not sim:
duration += block_branch_cycles * (len(block_durations) - 1)
return duration
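# Example usage (a sketch, reusing the transpiled bit-flip circuit from above):
# estimate the duration of one correction sequence in hardware cycles.
print(f"Bit-flip code duration: {get_circuit_duration_(transpiled_bit_code_circuit)} dt")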
def build_idle_error_correction_sequence(
qc_base: QuantumCircuit,
qc_init: Optional[QuantumCircuit],
qc_encode: QuantumCircuit,
qc_channels: List[QuantumCircuit],
qc_syndrome: QuantumCircuit,
qc_correct: QuantumCircuit,
qc_decode: Optional[QuantumCircuit] = None,
qc_final: Optional[QuantumCircuit] = None,
initial_layout=initial_layout,
name: str = None,
) -> QuantumCircuit:
"""Build a quantum circuit that idles for the period of the input error correction sequence."""
qc = qc_base
if qc_init:
qc = qc.compose(
qc_init
)
if name is not None:
qc.name = name
qc_idle_region = qc_base.copy()
qc_idle_region = qc_idle_region.compose(
qc_encode
)
if not qc_channels:
qc_channels = [QuantumCircuit(*qc.qregs)]
for qc_channel in qc_channels:
qc_idle_region = qc_idle_region.compose(
qc_channel
).compose(
qc_syndrome
).compose(
qc_correct
)
if qc_decode:
qc_idle_region = qc_idle_region.compose(qc_decode)
qc_idle_transpiled = apply_transpile(qc_idle_region, backend, initial_layout=initial_layout, scheduling_method=None)
idle_duration = get_circuit_duration_(qc_idle_transpiled)
qc_idle = qc_base.copy()
qc_idle.barrier()
for qubit in qc_idle.qubits:
qc_idle.delay(idle_duration, qubit, unit="dt")
qc_idle.barrier()
qc = qc.compose(qc_idle)
if qc_final:
qc = qc.compose(qc_final)
return qc
# Exercise
def build_idle_error_channel(time_in_seconds, qreg):
idle_cycles = convert_cycles(time_in_seconds, backend)
qc_idle = build_qc()
# Complete
qc_idle.barrier()
return qc_idle
# Solution
def build_idle_error_channel(time_in_seconds, qreg):
idle_cycles = convert_cycles(time_in_seconds, backend)
qc_idle = build_qc()
qc_idle.delay(idle_cycles, qreg, "dt")
qc_idle.barrier()
return qc_idle
# Exercise
num_rounds = 5
# Idle for a specified period in seconds
# This is how we build an idle error channel
idle_period = 5e-6
# Use the circuit we created above
qc_idle = build_idle_error_channel(idle_period, qreg_data)
qcs_corr_bit = []
qcs_no_corr_bit = []
qcs_idle_equiv_bit = []
for round_ in range(num_rounds):
qc_error_channels = [qc_idle] * (round_ + 1)
# Complete bit flip code iteration
qcs_corr_bit.append(QuantumCircuit())
# Complete no correction bit flip code iteration
qcs_no_corr_bit.append(QuantumCircuit())
# Complete idle equivalent bit flip code iteration
qcs_idle_equiv_bit.append(QuantumCircuit())
qcs_corr_bit[0].draw(output="mpl", idle_wires=False)
transpiled_qcs_corr_bit = apply_transpile(qcs_corr_bit, backend, initial_layout=initial_layout)
job_qcs_corr_bit = execute(transpiled_qcs_corr_bit)
transpiled_qcs_no_corr_bit = apply_transpile(qcs_no_corr_bit, backend, initial_layout=initial_layout)
job_qcs_no_corr_bit = execute(transpiled_qcs_no_corr_bit)
transpiled_qcs_idle_equiv_bit = apply_transpile(qcs_idle_equiv_bit, backend, initial_layout=initial_layout, dynamical_decoupling=True)
job_qcs_idle_equiv_bit = execute(transpiled_qcs_idle_equiv_bit)
result_qcs_corr_bit = job_qcs_corr_bit.result()
result_qcs_no_corr_bit = job_qcs_no_corr_bit.result()
result_qcs_idle_equiv_bit = job_qcs_idle_equiv_bit.result()
transpiled_qcs_idle_equiv_bit[-1].draw(output="mpl")
from qiskit.quantum_info.analysis import hellinger_fidelity
qc_init_outcome = qc_init.copy()
qc_init_outcome = qc_init_outcome.compose(qc_final_measure)
result_ideal = ideal_sim.run(apply_transpile(qc_init_outcome, backend, initial_layout)).result()
def calculate_hellinger_fidelity(result_ideal, result_experiment, data_qubit=0):
result_ideal = marginal_counts(result_ideal, indices=[data_qubit])
result_experiment = marginal_counts(result_experiment, indices=[data_qubit])
counts_ideal = result_ideal.get_counts(0)
hellinger_fidelities = []
for circuit_idx in range(len(result_experiment.results)):
hellinger_fidelities.append(hellinger_fidelity(counts_ideal, result_experiment.get_counts(circuit_idx)))
return hellinger_fidelities
# Evaluate the Hellinger fidelity using the routine above
fidelities_corr_bit = 0.
fidelities_no_corr_bit = 0.
fidelities_idle_equiv_bit = 0.
# Solution
num_rounds = 5
# Idle for a specified period in seconds
# This is how we build an idle error channel
idle_period = 5e-6
# Use the circuit we created above
qc_idle = build_idle_error_channel(idle_period, qreg_data)
qcs_corr_bit = []
qcs_no_corr_bit = []
qcs_idle_equiv_bit = []
for round_ in range(num_rounds):
qc_error_channels = [qc_idle] * (round_ + 1)
#qcs_corr_bit.append(QuantumCircuit())
qcs_corr_bit.append(
build_error_correction_sequence(
build_qc(),
qc_init,
qc_encode_bit,
qc_error_channels,
qc_syndrome_bit,
qc_correction_bit,
qc_decode_bit,
qc_final_measure_invert,
f"With Correction {round_}"
)
)
#qcs_no_corr_bit.append(QuantumCircuit())
qcs_no_corr_bit.append(
build_error_correction_sequence(
build_qc(),
qc_init,
qc_encode_bit,
qc_error_channels,
qc_syndrome_bit,
qc_no_correction_bit,
qc_decode_bit,
qc_final_measure_invert,
name=f"Without Correction {round_}"
)
)
#qcs_idle_equiv_bit.append(QuantumCircuit())
qcs_idle_equiv_bit.append(
build_idle_error_correction_sequence(
build_qc(),
qc_init,
qc_encode_bit,
qc_error_channels,
qc_syndrome_bit,
qc_correction_bit,
qc_decode_bit,
qc_final_measure_invert,
initial_layout=initial_layout,
name=f"Idle {round_}"
)
)
fidelities_corr_bit = calculate_hellinger_fidelity(result_ideal, result_qcs_corr_bit)
fidelities_no_corr_bit = calculate_hellinger_fidelity(result_ideal, result_qcs_no_corr_bit)
fidelities_idle_equiv_bit = calculate_hellinger_fidelity(result_ideal, result_qcs_idle_equiv_bit)
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (12, 6)
iters = range(1, num_rounds+1)
plt.plot(iters, fidelities_corr_bit, label="Bit flip code - correction")
plt.plot(iters, fidelities_no_corr_bit, label="Bit flip code - no correction")
plt.plot(iters, fidelities_idle_equiv_bit, label="Idle equivalent circuit")
plt.ylabel("Hellinger Fidelity")
plt.xlabel("Correction cycles")
plt.xticks(iters)
plt.suptitle("Comparing the performance of error-correction strategies for multiple correction cycles")
plt.title(f"Idle period: {idle_period*1e6}us - Qubit layout: {initial_layout} - Dynamical decoupling: {dynamical_decoupling}")
plt.legend(loc="upper right")
def build_412(circuit,qubits,parity,layout='zxz') -> QuantumCircuit:
### qubits
# qubits: list of 7 qubits
# ex. [q[0],q[1],q[2],q[3],q[4],q[5],q[6]]
#where the first 4 are data qubits, the next 2 are flags, and the last is the ancilla (see image from heavy-hex paper)
### parity
# parity is 'x' or 'z'
### layout
# layout is 'zxz' or 'xzx' and selects which arrangement of flags and ancilla is used for the checks
[d1,d2,d3,d4,f1,f2,a1]=qubits
if layout == 'zxz':
if parity == 'x':
circuit.h(a1)
circuit.cx(a1,f2)
circuit.cx(a1,f1)
circuit.cx(f2,d1)
circuit.cx(f1,d4)
circuit.cx(f2,d2)
circuit.cx(f1,d3)
circuit.cx(a1,f2)
circuit.cx(a1,f1)
circuit.h(a1)
if parity == 'z':
circuit.cx(d2,f2)
circuit.cx(d1,f2)
circuit.cx(d3,f1)
circuit.cx(d4,f1)
return circuit
elif layout == 'xzx':
if parity == 'z':
circuit.h(f1)
circuit.h(f2)
circuit.cx(f2,a1)
circuit.cx(f1,a1)
circuit.cx(d1,f2)
circuit.cx(d3,f1)
circuit.cx(d2,f2)
circuit.cx(d4,f1)
circuit.cx(f2,a1)
circuit.cx(f1,a1)
circuit.h(f1)
circuit.h(f2)
if parity == 'x':
circuit.h(f1)
circuit.h(f2)
circuit.cx(f2,d2)
circuit.cx(f2,d1)
circuit.cx(f1,d3)
circuit.cx(f1,d4)
circuit.h(f1)
circuit.h(f2)
return circuit
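# Example construction (a sketch): build the 'zxz' Z-parity check on 7 fresh
# qubits; ordering follows the comment above (4 data, 2 flags, 1 ancilla).
qr_412 = QuantumRegister(7)
qc_412 = QuantumCircuit(qr_412)
build_412(qc_412, list(qr_412), parity='z')
qc_412.draw()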
# Complete - how do you do the Z and X checks for this code? How do you decode the errors?
# See: https://arxiv.org/abs/2110.04285
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Qiskit/feedback
|
Qiskit
|
# Step 1: setup
from qiskit import QuantumCircuit
from qiskit_aer import AerSimulator
backend = AerSimulator(method="statevector")
# Step 2: conditional initialisation
qc = QuantumCircuit(1, 2)
qc.h(0) # This is just a stand-in for more complex real-world setup.
qc.measure(0, 0)
# Unlike c_if, we can have more than one instruction in the block, and it only
# requires a single evaluation of the condition. That's especially important if
# the bit is written to part way through the block.
with qc.if_test((0, True)):
qc.reset(0)
qc.x(0)
qc.measure(0, 1)
qc.draw()
backend.run(qc).result().get_counts() # {'00': 0.5, '11': 0.5}
# Step 3: repeat until success.
# Previously this wasn't representable in Qiskit at all, because we didn't have
# any concept of a run-time loop.
qc = QuantumCircuit(1, 2)
qc.h(0)
qc.measure(0, 0)
with qc.while_loop((0, False)):
qc.reset(0)
qc.h(0)
qc.measure(0, 0)
qc.measure(0, 1)
qc.draw()
backend.run(qc).result().get_counts() # {'11': 1}
# Step 4: repeat until success, with limits on the number of iterations.
qc = QuantumCircuit(1, 2)
with qc.for_loop(range(2)):
qc.reset(0)
qc.h(0)
qc.measure(0, 0)
with qc.if_test((0, True)):
# 'break' (and 'continue') is also supported by Aer, but won't be part
# of the initial transpiler support for swap routing.
qc.break_loop()
qc.measure(0, 1)
backend.run(qc).result().get_counts() # {'00': 0.25, '11': 0.75}
# Step 5: converting old-style c_if to IfElseOp.
# This transpiler pass is available in Terra 0.22 for backends to use in their
# injected pass-manager stages, so they can begin transitioning to the new forms
# immediately, and can start deprecating support for handling old-style
# `condition`.
from qiskit.transpiler.passes import ConvertConditionsToIfOps
qc = QuantumCircuit(1, 1)
qc.h(0)
qc.measure(0, 0)
qc.x(0).c_if(0, False)
qc.draw()
pass_ = ConvertConditionsToIfOps()
pass_(qc).draw()
# Other things mentioned:
#
# - The `QuantumCircuit.for_loop` context manager returns a `Parameter` object
# that can be used in angle expressions in the loop body's gates. The
# OpenQASM 3 exporter understands this.
#
# - The `QuantumCircuit.if_test` context manager returns a context-manager
# object that can be entered immediately on termination of the "true" body, to
# make an "else" body. For example:
#
# with qc.if_test((0, False)) as else_:
# qc.x(0)
# with else_:
# qc.z(0)
#
# - The OpenQASM 3 exporter supports all these constructs. The easiest path to
# access that is `qiskit.qasm3.dumps`.
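# A runnable sketch of the if/else context manager and the OpenQASM 3 exporter
# described above (the circuit and names here are illustrative only):
from qiskit import qasm3
qc_demo = QuantumCircuit(1, 1)
qc_demo.h(0)
qc_demo.measure(0, 0)
with qc_demo.if_test((0, False)) as else_:
qc_demo.x(0)
with else_:
qc_demo.z(0)
print(qasm3.dumps(qc_demo))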
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, transpile
from qiskit.transpiler import CouplingMap
from qiskit.converters import circuit_to_dag, dag_to_circuit
from qiskit.visualization.gate_map import plot_gate_map, plot_coupling_map, plot_circuit_layout
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi
%matplotlib inline
# OpenQASM 2.0 control flow
qr, cr = QuantumRegister(2), ClassicalRegister(1)
qc = QuantumCircuit(qr, cr)
qc.h(0)
qc.measure(0, 0)
qc.x(1).c_if(cr, 1)
qc.draw()
# `if` statement
qc = QuantumCircuit(qr, cr)
qc.h(0)
qc.measure(0, 0)
with qc.if_test((cr[0], 0)):
qc.x(1)
qc.draw()
qc = QuantumCircuit(2)
with qc.for_loop(range(3)) as i:
qc.cx(0, 1)
qc.rx(i * pi/2, 0)
qc.draw()
qr, cr = QuantumRegister(3), ClassicalRegister(3)
qc = QuantumCircuit(qr, cr)
qc.h(qr)
qc.cx(0, [1, 2])
qc.measure(qr, cr)
with qc.while_loop((cr, 0)):
qc.reset(qr)
qc.cx(0, [1, 2])
qc.measure(qr, cr)
qc.draw()
print(f'circuit depth = {qc.depth()}')
print(f'dag depth = {circuit_to_dag(qc).depth(recurse=True)}')
qc = QuantumCircuit(2)
qc.rz(0.2, 0)
qc.cx(0, 1)
with qc.for_loop(range(3)):
qc.rx(0.3, 1)
qc.rx(0.2, 1)
qc.cx(0, 1)
qc.cx(0, 1)
cqc = transpile(qc, basis_gates=["u", "cx", "for_loop"])
cqc.draw()
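# A ForLoopOp stores its parameters as (indexset, loop_parameter, body), so
# ``params[2]`` below is the loop-body circuit.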
cqc.data[2].operation.params[2].draw()
cmap = CouplingMap.from_line(4)
plot_coupling_map(4, [(0, i) for i in range(4)], list(cmap.get_edges()))
qc = QuantumCircuit(4)
with qc.for_loop(range(2)):
qc.cx(0, [1, 2, 3])
qc.barrier()
cqc = transpile(qc, basis_gates=["u", "cx", "swap", "for_loop"], coupling_map=cmap)
cqc.data[0].operation.params[2].draw()
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import matplotlib.pyplot as plt
from qiskit import Aer
from qiskit.algorithms.state_fidelities import ComputeUncompute
from qiskit.circuit.library import ZZFeatureMap
from qiskit.primitives import Sampler
from qiskit.utils import algorithm_globals, QuantumInstance
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from qiskit_machine_learning.algorithms import QSVC
from qiskit_machine_learning.kernels import QuantumKernel, FidelityQuantumKernel
algorithm_globals.random_seed = 123456
features, labels = make_classification(
n_samples=20,
n_features=2,
n_redundant=0,
random_state=algorithm_globals.random_seed
)
features = MinMaxScaler().fit_transform(features)
plt.scatter(features[:, 0], features[:, 1], c=labels)
train_features, test_feature, train_labels, test_labels = train_test_split(
features, labels,
train_size=15,
random_state=algorithm_globals.random_seed
)
qi_sv = QuantumInstance(
Aer.get_backend("statevector_simulator"),
seed_simulator=algorithm_globals.random_seed,
seed_transpiler=algorithm_globals.random_seed,
)
qk_old = QuantumKernel(feature_map=ZZFeatureMap(2), quantum_instance=qi_sv)
qsvc = QSVC(quantum_kernel=qk_old)
qsvc.fit(train_features, train_labels)
qsvc.score(test_feature, test_labels)
# These arguments are optional; FidelityQuantumKernel creates a default ComputeUncompute fidelity (with a Sampler) if none is given.
sampler = Sampler()
fidelity = ComputeUncompute(sampler)
qk_new = FidelityQuantumKernel(feature_map=ZZFeatureMap(2), fidelity=fidelity)
qsvc = QSVC(quantum_kernel=qk_new)
qsvc.fit(train_features, train_labels)
qsvc.score(test_feature, test_labels)
import qiskit.tools.jupyter
%qiskit_copyright
|
https://github.com/Qiskit/feedback
|
Qiskit
|
%matplotlib inline
import numpy as np
from qiskit_experiments.visualization import (
CurvePlotter,
IQPlotter,
MplDrawer,
PlotStyle,
)
from generate_data import generate_data
data = generate_data()
data_keys = set()
for _, x in data.items():
data_keys.update(x.keys())
print(data_keys)
# Create plotter and set options and style.
plotter = CurvePlotter(MplDrawer())
plotter.set_options(
plot_sigma=[
(1.0, 0.5)
], # Controls confidence-intervals for `y_interp_err` data-keys.
)
plotter.set_figure_options(
series_params={
"A": {"symbol": "o", "color": "C0", "label": "Qubit 0"},
"B": {"symbol": "X", "color": "C1", "label": "Qubit 1"},
"C": {"symbol": "v", "color": "k", "label": "Ideal 0"},
"D": {"symbol": "^", "color": "k", "label": "Ideal 1"},
},
xlabel="Parameter",
ylabel="${\\langle{}Z\\rangle{}}$",
figure_title="Expectation Values",
)
plotter.figure()
plotter.set_series_data("A", x=data["A"]["x"], y=data["A"]["y"])
plotter.figure()
plotter.set_series_data(
"A",
x=data["A"]["x"],
y=data["A"]["y"],
x_interp=data["A"]["x_interp"],
y_interp=data["A"]["y_interp"],
y_interp_err=data["A"]["y_interp_err"],
)
plotter.figure()
plotter.set_series_data(
"B",
x=data["B"]["x"],
y=data["B"]["y"],
x_interp=data["B"]["x_interp"],
y_interp=data["B"]["y_interp"],
y_interp_err=data["B"]["y_interp_err"],
)
plotter.figure()
plotter.set_series_data(
"C",
x=data["C"]["x"],
y=data["C"]["y"],
x_interp=data["C"]["x_interp"],
y_interp=data["C"]["y_interp"],
)
plotter.set_series_data(
"D",
x=data["D"]["x"],
y=data["D"]["y"],
x_interp=data["D"]["x_interp"],
y_interp=data["D"]["y_interp"],
)
plotter.set_options(
style=PlotStyle(legend_loc="lower left"),
)
plotter.figure()
plotter.set_options(subplots=(2, 1))
plotter.set_figure_options(
series_params={
"A": {
"symbol": "o",
"color": "C0",
"label": "Qubit 0",
"canvas": 0,
}, # Here we add "canvas"
"B": {
"symbol": "X",
"color": "C1",
"label": "Qubit 1",
"canvas": 1,
}, # entries to the
"C": {
"symbol": "v",
"color": "k",
"label": "Ideal 0",
"canvas": 0,
}, # `series_params` figure
"D": {"symbol": "^", "color": "k", "label": "Ideal 1", "canvas": 1}, # option.
},
)
plotter.figure()
from generate_data import generate_iq_data
iq_data, iq_discriminator = generate_iq_data()
data_keys = set()
for _, x in iq_data.items():
data_keys.update(x.keys())
print(data_keys)
# Create plotter
iq_plotter = IQPlotter(MplDrawer())
# Set options.
iq_plotter.set_options(
style=PlotStyle(
figsize=(6, 4),
legend_loc=None,
textbox_rel_pos=(0.18, -0.30),
),
)
iq_plotter.set_figure_options(
series_params={
"0": {"label": "$|0>$"},
"1": {"label": "$|1>$"},
"2": {"label": "$|2>$"},
},
xlabel="In-Phase",
ylabel="Quad.",
xval_unit="Arb.",
yval_unit="Arb.",
figure_title="Single-Shots",
)
# Set data.
for series, series_data in iq_data.items():
iq_plotter.set_series_data(series, **series_data)
# Generate figure.
iq_plotter.figure()
iq_plotter.set_supplementary_data(discriminator=iq_discriminator)
iq_plotter.figure()
import matplotlib.pyplot as plt
fig, axes = plt.subplots(1, 2, figsize=(10, 6))
plotter.set_options(axis=axes[1])
iq_plotter.set_options(axis=axes[0])
# Generate both figures, which adds graphics to each subplot.
plotter.figure()
iq_plotter.figure()
# Tighten layout
plt.tight_layout()
|
https://github.com/Qiskit/feedback
|
Qiskit
|
def compose_1q(lhs: Integral, rhs: Integral) -> Integral:
"""Return the composition of 1-qubit clifford integers."""
return _CLIFFORD_COMPOSE_1Q[lhs, rhs]
def compose_2q(lhs: Integral, rhs: Integral) -> Integral:
"""Return the composition of 2-qubit clifford integers."""
num = lhs
for layer, idx in enumerate(_layer_indices_from_num(rhs)):
circ = _CLIFFORD_LAYER[layer][idx]
num = _compose_num_with_circuit_2q(num, circ)
return num
def _compose_num_with_circuit_2q(num: Integral, qc: QuantumCircuit) -> Integral:
"""Compose a number that represents a Clifford, with a Clifford circuit, and return the
number that represents the resulting Clifford."""
lhs = num
for inst in qc:
qubits = tuple(qc.find_bit(q).index for q in inst.qubits)
rhs = _num_from_2q_gate(op=inst.operation, qubits=qubits)
try:
lhs = _CLIFFORD_COMPOSE_2Q_GATE[lhs, rhs]
except KeyError as err:
raise Exception(f"_CLIFFORD_COMPOSE_2Q_GATE[{lhs}][{rhs}]") from err
return lhs
from qiskit.providers.fake_provider import FakeManila
from qiskit_experiments.library.randomized_benchmarking import StandardRB
backend=FakeManila()
%%time
# create experiment
exp = StandardRB(
qubits=[3],
lengths=list(range(1, 1000, 100)),
num_samples=3,
seed=1234,
backend=backend,
)
# run experiment
expdata = exp.run().block_for_results()
display(expdata.figure(0))
num_lengths = 10
# run several experiments with changing max clifford length (max_length)
for max_length in [1000, 2000, 3000, 4000, 5000]: # [100, 200, 300, 400, 500] for 2Q RBs
exp = StandardRB(
qubits=[2], # for 1Q RBs ([2, 1] for 2Q RBs)
lengths=np.arange(1, max_length, max_length // num_lengths), # see below for examples
backend=FakeManilaV2(),
num_samples=3,
full_sampling=False,
)
# measure the time to generate transpiled RB circuits
exp._transpiled_circuits()
import numpy as np
max_length, num_lengths = 1000, 10
print(np.arange(1, max_length, max_length // num_lengths)) # = range(1, 1000, 100)
max_length, num_lengths = 2000, 10
print(np.arange(1, max_length, max_length // num_lengths)) # = range(1, 2000, 200)
import copy
from qiskit.circuit.library.standard_gates import SXGate
from qiskit.providers.fake_provider import FakeManilaV2
from qiskit.pulse import Schedule, InstructionScheduleMap
from qiskit_experiments.library.randomized_benchmarking import StandardRB
qubits=(2,)
lengths=list(range(1, 1000, 100))
num_samples=3
seed=777
backend = FakeManilaV2()
%%time
qubits = (2,)
my_sched = Schedule(name="custom_sx_gate")
my_backend = copy.deepcopy(backend)
my_backend.target["sx"][qubits].calibration = my_sched
exp = StandardRB(qubits=qubits, lengths=lengths, num_samples=num_samples, backend=my_backend)
transpiled = exp._transpiled_circuits()
%%time
qubits = (2,)
my_sched = Schedule(name="custom_sx_gate")
my_inst_map = InstructionScheduleMap()
my_inst_map.add(SXGate(), qubits, my_sched)
exp = StandardRB(qubits=qubits, lengths=lengths, num_samples=num_samples, backend=backend)
exp.set_transpile_options(inst_map=my_inst_map)
transpiled = exp._transpiled_circuits()
from qiskit_experiments.library.randomized_benchmarking import StandardRB, InterleavedRB
from qiskit.providers.fake_provider import FakeManila
qubits=[2]
lengths=list(range(1, 1000, 100))
num_samples=3
seed=777
backend = FakeManila()
print("Cliford lengths:", lengths)
print("Number of circuits =", len(lengths)*num_samples)
%%time
# rb_speedup
exp = StandardRB(
qubits=qubits,
lengths=lengths,
num_samples=num_samples,
seed=seed,
backend=backend,
)
transpiled = exp._transpiled_circuits()
# %%time
# # Main
# exp = StandardRB(
# qubits=qubits,
# lengths=lengths,
# num_samples=num_samples,
# seed=seed,
# backend=backend,
# )
# transpiled = exp._transpiled_circuits()
qubits=[3, 2]
lengths=list(range(1, 200, 20))
num_samples=3
seed=777
backend = FakeManila()
print("Cliford lengths:", lengths)
print("Number of circuits =", len(lengths)*num_samples)
%%time
# rb_speedup
exp = StandardRB(
qubits=qubits,
lengths=lengths,
num_samples=num_samples,
seed=seed,
backend=backend,
)
transpiled = exp._transpiled_circuits()
# %%time
# # Main
# exp = StandardRB(
# qubits=qubits,
# lengths=lengths,
# num_samples=num_samples,
# seed=seed,
# backend=backend,
# )
# transpiled = exp._transpiled_circuits()
from qiskit.circuit import Delay
qubits=[2]
lengths=list(range(1, 1000, 100))
num_samples=3
seed=777
backend = FakeManila()
print("Cliford lengths:", lengths)
print("Number of circuits =", len(lengths)*num_samples*2)
from qiskit_experiments.framework.backend_timing import BackendTiming
timing = BackendTiming(backend)
duration_in_dt = timing.round_delay(time=backend.properties().gate_length("sx", qubits)) # 160[dt]
%%time
# rb_speedup
exp = InterleavedRB(
interleaved_element=Delay(duration=duration_in_dt),
qubits=qubits,
lengths=lengths,
num_samples=num_samples,
seed=seed,
backend=backend,
)
transpiled = exp._transpiled_circuits()
# %%time
# # Main
# exp = InterleavedRB(
# interleaved_element=Delay(duration=duration_in_dt),
# qubits=qubits,
# lengths=lengths,
# num_samples=num_samples,
# seed=seed,
# backend=backend,
# )
# transpiled = exp._transpiled_circuits()
qubits=[3, 2]
lengths=list(range(1, 200, 20))
num_samples=3
seed=777
backend = FakeManila()
print("Cliford lengths:", lengths)
print("Number of circuits =", len(lengths)*num_samples*2)
from qiskit.circuit import Delay, QuantumCircuit
delay_qc = QuantumCircuit(2)
delay_qc.delay(1600, 0)
delay_qc.delay(1600, 1)
delay_qc.draw()
%%time
# rb_speedup
exp = InterleavedRB(
interleaved_element=delay_qc,
qubits=qubits,
lengths=lengths,
num_samples=num_samples,
seed=seed,
backend=backend,
)
transpiled = exp._transpiled_circuits()
# %%time
# # Main
# exp = InterleavedRB(
# interleaved_element=delay_qc,
# qubits=qubits,
# lengths=lengths,
# num_samples=num_samples,
# seed=seed,
# backend=backend,
# )
# transpiled = exp._transpiled_circuits()
from qiskit.compiler import transpile
from qiskit.providers.fake_provider import FakeManila
# common RB configuration
qubits=[2]
lengths=list(range(1, 1000, 100))
num_samples=3
seed=777
backend = FakeManila()
print("Cliford lengths:", lengths)
print("Number of circuits =", len(lengths)*num_samples)
from qiskit.ignis.verification import randomized_benchmarking as rb
%%time
# Ignis
# create the RB circuits
circs, _ = rb.randomized_benchmarking_seq(
rb_pattern=[qubits],
length_vector=lengths,
nseeds=num_samples,
seed_offset=seed,
)
# transpile (for basis translation)
transpiled = []
for qcs in circs:
for qc in qcs:
transpiled.append(transpile(qc, backend=backend, optimization_level=0))
# transpiled[0].draw(idle_wires=False)
from qiskit_experiments.library.randomized_benchmarking import StandardRB, InterleavedRB
%%time
# Experiments
# create experiment object
exp = StandardRB(
qubits=qubits,
lengths=lengths,
num_samples=num_samples,
seed=seed,
backend=backend,
)
# compute transpiled circuits
transpiled_qe = exp._transpiled_circuits()
# transpiled_qe[0].draw(idle_wires=False)
# common RB configuration
qubits=[3, 2]
lengths=list(range(1, 200, 20))
num_samples=3
seed=777
backend = FakeManila()
print("Cliford lengths:", lengths)
print("Number of circuits =", len(lengths)*num_samples)
from qiskit.ignis.verification import randomized_benchmarking as rb
%%time
# Ignis
# create the RB circuits
circs, _ = rb.randomized_benchmarking_seq(
rb_pattern=[qubits],
length_vector=lengths,
nseeds=num_samples,
seed_offset=seed,
)
# transpile (for basis translation)
transpiled = []
for qcs in circs:
for qc in qcs:
transpiled.append(transpile(qc, backend=backend, optimization_level=0))
# transpiled[0].draw(idle_wires=False)
from qiskit_experiments.library.randomized_benchmarking import StandardRB, InterleavedRB
%%time
# Experiments
# create experiment object
exp = StandardRB(
qubits=qubits,
lengths=lengths,
num_samples=num_samples,
seed=seed,
backend=backend,
)
# compute transpiled circuits
transpiled_qe = exp._transpiled_circuits()
# transpiled_qe[0].draw(idle_wires=False)
# common RB configuration
from qiskit.circuit import Delay
qubits=[2]
lengths=list(range(1, 1000, 100))
num_samples=3
seed=777
backend = FakeManila()
print("Cliford lengths:", lengths)
print("Number of circuits =", len(lengths)*num_samples*2)
%%time
# Ignis
# create the RB circuits
circuits, xdata, circuits_interleaved = rb.randomized_benchmarking_seq(
rb_pattern=[qubits],
length_vector=lengths,
nseeds=num_samples,
seed_offset=seed,
interleaved_elem=[Delay(duration=160)]
)
# transpile (for basis translation)
transpiled = []
for qcs in circuits:
for qc in qcs:
transpiled.append(transpile(qc, backend=backend, optimization_level=0))
for qcs in circuits_interleaved:
for qc in qcs:
transpiled.append(transpile(qc, backend=backend, optimization_level=0))
# transpiled[30].draw(idle_wires=False)
%%time
# Experiments
from qiskit_experiments.framework.backend_timing import BackendTiming
timing = BackendTiming(backend)
duration_in_dt = timing.round_delay(time=backend.properties().gate_length("sx", qubits)) # 160[dt]
# create experiment object
exp = InterleavedRB(
interleaved_element=Delay(duration=duration_in_dt),
qubits=qubits,
lengths=lengths,
num_samples=num_samples,
seed=seed,
backend=backend,
)
# compute transpiled circuits
transpiled_qe = exp._transpiled_circuits()
# transpiled_qe[30].draw(idle_wires=False)
duration_in_dt
timing.round_delay(time=100e-9) # 100[ns] == 450[dt]
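# Sketch: BackendTiming also exposes the sample duration dt (when the backend
# reports it), so a delay in samples converts back to seconds.
print(timing.dt)                   # duration of one sample, in seconds
print(duration_in_dt * timing.dt)  # the rounded delay, in seconds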
# common RB configuration
from qiskit.circuit import Delay, QuantumCircuit
qubits=[3, 2]
lengths=list(range(1, 200, 20))
num_samples=3
seed=777
backend = FakeManila()
print("Cliford lengths:", lengths)
print("Number of circuits =", len(lengths)*num_samples*2)
delay_qc = QuantumCircuit(2)
delay_qc.delay(1600, 0)
delay_qc.delay(1600, 1)
delay_qc.draw()
%%time
# Ignis
# create the RB circuits
circuits, xdata, circuits_interleaved = rb.randomized_benchmarking_seq(
rb_pattern=[qubits],
length_vector=lengths,
nseeds=num_samples,
seed_offset=seed,
interleaved_elem=[delay_qc]
)
# transpile (for basis translation)
transpiled = []
for qcs in circuits:
for qc in qcs:
transpiled.append(transpile(qc, backend=backend, optimization_level=0))
for qcs in circuits_interleaved:
for qc in qcs:
transpiled.append(transpile(qc, backend=backend, optimization_level=0))
# transpiled[30].draw(idle_wires=False)
%%time
# Experiments
# create experiment object
exp = InterleavedRB(
interleaved_element=delay_qc,
qubits=qubits,
lengths=lengths,
num_samples=num_samples,
seed=seed,
backend=backend,
)
# compute transpiled circuits
transpiled_qe = exp._transpiled_circuits()
# transpiled_qe[30].draw(idle_wires=False)
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit.circuit.random import random_circuit
circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(circuit.draw("mpl"))
from qiskit.quantum_info import SparsePauliOp
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")
from qiskit.primitives import Estimator
estimator = Estimator()
job = estimator.run(circuit, observable)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Expectation value: {result.values[0]}")
circuit = random_circuit(2, 2, seed=1).decompose(reps=1)
observable = SparsePauliOp("IY")
job = estimator.run(circuit, observable)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Observable: {observable.paulis}")
print(f">>> Expectation value: {result.values[0]}")
circuits = (
random_circuit(2, 2, seed=0).decompose(reps=1),
random_circuit(2, 2, seed=1).decompose(reps=1),
)
observables = (
SparsePauliOp("XZ"),
SparsePauliOp("IY"),
)
job = estimator.run(circuits, observables)
result = job.result()
print(f">>> Expectation values: {result.values.tolist()}")
from qiskit.circuit.library import RealAmplitudes
circuit = RealAmplitudes(num_qubits=2, reps=2).decompose(reps=1)
observable = SparsePauliOp("ZI")
parameter_values = [0, 1, 2, 3, 4, 5]
job = estimator.run(circuit, observable, parameter_values)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Observable: {observable.paulis}")
print(f">>> Parameter values: {parameter_values}")
print(f">>> Expectation value: {result.values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService
# QiskitRuntimeService.save_account(channel="ibm_quantum", token="MY_API_TOKEN") was used to save account.
service = QiskitRuntimeService(channel="ibm_quantum")
backend = service.backend("ibmq_qasm_simulator")
from qiskit.circuit.random import random_circuit
from qiskit.quantum_info import SparsePauliOp
circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(circuit.draw("mpl"))
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")
from qiskit_ibm_runtime import Estimator
estimator = Estimator(session=backend)
job = estimator.run(circuit, observable)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Expectation value: {result.values[0]}")
print(f" > Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
options = Options(optimization_level=3, environment={"log_level": "INFO"})
from qiskit_ibm_runtime import Options
options = Options()
options.resilience_level = 1
options.execution.shots = 2048
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable).result()
print(f">>> Metadata: {result.metadata[0]}")
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable, shots=1024).result()
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
# optimization_level=3 adds dynamical decoupling
# resilience_level=1 adds readout error mitigation
options = Options(optimization_level=3, resilience_level=1)
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value: {result.values[0]}")
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Session, Estimator
with Session(backend=backend, max_time="1h"):
estimator = Estimator()
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value from the first run: {result.values[0]}")
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value from the second run: {result.values[0]}")
from qiskit_ibm_runtime import Session, Sampler, Estimator
with Session(backend=backend):
sampler = Sampler()
estimator = Estimator()
result = sampler.run(sampler_circuit).result()
print(f">>> Quasi Distribution from the sampler job: {result.quasi_dists[0]}")
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value from the estimator job: {result.values[0]}")
from qiskit_ibm_runtime import Session, Sampler, Estimator
with Session(backend=backend):
sampler = Sampler()
estimator = Estimator()
sampler_job = sampler.run(sampler_circuit)
estimator_job = estimator.run(circuit, observable)
print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService, Session, Sampler, Estimator, Options
# 1. Initialize account
service = QiskitRuntimeService(channel="ibm_quantum")
# 2. Specify options, such as enabling error mitigation
options = Options(resilience_level=1)
# 3. Select a backend.
backend = service.backend("ibmq_qasm_simulator")
# 4. Create a session
with Session(backend=backend):
# 5. Create primitive instances
sampler = Sampler(options=options)
estimator = Estimator(options=options)
# 6. Submit jobs
sampler_job = sampler.run(sampler_circuit)
estimator_job = estimator.run(circuit, observable)
# 7. Get results
print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import os
from qiskit import QuantumCircuit, execute
from qiskit.compiler import transpile
from qiskit.providers.aer import Aer
from qiskit.circuit.random import random_circuit
from rustworkx.visualization import mpl_draw
from qiskit_iqm import IQMProvider
qc_1 = random_circuit(5, 5, measure=True)
qc_2 = random_circuit(5, 5, measure=True)
backend = Aer.get_backend('qasm_simulator')
job = execute([qc_1, qc_2], backend, shots=1000)
result = job.result()
from qiskit_iqm import IQMProvider
provider = IQMProvider(os.environ['IQM_SERVER_URL'])
from qiskit.providers.provider import Provider
isinstance(provider, Provider)
backend = provider.get_backend()
from qiskit.providers import BackendV2
isinstance(backend, BackendV2)
backend.num_qubits
backend.target.qargs
backend.index_to_qubit_name(3)
mpl_draw(backend.coupling_map.graph)
backend.operation_names
qc = random_circuit(5, 5, measure=True, seed=321)
qc.draw(output='mpl')
qc_transpiled = transpile(qc, backend=backend, optimization_level=3)
qc_transpiled.draw(output='mpl')
qc_1 = random_circuit(5, 5, measure=True, seed=123)
qc_2 = random_circuit(5, 5, measure=True, seed=456)
job = execute([qc_1, qc_2], backend, shots=1000)
from qiskit.providers import JobV1
isinstance(job, JobV1)
job.status()
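# Sketch: once the job completes, it follows the standard Qiskit Result
# interface, so counts can be retrieved per circuit.
result = job.result()
print(result.get_counts(qc_1))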
|
https://github.com/Qiskit/feedback
|
Qiskit
|
!git clone --branch qamp-qiskit-demodays https://github.com/qiskit-community/qiskit-qec.git /Users/ruihaoli/qiskit-qec
%cd ../qiskit-qec
!pip install -r requirements.txt
import numpy as np
from qiskit_qec.operators.xp_pauli import XPPauli
from qiskit_qec.operators.xp_pauli_list import XPPauliList
# XPPauli object
a = XPPauli(
data=np.array([0, 3, 1, 6, 4, 3], dtype=np.int64), phase_exp=11, precision=4
)
unique_a = a.unique_vector_rep()
print(unique_a.matrix)
print(unique_a.x)
print(unique_a.z)
print(unique_a._phase_exp)
# XPPauliList object
b = XPPauliList(
data=np.array([[1, 0, 1, 1, 5, 3, 5, 4], [1, 0, 1, 0, 1, 5, 2, 0]], dtype=np.int64),
phase_exp=np.array([4, 7]),
precision=6,
)
unique_b = b.unique_vector_rep()
print(unique_b.matrix)
print(unique_b._phase_exp)
a = XPPauli(
data=np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0]), phase_exp=12, precision=8
)
rescaled_a = a.rescale_precision(new_precision=2, inplace=False)
print(rescaled_a.matrix)
print(rescaled_a._phase_exp)
# Case where it is not possible to rescale
a.rescale_precision(new_precision=3, inplace=False)
a = XPPauli(data=np.array([1, 1, 2, 2, 1, 2, 3, 3]), phase_exp=0, precision=8)
antisym_op = a.antisymmetric_op(int_vec=a.z)
print(antisym_op.matrix)
print(antisym_op._phase_exp)
a = XPPauli(data=np.array([0, 1, 0, 0, 2, 0]), phase_exp=6, precision=4)
b = XPPauli(data=np.array([1, 1, 1, 3, 3, 0]), phase_exp=2, precision=4)
product = a.compose(b, inplace=False)
print(product.matrix)
print(product._phase_exp)
# Multiplying two XPPauliList objects
a = XPPauliList(
data=np.array([[1, 0, 1, 1, 5, 3, 5, 4], [1, 0, 1, 0, 1, 5, 2, 0]]),
phase_exp=np.array([4, 7]),
precision=6,
)
b = XPPauliList(
data=np.array([[1, 0, 0, 1, 4, 1, 0, 1], [0, 1, 1, 0, 1, 3, 0, 5]]),
phase_exp=np.array([11, 2]),
precision=6,
)
product = a.compose(b, inplace=False)
print(product.matrix)
print(product._phase_exp)
a = XPPauliList(
data=np.array([[1, 0, 1, 1, 5, 3, 5, 4], [1, 0, 1, 1, 5, 4, 1, 5]]),
phase_exp=[1, 2],
precision=6,
)
a1 = a.power(n=5)
print(a1.matrix)
print(a1._phase_exp, "\n")
a2 = a.power(n=[3, 4])
print(a2.matrix)
print(a2._phase_exp)
import qiskit_qec.arithmetic.modn as modn
# Quotient of a/b in the ring Z_N
a, b, N = 18, 3, 5
q = modn.quo(a, b, N)
print(q)
print(q == (a % N) // (b % N))
# Divisor of a/b in the ring Z_N
a, b, N = 24, 8, 7
d = modn.div(a, b, N)
print(d)
print((b * d) % N == a % N)
# Annihilator of a in the ring Z/nZ
a, N = 10, 5
ann = modn.ann(a, N)
print(ann)
print((a * ann) % N == 0)
from qiskit_qec.linear.matrix import do_row_op
mat = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
N = 4
print(mat, "\n")
# Swap rows 0 and 1
mat1 = do_row_op(mat, ("swap", [0, 1], []), N)
print(mat1, "\n")
# Append the product of row 0 by a scalar (3) to the end of matrix
mat2 = do_row_op(mat, ("append", [0], [3]), N)
print(mat2)
from qiskit_qec.linear.matrix import howell, howell_complete
mat = np.array([[8, 5, 5], [0, 9, 8], [0, 0, 10]])
N = 12
howell_mat = howell(mat, N)
print(howell_mat, "\n")
howell_mat, transform_mat, kernel_mat = howell_complete(mat, N)
print(howell_mat, "\n")
print(transform_mat, "\n")
print(kernel_mat)
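# Sanity check (a sketch): the Howell form should satisfy H = T @ M (mod N)
# and K @ M = 0 (mod N) for the (H, T, K) triple returned above.
print(np.array_equal((transform_mat @ mat) % N, howell_mat % N))
print(np.all((kernel_mat @ mat) % N == 0))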
|
https://github.com/Qiskit/feedback
|
Qiskit
|
%load_ext autoreload
%autoreload 2
import warnings
warnings.filterwarnings("ignore")
from qiskit_metal import designs, MetalGUI
from qiskit_metal.qlibrary.sample_shapes.rectangle import Rectangle
from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket
from qiskit_metal.qlibrary.tlines.straight_path import RouteStraight
from qiskit_metal.qlibrary.tlines.meandered import RouteMeander
from qiskit_metal.qlibrary.terminations.open_to_ground import OpenToGround
from qiskit_metal.qlibrary.terminations.short_to_ground import ShortToGround
design = designs.MultiPlanar({}, overwrite_enabled=True, layer_stack_filename="layer_stack.txt")
design.variables.sample_holder_top = '320 um'
design.variables.sample_holder_bottom ='250 um'
gui = MetalGUI(design)
design.ls.ls_df
design.delete_all_components()
# Define a dictionary for connection pad options for the transmon
conn_pads1 = dict(connection_pads = dict(coupler = dict(loc_W=1, loc_H=1),
readout = dict(loc_W=-1, loc_H=-1)))
conn_pads2 = dict(pad_width = '550 um',
pad_gap = '15um',
connection_pads = dict(coupler = dict(loc_W=-1, loc_H=1,pad_width='275um', pad_cpw_shift = '12.5um'),
readout = dict(loc_W=1, loc_H=-1,pad_width='275um')))
# Create a TransmonPocket6 object
q1 = TransmonPocket(design, "Q1", options=dict(pos_x='-0.5mm', pos_y='+0.0mm', layer=1, **conn_pads1))
q2 = TransmonPocket(design, "Q2", options=dict(pos_x='+0.5mm', pos_y='+0.0mm', layer=1, **conn_pads2))
# Rebuild and autoscale the GUI
gui.rebuild()
gui.autoscale()
gui.screenshot()
# Connect Transmons with direct coupler
coupler_options = dict(pin_inputs = dict(start_pin=dict(component=q2.name, pin='coupler'),
end_pin=dict(component=q1.name, pin='coupler')))
coupler_connector = RouteStraight(design, 'coupler_connector', options= coupler_options)
# Rebuild and autoscale the GUI
gui.rebuild()
gui.autoscale()
gui.screenshot()
# Create open-to-ground elements for readout resonators
otg11_options = dict(pos_x='-1.5mm', pos_y='0.0mm', orientation='180', layer='3')
otg21_options = dict(pos_x='+1.5mm', pos_y='0.0mm', orientation='0', layer='3')
otg12_options = dict(pos_x='-0.955mm', pos_y='-0.195mm', orientation='0', layer='3')
otg22_options = dict(pos_x='+0.955mm', pos_y='-0.1875mm', orientation='180', layer='3')
otg11 = OpenToGround(design, 'otg11', options=otg11_options)
otg21 = OpenToGround(design, 'otg21', options=otg21_options)
otg12 = OpenToGround(design, 'otg12', options=otg12_options)
otg22 = OpenToGround(design, 'otg22', options=otg22_options)
# Create readout resonators
readout1_options = dict(total_length = '5.97mm',
fillet = '40um',
pin_inputs = dict(start_pin = dict(component = otg11.name, pin = 'open'),
end_pin = dict(component = otg12.name, pin = 'open')),
lead=dict(start_straight='150um'),
meander=dict(spacing = '100um', asymmetry = '0'),
layer = '3')
readout2_options = dict(total_length = '5.97mm',
fillet = '40um',
pin_inputs = dict(start_pin = dict(component = otg22.name, pin = 'open'),
end_pin = dict(component = otg21.name, pin = 'open')),
lead=dict(start_straight='120um'),
meander=dict(spacing = '100um', asymmetry = '0.2'),
layer = '3')
readout1 = RouteMeander(design, 'readout1', options=readout1_options)
readout2 = RouteMeander(design, 'readout2', options=readout2_options)
# Rebuild and autoscale the GUI
gui.rebuild()
gui.autoscale()
gui.screenshot()
# Create via components
via_positions = [('-0.94mm','-0.1950mm'),('+0.94mm','-0.1875mm')]
for layer in ['1','2','3']:
for via, positions in enumerate(via_positions):
for subtract in zip([False,True],['30um','42um']):
via_size = '20um' if layer == '2' else subtract[1]
actual_layer = '5' if (layer == '2' and subtract[0]) else layer
via_options = dict(width = via_size,
height = via_size,
pos_x = positions[0],
pos_y = positions[1],
subtract = subtract[0],
layer = actual_layer,
orientation = '0',
helper = 'False',
chip = 'main')
name = 'via'+str(via+1)+'_layer'+actual_layer+('' if not subtract[0] else '_sub')
Rectangle(design, name, via_options)
# Rebuild and autoscale the GUI
gui.rebuild()
gui.autoscale()
gui.screenshot()
from qiskit_metal.renderers.renderer_elmer.elmer_renderer import QElmerRenderer
elmer_renderer = QElmerRenderer(design, layer_types=dict(metal=[1,2,3], dielectric=[4,5]))
elmer_renderer.render_design(mesh_geoms=True,
skip_junctions=True,
open_pins=[("Q1", "readout"),("Q2", "readout")],
omit_ground_for_layers=[2])
#elmer_renderer.launch_gmsh_gui()
elmer_renderer.export_mesh()
elmer_renderer.add_solution_setup('capacitance')
elmer_renderer.run('capacitance')
# Display capacitance matrix in [fF]
cap_matrix = elmer_renderer.capacitance_matrix
cap_matrix
elmer_renderer.display_post_processing_data()
elmer_renderer.close()
# Mesh Q1-only design
elmer_renderer = QElmerRenderer(design, layer_types=dict(metal=[1,2,3], dielectric=[4,5]))
select = ['Q1', 'via1_layer1', 'via1_layer2', 'via1_layer3',
'via1_layer1_sub', 'via1_layer5_sub', 'via1_layer3_sub']
elmer_renderer.render_design(selection=select,
open_pins=[('Q1', 'coupler'), ('Q1', 'readout')],
skip_junctions=True,
omit_ground_for_layers=[2])
elmer_renderer.launch_gmsh_gui()
# Solve Q1-only mode and save Cap Matrix
elmer_renderer.export_mesh()
elmer_renderer.add_solution_setup('capacitance')
elmer_renderer.run('capacitance')
cap_matrix_q1 = elmer_renderer.capacitance_matrix
#elmer_renderer.close()
cap_matrix_q1
elmer_renderer.display_post_processing_data()
elmer_renderer.close()
# Mesh Q2-only design
elmer_renderer = QElmerRenderer(design, layer_types=dict(metal=[1,2,3], dielectric=[4,5]))
select = ['Q2', 'via2_layer1', 'via2_layer2', 'via2_layer3',
'via2_layer1_sub', 'via2_layer5_sub', 'via2_layer3_sub']
elmer_renderer.render_design(selection=select,
open_pins=[('Q2', 'coupler'), ('Q2', 'readout')],
skip_junctions=True, omit_ground_for_layers=[2])
#elmer_renderer.launch_gmsh_gui()
# Solve Q2-only mode and save Cap Matrix
elmer_renderer.export_mesh()
elmer_renderer.add_solution_setup('capacitance')
elmer_renderer.run('capacitance')
cap_matrix_q2 = elmer_renderer.capacitance_matrix
elmer_renderer.close()
cap_matrix_q2
%matplotlib inline
import scqubits as scq
from scipy.constants import speed_of_light as c_light
import matplotlib.pyplot as plt
from qiskit_metal.analyses.quantization.lumped_capacitive import load_q3d_capacitance_matrix
from qiskit_metal.analyses.quantization.lom_core_analysis import CompositeSystem, Cell, Subsystem, QuantumSystemRegistry
# cell 1: 1st transmon (Q1)
opt1 = dict(node_rename = {'Q1_coupler_connector_pad': 'coupling', 'via1_layer3_rectangle': 'readout_1'},
cap_mat = cap_matrix_q1,
ind_dict = {('Q1_pad_top', 'Q1_pad_bot'):10}, # junction inductance in nH
jj_dict = {('Q1_pad_top', 'Q1_pad_bot'):'j1'},
cj_dict = {('Q1_pad_top', 'Q1_pad_bot'):2} # junction capacitance in fF
)
cell_1 = Cell(opt1)
# cell 2: 2nd transmon (Q2)
opt2 = dict(node_rename = {'Q2_coupler_connector_pad': 'coupling', 'via2_layer3_rectangle': 'readout_2'},
cap_mat = cap_matrix_q2,
ind_dict = {('Q2_pad_top', 'Q2_pad_bot'): 12}, # junction inductance in nH
jj_dict = {('Q2_pad_top', 'Q2_pad_bot'):'j2'},
cj_dict = {('Q2_pad_top', 'Q2_pad_bot'):2} # junction capacitance in fF
)
cell_2 = Cell(opt2)
# subsystem 1: transmon 1
transmon_1 = Subsystem(name='transmon_1', sys_type='TRANSMON', nodes=['j1'])
# subsystem 2: transmon 2
transmon_2 = Subsystem(name='transmon_2', sys_type='TRANSMON', nodes=['j2'])
# subsystem 3: readout resonator 1
q_opts = dict(f_res = 8, # resonator dressed frequency in GHz
Z0 = 50, # characteristic impedance in Ohm
vp = 0.404314 * c_light # phase velocity
)
res_1 = Subsystem(name='readout_1', sys_type='TL_RESONATOR', nodes=['readout_1'], q_opts=q_opts)
# subsystem 4: readout resonator 2
q_opts = dict(f_res = 7.6, # resonator dressed frequency in GHz
Z0 = 50, # characteristic impedance in Ohm
vp = 0.404314 * c_light # phase velocity
)
res_2 = Subsystem(name='readout_2', sys_type='TL_RESONATOR', nodes=['readout_2'], q_opts=q_opts)
composite_sys = CompositeSystem(subsystems=[transmon_1, transmon_2, res_1, res_2],
cells=[cell_1, cell_2],
grd_node='ground_plane',
nodes_force_keep=['readout_1', 'readout_2'])
cg = composite_sys.circuitGraph()
print(cg)
hilbertspace = composite_sys.create_hilbertspace()
print(hilbertspace)
hilbertspace = composite_sys.add_interaction()
hilbertspace.hamiltonian()
hamiltonian_results = composite_sys.hamiltonian_results(hilbertspace, evals_count=30)
composite_sys.compute_gs()
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit import QuantumCircuit
from qiskit.quantum_info import Clifford, random_clifford
from qiskit.synthesis.clifford import synth_clifford_greedy, synth_clifford_layers
clifford = random_clifford(5, seed=0)
print(clifford)
qc = synth_clifford_greedy(clifford)
qc.draw(output='mpl')
qc = synth_clifford_layers(clifford)
qc.draw(output='mpl')
qc.decompose().draw(output='mpl')
import numpy as np
from qiskit.synthesis.linear import synth_cnot_count_full_pmh, synth_cnot_depth_line_kms
mat = np.array([
[1, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 0, 1, 1, 1],
[0, 0, 1, 1, 1, 0],
])
qc = synth_cnot_count_full_pmh(mat)
qc.draw(output='mpl')
qc = synth_cnot_depth_line_kms(mat)
qc.draw(output='mpl')
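# Sketch: both synthesized circuits implement the same linear map; converting
# back through LinearFunction lets us verify this against the input matrix.
from qiskit.circuit.library.generalized_gates import LinearFunction
print(np.array_equal(LinearFunction(qc).linear.astype(int), mat))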
from qiskit.synthesis.permutation import synth_permutation_depth_lnn_kms, synth_permutation_acg
pattern = [1, 2, 3, 4, 5, 6, 7, 0]
qc = synth_permutation_depth_lnn_kms(pattern)
qc.draw(output='mpl')
qc = synth_permutation_acg(pattern)
qc.draw(output='mpl')
from qiskit.circuit.library.generalized_gates import LinearFunction
from qiskit.transpiler.passes.optimization import CollectLinearFunctions
from qiskit.circuit.library import RealAmplitudes
ansatz = RealAmplitudes(4, reps=2, entanglement='full')
qc = ansatz.decompose()
qc.draw(output='mpl')
qc_extracted = CollectLinearFunctions()(qc)
qc_extracted.draw(output='mpl')
from qiskit.compiler import transpile
qc_transpiled = transpile(qc_extracted, optimization_level=1)
qc_transpiled.draw(output='mpl')
qc = QuantumCircuit(2)
qc.z(0)
qc.cx(0, 1)
qc.z(0)
qc.cx(0, 1)
qc.z(0)
qc.cx(0, 1)
qc.draw(output='mpl')
qc_extracted = CollectLinearFunctions(do_commutative_analysis=True)(qc)
qc_extracted.draw(output='mpl')
from functools import partial
from qiskit.transpiler.passes.optimization.collect_and_collapse import CollectAndCollapse, collect_using_filter_function, collapse_to_operation
from qiskit.circuit.library.generalized_gates import PermutationGate
# make use of "collect_using_filter_function", where filter_function collects SWAP gates.
my_collect_function = partial(
collect_using_filter_function,
filter_function=lambda node: node.op.name=="swap" and getattr(node.op, "condition", None) is None,
split_blocks=False,
min_block_size=2,
)
# Given a quantum circuit with SWAP gates, create a PermutationGate out of it.
def _swap_block_to_permutation_gate(qc: QuantumCircuit):
nq = qc.num_qubits
pattern = list(range(nq))
for instruction in qc.data:
q0 = qc.find_bit(instruction.qubits[0]).index
q1 = qc.find_bit(instruction.qubits[1]).index
pattern[q0], pattern[q1] = pattern[q1], pattern[q0]
return PermutationGate(pattern)
# make use of "collapse_to_operation"
my_collapse_function = partial(collapse_to_operation, collapse_function=_swap_block_to_permutation_gate)
# And that's our pass
SwapCollector = CollectAndCollapse(collect_function=my_collect_function, collapse_function=my_collapse_function)
qc = QuantumCircuit(4)
qc.swap(0, 1)
qc.swap(1, 2)
qc.swap(2, 3)
qc.h(0)
qc.swap(0, 1)
qc.swap(1, 2)
qc.swap(2, 0)
qc.draw(output='mpl')
qc_extracted = SwapCollector(qc)
qc_extracted.draw(output='mpl')
qc = QuantumCircuit(6)
# Let's append a permutation gate and a linear function
qc.append(PermutationGate([1, 2, 3, 4, 0]), [1, 2, 3, 4, 5])
qc.h(range(6))
mat = np.array([
[1, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 0, 1, 1, 1],
[0, 0, 1, 1, 1, 0],
])
qc.append(LinearFunction(mat), [5, 4, 3, 2, 1, 0])
qc.draw(output='mpl')
from qiskit.compiler import transpile
qc_transpiled = transpile(qc, optimization_level=1)
qc_transpiled.draw(output='mpl')
from qiskit.transpiler.passes.synthesis.high_level_synthesis import HLSConfig, HighLevelSynthesis
hls_config=HLSConfig(
permutation=[("acg", {})],
)
qc_transpiled = transpile(qc, optimization_level=1, hls_config=hls_config)
qc_transpiled.draw(output='mpl')
from qiskit.transpiler.passes.synthesis.high_level_synthesis import HLSConfig, HighLevelSynthesis
hls_config=HLSConfig(
permutation=[("kms", {})],
)
qc_transpiled = transpile(qc, optimization_level=1, hls_config=hls_config)
qc_transpiled.draw(output='mpl')
from qiskit.transpiler.passes.synthesis.high_level_synthesis import HLSConfig, HighLevelSynthesis
hls_config=HLSConfig(
use_default_on_unspecified=False,
permutation=[("kms", {})],
)
qc_transpiled = transpile(qc, optimization_level=1, hls_config=hls_config)
qc_transpiled.draw(output='mpl')
qc = QuantumCircuit(4)
qc.swap(0, 1)
qc.swap(1, 2)
qc.swap(2, 3)
qc.swap(1, 0)
qc.swap(2, 3)
qc.swap(3, 1)
qc.h(0)
qc.swap(0, 2)
qc.draw(output='mpl')
# Use our SwapCollector to extract SWAP blocks and to replace them by PermutationGates
qc_extracted = SwapCollector(qc)
qc_extracted.draw(output='mpl')
# Resynthesize using 'acg' method
hls_config = hls_config=HLSConfig(
permutation=[("acg", {})],
)
qc_resynthesized = HighLevelSynthesis(hls_config=hls_config)(qc_extracted)
qc_resynthesized.draw(output='mpl')
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit.primitives import Estimator
from qiskit.algorithms.gradients import ParamShiftEstimatorGradient
estimator = Estimator()
ps = ParamShiftEstimatorGradient(estimator)
from qiskit.circuit.library import EfficientSU2
from qiskit.quantum_info import SparsePauliOp
def ising_hamiltonian(num_qubits):
return SparsePauliOp.from_sparse_list(
[("ZZ", [i, i + 1], 1) for i in range(num_qubits - 1)] + [("X", [i], -1) for i in range(num_qubits)],
num_qubits=num_qubits
)
num_qubits = 3
hamiltonian = ising_hamiltonian(num_qubits)
print(hamiltonian)
circuit = EfficientSU2(num_qubits, reps=1).decompose()
print(circuit.draw())
import numpy as np
values = np.random.random(circuit.num_parameters)
gradient = ps.run([circuit], [hamiltonian], [values]).result()
print(gradient.gradients[0])
from qiskit.algorithms.gradients import ReverseEstimatorGradient
reverse = ReverseEstimatorGradient() # classical calculation, not primitive-based!
gradient = reverse.run([circuit], [hamiltonian], [values]).result()
print(gradient.gradients[0])
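# Sketch: with the exact reference Estimator, both gradient methods should
# agree up to numerical tolerance.
ps_grad = ps.run([circuit], [hamiltonian], [values]).result().gradients[0]
rev_grad = reverse.run([circuit], [hamiltonian], [values]).result().gradients[0]
print(np.allclose(ps_grad, rev_grad))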
import time
import numpy as np
from qiskit.circuit.library import EfficientSU2
def time_gradients(num_layers, gradient, num_qubits=5, averages=5):
circuit = EfficientSU2(num_qubits, reps=num_layers).decompose()
hamiltonian = ising_hamiltonian(num_qubits)
values = np.linspace(1, 2, num=circuit.num_parameters)
timings = []
for _ in range(averages):
start = time.time()
__ = gradient.run([circuit], [hamiltonian], [values]).result()
timings.append(time.time() - start)
return np.mean(timings), np.std(timings)
layers = [2, 4, 6, 8, 10]
num_qubits = 5
ps_times = np.array([time_gradients(num_layers, ps) for num_layers in layers])
rev_times = np.array([time_gradients(num_layers, reverse) for num_layers in layers])
import matplotlib.pyplot as plt
num_parameters = (np.array(layers) + 1) * 2 * num_qubits
plt.figure(figsize=(12, 6))
plt.plot(num_parameters, ps_times[:, 0], color="royalblue", label="circuit-based")
plt.fill_between(num_parameters, ps_times[:, 0] - ps_times[:, 1], ps_times[:, 0] + ps_times[:, 1], color="royalblue", alpha=0.2)
plt.plot(num_parameters, rev_times[:, 0], color="seagreen", label="reverse")
plt.fill_between(num_parameters, rev_times[:, 0] - rev_times[:, 1], rev_times[:, 0] + rev_times[:, 1], color="seagreen", alpha=0.2)
plt.title("Gradient benchmark")
plt.xlabel("number of parameters")
plt.ylabel("runtime in seconds");
from qiskit.algorithms.minimum_eigensolvers import VQE
from qiskit.algorithms.optimizers import GradientDescent
from qiskit.algorithms.time_evolvers.variational import RealMcLachlanPrinciple
def time_vqe(num_layers, gradient, num_qubits=4):
circuit = EfficientSU2(num_qubits, reps=num_layers).decompose()
hamiltonian = ising_hamiltonian(num_qubits)
optimizer = GradientDescent(maxiter=100)
initial_point = np.zeros(circuit.num_parameters)
vqe = VQE(estimator, circuit, optimizer, gradient=gradient, initial_point=initial_point)
start = time.time()
_ = vqe.compute_minimum_eigenvalue(hamiltonian)
took = time.time() - start
print("done in", took)
return took
num_qubits = 4
layers = [2, 4, 6, 8, 10, 12]
num_parameters = (np.array(layers) + 1) * 2 * num_qubits
vqe_ps_times = np.array([time_vqe(num_layers, ps) for num_layers in layers])
# vqe_ps_times = np.load("vqe_ps_times.npy")
vqe_rev_times = np.array([time_vqe(num_layers, reverse) for num_layers in layers])
# vqe_rev_times = np.load("vqe_rev_times.npy")
plt.figure(figsize=(12, 6))
plt.plot(num_parameters, vqe_ps_times / 60, color="royalblue", marker="o", label="circuit-based")
plt.plot(num_parameters, vqe_rev_times / 60, color="seagreen", marker="v", label="reverse")
plt.legend(loc="best")
plt.title("VQE with 100 steps")
plt.xlabel("number of parameters")
plt.ylabel("time in minutes");
from qiskit.algorithms.gradients import ReverseQGT
from qiskit.algorithms.time_evolvers import VarQRTE, TimeEvolutionProblem
from qiskit.algorithms.time_evolvers.variational import RealMcLachlanPrinciple
def time_varqte(num_layers, estimator, gradient, qgt, num_qubits=5, averages=1):
circuit = EfficientSU2(num_qubits, reps=num_layers).decompose()
hamiltonian = ising_hamiltonian(num_qubits)
    problem = TimeEvolutionProblem(hamiltonian, time=1)  # optionally: aux_operators=[hamiltonian]
principle = RealMcLachlanPrinciple(qgt, gradient)
initial_values = np.zeros(circuit.num_parameters)
varqte = VarQRTE(circuit, initial_values, principle, estimator=estimator, num_timesteps=100)
start = time.time()
_ = varqte.evolve(problem)
took = time.time() - start
print("done in", took)
return took
from qiskit.algorithms.gradients import LinCombEstimatorGradient, LinCombQGT
num_qubits = 4
layers = [2, 4, 6, 8]
num_parameters = (np.array(layers) + 1) * 2 * num_qubits
lcu_qgt = LinCombQGT(estimator)
lcu = LinCombEstimatorGradient(estimator)
# varqte_times = np.array([time_varqte(num_layers, estimator, lcu, lcu_qgt) for num_layers in layers])
varqte_times = np.load("varqte_times.npy")
rev_qgt = ReverseQGT()
# revqte_times = np.array([time_varqte(num_layers, estimator, reverse, rev_qgt) for num_layers in layers])
revqte_times = np.load("revqte_times.npy")
plt.figure(figsize=(12, 6))
plt.plot(num_parameters[:len(varqte_times)], varqte_times / 60, color="royalblue", marker="o", label="circuit-based")
plt.plot(num_parameters, revqte_times / 60, color="seagreen", marker="v", label="reverse")
plt.legend(loc="best")
plt.title("VarQTE with 100 timesteps")
plt.xlabel("number of parameters")
plt.ylabel("time in minutes");
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit_nature.second_q.drivers import PySCFDriver
driver = PySCFDriver()
problem = driver.run()
hamiltonian = problem.hamiltonian.second_q_op()
print(hamiltonian)
from qiskit_nature.second_q.mappers import JordanWignerMapper
mapper = JordanWignerMapper()
print(mapper.map(hamiltonian))
from qiskit_nature.second_q.mappers import QubitConverter
converter = QubitConverter(mapper)
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver
from qiskit_nature.second_q.algorithms import GroundStateEigensolver
algo = GroundStateEigensolver(converter, NumPyMinimumEigensolver())
print(algo.solve(problem))
algo = GroundStateEigensolver(mapper, NumPyMinimumEigensolver())
print(algo.solve(problem))
from qiskit_nature.second_q.mappers import ParityMapper
mapper = ParityMapper(num_particles=(1, 1))
print(mapper.map(hamiltonian))
tapered_mapper = problem.get_tapered_mapper(mapper)
print(type(tapered_mapper))
print(tapered_mapper.map(hamiltonian))
from qiskit_nature.second_q.circuit.library import HartreeFock
hf_state = HartreeFock(2, (1, 1), JordanWignerMapper())
hf_state.draw()
from qiskit_nature.second_q.mappers import InterleavedQubitMapper
interleaved_mapper = InterleavedQubitMapper(JordanWignerMapper())
hf_state = HartreeFock(2, (1, 1), interleaved_mapper)
hf_state.draw()
print(type(mapper.map(hamiltonian)))
from qiskit_nature import settings
settings.use_pauli_sum_op = False
print(type(mapper.map(hamiltonian)))
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit.circuit import *
qreg, creg = QuantumRegister(5, "q"), ClassicalRegister(2, "c")
body = QuantumCircuit(3, 1)
loop_parameter = Parameter("foo")
indexset = range(0, 10, 2)
body.rx(loop_parameter, [0, 1, 2])
circuit = QuantumCircuit(qreg, creg)
circuit.for_loop(indexset, loop_parameter, body, [1, 2, 3], [1])
print(circuit.draw())
from qiskit.qasm3 import dumps
print(dumps(circuit))
from qiskit.transpiler.passes import UnrollForLoops
circuit_unroll = UnrollForLoops()(circuit)
print(dumps(circuit_unroll))
circuit_unroll.draw()
print(dumps(UnrollForLoops(max_target_depth=4)(circuit)))
from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager
pass_manager = generate_preset_pass_manager(1)
pass_manager.run(circuit).draw()
pass_manager.pre_optimization.append(UnrollForLoops())
pass_manager.run(circuit).draw()
# UnrollForLoops skips for-loops that contain break or continue statements
import math
from qiskit import QuantumCircuit
qc = QuantumCircuit(2, 1)
with qc.for_loop(range(5)) as i:
qc.rx(i * math.pi/4, 0)
qc.cx(0, 1)
qc.measure(0, 0)
qc.break_loop().c_if(0, True)
print(dumps(qc))
print(dumps(UnrollForLoops()(qc)))
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit_aer.primitives import Estimator
from qiskit.quantum_info import SparsePauliOp
from qiskit.circuit.library import RealAmplitudes
ansatz = RealAmplitudes(num_qubits=2, reps=2)
observable = SparsePauliOp.from_list([("XX", 1), ("YY", 2), ("ZZ", 3)])
estimator = Estimator()
result = estimator.run(ansatz, observable, parameter_values=[0, 1, 1, 2, 3, 5]).result()
print("Expectation value:", result.values)
estimator = Estimator(approximation=True)
result = estimator.run(ansatz, observable, parameter_values=[0, 1, 1, 2, 3, 5]).result()
print("Expectation value:", result.values)
from itertools import product
import numpy as np
num_qubits = 8
abelian_grouping = True
ansatz = RealAmplitudes(num_qubits=num_qubits, reps=1)
observables = [
SparsePauliOp("".join(pauli_str)) for pauli_str in product("IZ", repeat=num_qubits)
]
parameter_values = list(np.random.rand(1, ansatz.num_parameters))
estimator = Estimator(abelian_grouping=abelian_grouping)
%%time
result = estimator.run(
circuits=[ansatz] * len(observables),
observables=observables,
parameter_values=parameter_values * len(observables),
).result()
# Both opflow and the Estimator can group the commuting terms of a single SparsePauliOp
SparsePauliOp(["XX", "XI"])
# Only the Estimator can group observables given as a list of separate SparsePauliOps
[SparsePauliOp("XX"), SparsePauliOp("XI")]
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import qiskit
qiskit.__version__
from qiskit import qasm2
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.random import random_circuit
qc = random_circuit(100, 100)
qasm_string = qc.qasm()
%timeit -n1 qasm2.loads(qasm_string, include_path=qasm2.LEGACY_INCLUDE_PATH, custom_instructions=qasm2.LEGACY_CUSTOM_INSTRUCTIONS, custom_classical=qasm2.LEGACY_CUSTOM_CLASSICAL, strict=False)
%timeit -n1 QuantumCircuit.from_qasm_str(qasm_string)
import numpy as np
import rustworkx as rx
from qiskit import transpile
from qiskit.providers import BackendV2, Options
from qiskit.transpiler import Target, InstructionProperties
from qiskit.circuit.library import XGate, SXGate, RZGate, CZGate
from qiskit.circuit import Measure, Delay, Parameter
class FakeMultiChip(BackendV2):
"""Fake multi chip backend."""
def __init__(self, degree=3):
super().__init__(name='multi_chip')
graph = rx.generators.directed_heavy_hex_graph(degree, bidirectional=False)
num_qubits = len(graph) * 3
rng = np.random.default_rng(seed=12345678942)
rz_props = {}
x_props = {}
sx_props = {}
measure_props = {}
delay_props = {}
self._target = Target("Fake multi-chip backend", num_qubits=num_qubits)
for i in range(num_qubits):
qarg = (i,)
rz_props[qarg] = InstructionProperties(error=0.0, duration=0.0)
x_props[qarg] = InstructionProperties(
error=rng.uniform(1e-6, 1e-4), duration=rng.uniform(1e-8, 9e-7)
)
sx_props[qarg] = InstructionProperties(
error=rng.uniform(1e-6, 1e-4), duration=rng.uniform(1e-8, 9e-7)
)
measure_props[qarg] = InstructionProperties(
error=rng.uniform(1e-3, 1e-1), duration=rng.uniform(1e-8, 9e-7)
)
delay_props[qarg] = None
self._target.add_instruction(XGate(), x_props)
self._target.add_instruction(SXGate(), sx_props)
self._target.add_instruction(RZGate(Parameter("theta")), rz_props)
self._target.add_instruction(Measure(), measure_props)
self._target.add_instruction(Delay(Parameter("t")), delay_props)
cz_props = {}
for i in range(3):
for root_edge in graph.edge_list():
offset = i * len(graph)
edge = (root_edge[0] + offset, root_edge[1] + offset)
cz_props[edge] = InstructionProperties(
error=rng.uniform(1e-5, 5e-3), duration=rng.uniform(1e-8, 9e-7)
)
self._target.add_instruction(CZGate(), cz_props)
@property
def target(self):
return self._target
@property
def max_circuits(self):
return None
@classmethod
def _default_options(cls):
return Options(shots=1024)
def run(self, circuit, **kwargs):
raise NotImplementedError
backend = FakeMultiChip()
print(backend.num_qubits)
backend.coupling_map.draw()
qc = QuantumCircuit(42)
qc.h(0)
qc.h(10)
qc.h(20)
for target in range(1, 10):
    qc.cx(0, target)
for target in range(11, 20):
    qc.ecr(10, target)
for target in range(21, 30):
    qc.cy(20, target)
qc.h(30)
qc.cx(30, 31)
qc.cx(30, 32)
qc.cx(30, 33)
qc.h(34)
qc.cx(34, 35)
qc.cx(34, 36)
qc.cx(34, 37)
qc.h(38)
qc.cx(38, 39)
qc.cx(39, 40)
qc.cx(39, 41)
qc.measure_all()
qc.draw('mpl', fold=-1)
res = transpile(qc, backend, seed_transpiler=420)
res.draw('mpl', fold=-1)
from rustworkx.visualization import graphviz_draw
graph = backend.target.build_coupling_map("cz").graph
for node in graph.node_indices():
graph[node] = node
for edge, triple in graph.edge_index_map().items():
graph.update_edge_by_index(edge, (triple[0], triple[1]))
physical_bits = {k: v for k, v in res.layout.initial_layout.get_physical_bits().items() if "ancilla" not in v._register.name}
qubit_indices = {bit: index for index, bit in enumerate(qc.qubits)}
def color_node(node):
if node in physical_bits:
out_dict = {
"label": str(res.layout.input_qubit_mapping[physical_bits[node]]),
"color": "red",
"fillcolor": "red",
"style": "filled",
"shape": "circle",
}
else:
out_dict = {
"label": "",
"color": "blue",
"fillcolor": "blue",
"style": "filled",
"shape": "circle",
}
return out_dict
def color_edge(edge):
if all(x in physical_bits for x in edge):
out_dict = {
"color": "red",
"fillcolor": "red",
}
else:
out_dict = {
"color": "blue",
"fillcolor": "blue",
}
return out_dict
graphviz_draw(graph, method="neato", node_attr_fn=color_node, edge_attr_fn=color_edge)
from qiskit.transpiler import TranspilerError
qc = QuantumCircuit(42)
qc.h(0)
for i in range(41):
qc.cx(0, i + 1)
qc.measure_all()
try:
transpile(qc, backend)
except TranspilerError as e:
print(e)
from qiskit import opflow
from qiskit.utils import QuantumInstance
QuantumInstance(backend)
from qiskit.algorithms.minimum_eigen_solvers import VQE
VQE()
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
qreg = QuantumRegister(2)
creg = ClassicalRegister(2)
qc = QuantumCircuit(qreg, creg)
qc.h([0, 1])
qc.measure([0, 1], [0, 1])
with qc.switch(creg) as case:
with case(0): # if the register is '00'
qc.z(0)
with case(1, 2): # if the register is '01' or '10'
qc.cx(0, 1)
with case(case.DEFAULT): # the default case
qc.h(0)
qc.draw('mpl')
from qiskit.circuit import SwitchCaseOp
backend.target.add_instruction(SwitchCaseOp, name='switch_case')
tqc = transpile(qc, backend)
tqc.draw('mpl', idle_wires=False)
from qiskit.qasm3 import dumps
from qiskit.qasm3 import ExperimentalFeatures
print(dumps(qc, experimental=ExperimentalFeatures.SWITCH_CASE_V1))
print(dumps(tqc, experimental=ExperimentalFeatures.SWITCH_CASE_V1))
import io
from qiskit.qpy import dump, load
with io.BytesIO() as fd:
dump(tqc, fd)
fd.seek(0)
loaded_tqc = load(fd)[0]
print(dumps(loaded_tqc, experimental=ExperimentalFeatures.SWITCH_CASE_V1))
import time
import numpy as np
from qiskit.circuit.random import random_circuit
from qiskit import transpile
from qiskit.providers.fake_provider import FakeSherbrooke
from qiskit.circuit.library import IGate
backend = FakeSherbrooke()
backend.target.add_instruction(IGate(), {(i,): None for i in range(backend.num_qubits)})
for i in np.linspace(1, backend.num_qubits, dtype=int):
qc = random_circuit(i, i, measure=True, seed=42023)
start = time.perf_counter()
tqc = transpile(qc, backend)
stop = time.perf_counter()
print(f"{i}, {stop - start}")
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit.circuit import Parameter
from qiskit import QuantumCircuit
from qiskit.primitives import Estimator
from qiskit.quantum_info import SparsePauliOp
from qiskit.algorithms import TimeEvolutionProblem
from qiskit.algorithms.time_evolvers import TrotterQRTE
from qiskit.synthesis import LieTrotter
import numpy as np
A = lambda t: 1 - t
B = lambda t: t
t_param = Parameter("t")
op1 = SparsePauliOp(["X"], np.array([2*A(t_param)]))
op2 = SparsePauliOp(["Z"], np.array([2*B(t_param)]))
op3 = SparsePauliOp(["Y"], np.array([1000]))
H_t = op1 + op2 + op3
print(H_t)
aux_op = [SparsePauliOp(["Z"])]
T = 2
reps = 10
init = QuantumCircuit(1)
# init.h(0)
evolution_problem = TimeEvolutionProblem(
H_t,
time=T,
initial_state=init,
aux_operators=aux_op,
t_param=t_param
)
pf = LieTrotter(reps=1)
estimator = Estimator()
trotter_qrte = TrotterQRTE(product_formula=pf, num_timesteps=reps, estimator=estimator)
result = trotter_qrte.evolve(evolution_problem)
full_evo_circ = result.evolved_state
print(full_evo_circ.decompose().decompose())
aux_op_res = result.aux_ops_evaluated
obs_evo = result.observables
print(f"final result (previous only aux_op evaluation): {aux_op_res}\n")
print("Newly added aux_op evaluations at each time step:")
for i, m in enumerate(obs_evo):
print(f"step {i}: <Z> = {m}")
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import networkx as nx
from qiskit_optimization.applications import Maxcut
seed = 1
num_nodes = 6
graph = nx.random_regular_graph(d=3, n=num_nodes, seed=seed)
nx.draw(graph, with_labels=True, pos=nx.spring_layout(graph, seed=seed))
maxcut = Maxcut(graph)
problem = maxcut.to_quadratic_program()
print(problem.prettyprint())
from qiskit.algorithms.optimizers import COBYLA
from qiskit.algorithms.minimum_eigensolvers import VQE
from qiskit.circuit.library import RealAmplitudes
from qiskit.primitives import Estimator
from qiskit_optimization.algorithms.qrao import (
QuantumRandomAccessOptimizer,
SemideterministicRounding,
)
# Prepare the VQE algorithm
ansatz = RealAmplitudes(2)
vqe = VQE(
ansatz=ansatz,
optimizer=COBYLA(),
estimator=Estimator(),
)
# Use semi-deterministic rounding, known as "Pauli rounding"
# in https://arxiv.org/pdf/2111.03167v2.pdf
# (This is the default if no rounding scheme is specified.)
semideterministic_rounding = SemideterministicRounding()
# Construct the optimizer
qrao = QuantumRandomAccessOptimizer(
min_eigen_solver=vqe,
    rounding_scheme=semideterministic_rounding
)
import numpy as np
# Solve the optimization problem
results = qrao.solve(problem)
print(
f"The objective function value: {results.fval}\n"
f"x: {results.x}\n"
f"relaxed function value: {-1 * results.relaxed_fval}\n"
)
print(
f"The obtained solution places a partition between nodes {np.where(results.x == 0)[0]} "
f"and nodes {np.where(results.x == 1)[0]}."
)
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver
from qiskit_optimization.algorithms import MinimumEigenOptimizer
exact_mes = NumPyMinimumEigensolver()
exact = MinimumEigenOptimizer(exact_mes)
exact_result = exact.solve(problem)
print(exact_result.prettyprint())
print("QRAO Approximate Optimal Function Value:", results.fval)
print("Exact Optimal Function Value:", exact_result.fval)
print(f"Approximation Ratio: {results.fval / exact_result.fval :.2f}")
from qiskit.primitives import Sampler
from qiskit_optimization.algorithms.qrao import MagicRounding
estimator = Estimator(options={"shots": 1000, "seed": seed})
sampler = Sampler(options={"shots": 10000, "seed": seed})
# Prepare the VQE algorithm
ansatz = RealAmplitudes(2)
vqe = VQE(
ansatz=ansatz,
optimizer=COBYLA(),
estimator=estimator,
)
# Use magic rounding
magic_rounding = MagicRounding(sampler=sampler)
# Construct the optimizer
qrao = QuantumRandomAccessOptimizer(min_eigen_solver=vqe, rounding_scheme=magic_rounding)
results = qrao.solve(problem)
print(
f"The objective function value: {results.fval}\n"
f"x: {results.x}\n"
f"relaxed function value: {-1 * results.relaxed_fval}\n"
)
print(f"The number of distinct samples is {len(results.samples)}.")
print("Top 10 samples with the largest fval:")
for sample in results.samples[:10]:
print(sample)
from qiskit_optimization.algorithms.qrao import QuantumRandomAccessEncoding
# Encode the QUBO problem into a relaxed Hamiltonian
encoding = QuantumRandomAccessEncoding(max_vars_per_qubit=3)
encoding.encode(problem)
# Solve the relaxed problem
relaxed_results, rounding_context = qrao.solve_relaxed(encoding)
magic_rounding = MagicRounding(sampler=sampler)
mr_results = magic_rounding.round(rounding_context)
qrao_results_mr = qrao.process_result(
problem=problem, encoding=encoding, relaxed_result=relaxed_results, rounding_result=mr_results
)
print(
f"The objective function value: {qrao_results_mr.fval}\n"
f"x: {qrao_results_mr.x}\n"
f"relaxed function value: {-1 * qrao_results_mr.relaxed_fval}\n"
f"The number of distinct samples is {len(qrao_results_mr.samples)}."
)
|
https://github.com/Qiskit/feedback
|
Qiskit
|
# pip version of CPLEX is not available for Apple silicon
%pip install cplex
import numpy as np
from qiskit_optimization import QuadraticProgram
from qiskit_optimization.algorithms import GurobiOptimizer
prob = QuadraticProgram()
n = 2001
prob.binary_var_list(n)
prob.minimize(linear=np.ones(n))
print(prob.prettyprint())
GurobiOptimizer().solve(prob)
from qiskit_optimization.applications import Knapsack
prob = Knapsack(
values = [1, 2, 3, 4],
weights= [5, 6, 7, 8],
max_weight=20
).to_quadratic_program()
print(prob.prettyprint())
from qiskit_optimization.algorithms import ScipyMilpOptimizer
milp = ScipyMilpOptimizer()
result = milp.solve(prob)
print(result)
from qiskit_optimization.applications import Maxcut
import networkx as nx
graph = nx.random_regular_graph(d=3, n=8, seed=123)
maxcut = Maxcut(graph)
pos = nx.spring_layout(graph, seed=123)
maxcut.draw(pos=pos)
prob = maxcut.to_quadratic_program()
print(prob.prettyprint())
result = milp.solve(prob)
def quadratic_to_linear(problem: QuadraticProgram) -> QuadraticProgram:
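    """Linearize a binary quadratic objective: each product x*y of binary
    variables is replaced by an auxiliary variable z with constraints
    z <= x, z <= y, z >= x + y - 1 (a standard McCormick-style reformulation)."""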
new_prob = QuadraticProgram(problem.name)
for x in problem.variables:
new_prob._add_variable(x.lowerbound, x.upperbound, x.vartype, x.name)
obj = problem.objective
lin = obj.linear.to_dict(use_name=True)
quad = obj.quadratic.to_dict(use_name=True)
prod = {}
for (x, y), coeff in quad.items():
name = f"{x}_AND_{y}"
prod[x, y] = name
new_prob.continuous_var(0, 1, name)
lin[name] = coeff
for (x, y), name in prod.items():
new_prob.linear_constraint({name: 1, x: -1}, "<=", 0)
new_prob.linear_constraint({name: 1, y: -1}, "<=", 0)
new_prob.linear_constraint({name: 1, x: -1, y: -1}, ">=", -1)
if obj.sense == obj.sense.MAXIMIZE:
new_prob.maximize(obj.constant, lin)
else:
new_prob.minimize(obj.constant, lin)
return new_prob
new_prob = quadratic_to_linear(prob)
print(new_prob.prettyprint())
result = milp.solve(new_prob)
print(result)
maxcut.draw(result.x[:prob.get_num_vars()], pos)
result = GurobiOptimizer().solve(prob)
print(result)
maxcut.draw(result, pos)
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit.circuit import Parameter, QuantumCircuit
x = Parameter('x')
y = Parameter('y')
circuit = QuantumCircuit(2)
circuit.rx(x, 1)
circuit.cx(1, 0)
circuit.ry(-2*y, 0)
circuit.draw('mpl')
from qiskit.quantum_info import Operator
try:
op = Operator(circuit) # not supported in qiskit-terra
except TypeError:
print('TypeError: unbound parameters cannot be cast to float')
from qiskit_symb import Operator as SymbOperator
op = SymbOperator(circuit)
op.to_sympy()
op1 = op.subs({y: 0})
op1.to_sympy()
from numpy import pi
op2 = op.subs({x: 0, y: pi})
op2.to_sympy()
import numpy as np
# generate random data in [0, 2π]
num_samples = 1000
num_features = 3
data = np.random.rand(num_samples, num_features) * 2*np.pi
print(data)
from qiskit.circuit.library import ZZFeatureMap
# create a data encoding PQC
pqc = ZZFeatureMap(num_features, reps=1).decompose()
pqc.draw('mpl')
from qiskit import Aer
from qiskit_symb import Statevector as SymbStatevector
def aer_simulator(pqc, data):
simulator = Aer.get_backend('statevector_simulator')
pars_dict = dict(zip(pqc.parameters, data.transpose()))
job = simulator.run([pqc], parameter_binds=[pars_dict], shots=1)
psi = job.result().results[-1].data.statevector
return psi
def symb_simulator(pqc, data):
circuit = SymbStatevector(pqc).to_lambda()
for x in data:
psi = circuit(*x)
return psi
%%time
psi_1 = aer_simulator(pqc=pqc, data=data)
print(psi_1)
%%time
psi_2 = symb_simulator(pqc=pqc, data=data)
print(psi_2)
numpy_arr_1 = psi_1.data
numpy_arr_2 = psi_2[:, 0]
print(np.allclose(numpy_arr_1, numpy_arr_2))
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from numpy import pi
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
qr = QuantumRegister(4, "q")
cr = ClassicalRegister(2, "cr")
qc = QuantumCircuit(qr, cr)
with qc.if_test((cr[1], 1)):
qc.h(0)
qc.cx(0, 1)
qc.draw()
qr = QuantumRegister(4, "q")
cr = ClassicalRegister(2, "cr")
qc = QuantumCircuit(qr, cr)
with qc.if_test((cr[1], 1)) as _else:
qc.h(0)
qc.cx(0, 1)
with _else:
qc.cx(0, 1)
qc.draw()
qr = QuantumRegister(4, "q")
cr = ClassicalRegister(2, "cr")
qc = QuantumCircuit(qr, cr)
with qc.if_test((cr[1], 1)) as _else:
qc.h(0)
qc.cx(0, 1)
with qc.if_test((cr[0], 0)):
qc.x(0)
with _else:
qc.cx(0, 1)
qc.draw()
qr = QuantumRegister(4, "q")
cr = ClassicalRegister(3, "cr")
qc = QuantumCircuit(qr, cr)
qc.h(0)
with qc.if_test((cr[1], 1)) as _else:
qc.x(0, label="X c_if").c_if(cr, 4)
with qc.if_test((cr[2], 1)):
qc.z(0)
qc.y(1)
with qc.if_test((cr[1], 1)):
qc.y(1)
qc.z(2)
with qc.if_test((cr[2], 1)):
qc.cx(0, 1)
with qc.if_test((cr[1], 1)):
qc.h(0)
qc.x(1)
with _else:
qc.y(1)
with qc.if_test((cr[2], 1)):
qc.x(0)
qc.x(1)
qr1 = QuantumRegister(2, "qr1")
cr1 = ClassicalRegister(2, "cr1")
cr2 = ClassicalRegister(2, "cr2")
circuit = QuantumCircuit(qr1, cr1, cr2)
inst = QuantumCircuit(2, 2, name="Inst").to_instruction()
qc.append(inst, [qr[0], qr[1]], [cr[0], cr[1]])
qc.x(0)
qc.draw(fold=30, style={"name": "default", "showindex": True})  # optionally: wire_order=(2, 0, 3, 1, 4, 5, 6)
qr = QuantumRegister(4, "q")
cr = ClassicalRegister(3, "cr")
qc = QuantumCircuit(qr, cr)
qc.h(0)
with qc.if_test((cr[1], 1)) as _else:
qc.x(0, label="X c_if").c_if(cr, 4)
with qc.if_test((cr[2], 1)):
qc.z(0)
qc.y(1)
with qc.if_test((cr[1], 1)):
qc.y(1)
qc.z(2)
with qc.if_test((cr[2], 1)):
qc.cx(0, 1)
with qc.if_test((cr[1], 1)):
qc.h(0)
qc.x(1)
with _else:
qc.y(1)
with qc.if_test((cr[2], 1)):
qc.x(0)
qc.x(1)
qr1 = QuantumRegister(2, "qr1")
cr1 = ClassicalRegister(2, "cr1")
cr2 = ClassicalRegister(2, "cr2")
circuit = QuantumCircuit(qr1, cr1, cr2)
inst = QuantumCircuit(2, 2, name="Inst").to_instruction()
qc.append(inst, [qr[0], qr[1]], [cr[0], cr[1]])
qc.x(0)
qc.draw(fold=7, style={"name": "default", "showindex": True}, scale=0.8)
qr = QuantumRegister(4, "q")
cr = ClassicalRegister(3, "cr")
qc = QuantumCircuit(qr, cr)
qc.h(0)
with qc.if_test((cr[1], 1)) as _else:
qc.x(0, label="X c_if").c_if(cr, 4)
with qc.if_test((cr[2], 1)):
qc.z(0)
qc.y(1)
with qc.if_test((cr[1], 1)):
qc.y(1)
qc.z(2)
with qc.if_test((cr[2], 1)):
qc.cx(0, 1)
with qc.if_test((cr[1], 1)):
qc.h(0)
qc.x(1)
with _else:
qc.y(1)
with qc.if_test((cr[2], 1)):
qc.x(0)
qc.x(1)
qr1 = QuantumRegister(2, "qr1")
cr1 = ClassicalRegister(2, "cr1")
cr2 = ClassicalRegister(2, "cr2")
circuit = QuantumCircuit(qr1, cr1, cr2)
inst = QuantumCircuit(2, 2, name="Inst").to_instruction()
qc.append(inst, [qr[0], qr[1]], [cr[0], cr[1]])
qc.x(0)
qc.draw(fold=30, style={"name": "textbook", "showindex": True})
qr = QuantumRegister(4, "q")
cr = ClassicalRegister(3, "cr")
qc = QuantumCircuit(qr, cr)
qc.h(0)
qc.measure(0, 2)
with qc.while_loop((cr[0], 0)):
qc.h(0)
qc.cx(0, 1)
qc.measure(0, 0)
with qc.if_test((cr[2], 1)):
qc.x(0)
qc.draw(style={"showindex": True})
qr = QuantumRegister(4, "q")
cr = ClassicalRegister(3, "cr")
qc = QuantumCircuit(qr, cr)
qc.h(0)
qc.measure(0, 2)
with qc.for_loop((2, 4, 8, 16)) as i:
qc.h(0)
qc.cx(0, 1)
qc.rx(pi/i, 1)
qc.measure(0, 0)
with qc.if_test((cr[2], 1)):
qc.z(0)
qc.draw(style={"showindex": True})
from qiskit.circuit import QuantumCircuit, ClassicalRegister, QuantumRegister
qreg = QuantumRegister(3)
creg = ClassicalRegister(3)
qc = QuantumCircuit(qreg, creg)
qc.h([0, 1, 2])
qc.measure([0, 1, 2], [0, 1, 2])
with qc.switch(creg) as case:
with case(0, 1, 2):
qc.x(0)
with case(3, 4, 5):
qc.y(1)
qc.y(0)
qc.y(0)
with case(case.DEFAULT):
qc.cx(0, 1)
qc.h(0)
qc.draw(style={"showindex": True})
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit_nature.second_q.operators import BosonicOp, FermionicOp
# Create a creation operator
bosonic_op = BosonicOp({'+_0': 1}, num_modes=1)
# Create an annihilation operator
bosonic_op = BosonicOp({'-_0': 1}, num_modes=1)
bosonic_op1 = BosonicOp({'+_0 -_0 -_1 +_1': 2}, num_modes=2)
bosonic_op2 = BosonicOp({'+_2 -_2': 1.5, '+_0': 3}, num_modes=3)
# Sum
print("Sum")
print(bosonic_op1 + bosonic_op2)
# Multiplication
print("\nMultiplication")
print(0.5 * bosonic_op1 @ bosonic_op2)
bosonic_op = BosonicOp({'+_0 -_0 -_1 +_0': 1}, num_modes=2)
# Normal order: reorder so that all creation operators come first. Because of the commutation relation [b, b†] = 1, inverting -_0 +_0 yields 1 + +_0 -_0, which explains the two terms below.
print("Normal Ordering")
print(bosonic_op.normal_order())
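# Quick check of the commutation relation mentioned above (a sketch):
# normal-ordering -_0 +_0 alone should give an identity term plus +_0 -_0.
print(BosonicOp({'-_0 +_0': 1}, num_modes=1).normal_order())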
# Index order (reorder all operators such that the lowest indices come first)
print("\nIndex Ordering")
print(bosonic_op.index_order())
# Simplify
print("Simplify")
# Simplify can be used to reduce numerical noise:
print((BosonicOp({'+_0 -_0': 1}) + BosonicOp({'+_0 -_0': 1e-10})).simplify(atol=1e-9))
print("\nSimplify with two creation operators")
print("\nBosons:")
print(BosonicOp({'+_0 -_0 -_1 +_0 +_0': 1}, num_modes=2).simplify())
print("\nFermions:")
print(FermionicOp({'+_0 -_0 -_1 +_0 +_0': 1}, num_spin_orbitals=2).simplify())
FermionicOp({'+_0 -_0 -_1 +_0': 1}, num_spin_orbitals=2).simplify()
from qiskit_nature.second_q.mappers import BosonicLinearMapper
from qiskit_nature import settings
settings.use_pauli_sum_op = False
mapper_occ1 = BosonicLinearMapper(max_occupation=1)
boson1 = mapper_occ1.map(BosonicOp({'+_0 -_0': 1}))
print(boson1)
print("Max occupation = 1")
boson_p1 = mapper_occ1.map(BosonicOp({'+_0': 1}))
boson_m1 = mapper_occ1.map(BosonicOp({'-_0': 1}))
print(boson_p1)
print("\nMax occupation = 2")
mapper_occ2 = BosonicLinearMapper(max_occupation=2)
boson_p2 = mapper_occ2.map(BosonicOp({'+_0': 1}))
boson_m2 = mapper_occ2.map(BosonicOp({'-_0': 1}))
print(boson_p2)
print("\nMax occupation = 3")
mapper_occ3 = BosonicLinearMapper(max_occupation=3)
boson_p3 = mapper_occ3.map(BosonicOp({'+_0': 1}))
boson_m3 = mapper_occ3.map(BosonicOp({'-_0': 1}))
print(boson_p3)
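# Sanity check (a sketch): the mapped creation operator should equal the adjoint
# of the mapped annihilation operator, so their difference simplifies to ~zero.
print((boson_p3.adjoint() - boson_m3).simplify())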
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import qiskit
qiskit.__version__
from qiskit.circuit.library import XGate
gate = XGate()
print(f"name: {gate.name}\n")
print(f"params: {gate.params}\n")
print(f"matrix:\n{gate.to_matrix()}\n")
print(f"definition:\n{gate.definition}\n")
new_gate = XGate()
print(f"name: {gate.name}\n")
print(f"params: {gate.params}\n")
print(f"matrix:\n{gate.to_matrix()}\n")
print(f"definition:\n{gate.definition}\n")
XGate() is XGate() is XGate() is XGate() is XGate() is XGate() is XGate() is XGate() is XGate() is XGate()
try:
XGate().label = "My Extra Special XGate made with secret sauce"
except NotImplementedError as e:
print(e)
labelled_gate = XGate(label="My Extra Special XGate made with secret sauce")
print(labelled_gate.label)
print(f"Shared object: {labelled_gate is XGate()}")
gate = XGate()
gate.mutable
mutable_gate = gate.to_mutable()
mutable_gate.label = "My Extra Special XGate made with secret sauce"
print(mutable_gate.label)
print(f"Shared object: {mutable_gate is XGate()}")
from qiskit.circuit import Clbit
gate = XGate()
conditional_gate = gate.c_if(Clbit(), 0)
print(conditional_gate.condition)
print(f"Shared object: {conditional_gate is XGate()}")
from qiskit.circuit import QuantumCircuit
qc = QuantumCircuit(1, 1)
qc.x(0).c_if(qc.clbits[0], 1)
print(qc)
print(qc.data[0].operation is XGate())
|
https://github.com/Qiskit/feedback
|
Qiskit
|
# Cell 1: simple representation
from qiskit import QuantumCircuit
from qiskit.circuit import QuantumRegister, ClassicalRegister, Clbit
from qiskit.circuit.classical import expr
cr1 = ClassicalRegister(3, "cr1")
cr2 = ClassicalRegister(3, "cr2")
expr.equal(expr.bit_and(cr1, cr2), 5)
# Cell 2: where can I use these?
qc1 = QuantumCircuit(QuantumRegister(3), cr1, cr2)
with qc1.if_test((cr1, 5)):
qc1.x(0)
with qc1.if_test(expr.equal(cr1, 5)):
qc1.x(0)
# But we're not limited to equality relations!
with qc1.if_test(expr.less_equal(cr1, 5)):
qc1.z(0)
qc1.data[-1].operation.condition
# Cell 3: where else?
qc2 = QuantumCircuit(QuantumRegister(3), cr1, cr2)
with qc2.while_loop(expr.logic_or(expr.equal(cr1, 5), cr2[0])):
qc2.x(0)
with qc2.switch(expr.bit_and(cr1, cr2)) as case:
with case(0):
qc2.x(0)
with case(1):
qc2.z(0)
with case(2):
qc2.x(1)
with case(case.DEFAULT):
qc2.z(1)
# Cell 4: integration testing
import io
from qiskit import transpile, qasm3, qpy
from qiskit_aer import AerSimulator
backend = AerSimulator(method="statevector")
transpiled = transpile(qc1, backend)
with io.BytesIO() as fptr:
qpy.dump(transpiled, fptr)
fptr.seek(0)
loaded = qpy.load(fptr)[0]
print(qasm3.dumps(loaded))
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit.circuit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.compiler import transpile
from qiskit.circuit.random import random_circuit
from qiskit_aer import AerSimulator
qc = random_circuit(16, 3, measure=True)
backend = AerSimulator()
t_qc = transpile(qc, backend, optimization_level=3, init_method='qubit_reuse')
t_qc_original = transpile(qc, backend, optimization_level=3) # For comparison later
qc.draw('mpl', fold=-1)
t_qc.draw('mpl', fold=-1)
from qiskit.quantum_info import hellinger_fidelity
counts_normal = backend.run(t_qc_original, shots=2**12).result().get_counts()
counts_reduced = backend.run(t_qc, shots=2**12).result().get_counts()
hellinger_fidelity(counts_normal, counts_reduced)
def build_bv_circuit(secret_string):
input_size = len(secret_string)
num_qubits = input_size + 1
qr = QuantumRegister(num_qubits)
cr = ClassicalRegister(input_size)
qc = QuantumCircuit(qr, cr, name="main")
qc.x(qr[input_size])
qc.h(qr)
qc.barrier()
for i_qubit in range(input_size):
if secret_string[input_size - 1 - i_qubit] == "1":
qc.cx(qr[i_qubit], qr[input_size])
qc.barrier()
qc.h(qr)
qc.x(qr[input_size])
qc.barrier()
qc.measure(qr[:-1], cr)
return qc
secret = 5871
secret_string = format(secret, "b")
qc = build_bv_circuit(secret_string=secret_string)
qc.draw('mpl', fold=-1)
t_qc = transpile(qc, backend, optimization_level=3, init_method='qubit_reuse')
t_qc_original = transpile(qc, backend, optimization_level=3) # For comparison later
t_qc.draw('mpl', fold=-1)
counts_normal = backend.run(t_qc_original, shots=2**12).result().get_counts()
counts_reduced = backend.run(t_qc, shots=2**12).result().get_counts()
print(counts_normal, counts_reduced)
hellinger_fidelity(counts_normal, counts_reduced)
# Matrix Product State generator
from qiskit.circuit.library import RealAmplitudes
def MPS(num_qubits, **kwargs):
"""
Constructs a Matrix Product State (MPS) quantum circuit.
Args:
num_qubits (int): The number of qubits in the circuit.
**kwargs: Additional keyword arguments to be passed to the
RealAmplitudes.
Returns:
QuantumCircuit: The constructed MPS quantum circuit.
"""
qc = QuantumCircuit(num_qubits)
qubits = range(num_qubits)
print(len(list(zip(qubits[:-1], qubits[1:]))))
# Iterate over adjacent qubit pairs
for i, j in zip(qubits[:-1], qubits[1:]):
qc.compose(RealAmplitudes(num_qubits=2,
parameter_prefix=f'θ_{i},{j}',
**kwargs), [i, j],
inplace=True)
        qc.barrier()  # Add a barrier after each block for clarity and separation
return qc
# Tree tuples generator
def _generate_tree_tuples(n):
"""
Generate a list of tuples representing the tree structure
of consecutive numbers up to n.
Args:
n (int): The number up to which the tuples are generated.
Returns:
list: A list of tuples representing the tree structure.
"""
tuples_list = []
indices = []
# Generate initial tuples with consecutive numbers up to n
for i in range(0, n, 2):
tuples_list.append((i, i + 1))
indices += [tuples_list]
# Perform iterations until we reach a single tuple
while len(tuples_list) > 1:
new_tuples = []
# Generate new tuples by combining adjacent larger numbers
for i in range(0, len(tuples_list), 2):
new_tuples.append((tuples_list[i][1], tuples_list[i + 1][1]))
tuples_list = new_tuples
indices += [tuples_list]
return indices
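# For example, _generate_tree_tuples(8) returns
# [[(0, 1), (2, 3), (4, 5), (6, 7)], [(1, 3), (5, 7)], [(3, 7)]].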
# Tree Tensor Network Generator
def TTN(num_qubits, **kwargs):
"""
Constructs a Tree Tensor Network (TTN) quantum circuit.
Args:
num_qubits (int): The number of qubits in the circuit.
**kwargs: Additional keyword arguments to be passed to the
RealAmplitudes.
Returns:
QuantumCircuit: The constructed TTN quantum circuit.
Raises:
AssertionError: If the number of qubits is not a power of 2
or zero.
"""
qc = QuantumCircuit(num_qubits)
qubits = range(num_qubits)
# Compute qubit indices
    assert num_qubits != 0 and num_qubits & (num_qubits - 1) == 0, \
        "Number of qubits must be a power of 2"
indices = _generate_tree_tuples(num_qubits)
# Iterate over each layer of TTN indices
for layer_indices in indices:
for i, j in layer_indices:
qc.compose(RealAmplitudes(num_qubits=2,
parameter_prefix=f'θ_{i},{j}',
**kwargs), [i, j],
inplace=True)
        qc.barrier()  # Add a barrier after each layer for clarity and separation
return qc
import numpy as np
qc = TTN(2**3)
qc.measure_all()
params = {param: np.pi/(2**((index%3)+1)) for index, param in enumerate(qc.parameters)}
qc = qc.bind_parameters(params)
qc.draw('mpl', fold=-1)
t_qc = transpile(qc, backend, optimization_level=3, init_method='qubit_reuse')
t_qc_original = transpile(qc, backend, optimization_level=3) # For comparison later
t_qc.draw('mpl', fold=-1)
counts_normal = backend.run(t_qc_original, shots=2**12).result().get_counts()
counts_reduced = backend.run(t_qc, shots=2**12).result().get_counts()
# print(counts_normal, counts_reduced)
hellinger_fidelity(counts_normal, counts_reduced)
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import qiskit
qiskit.__version__
from qiskit.circuit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.compiler import transpile
qreg = QuantumRegister(2)
creg = ClassicalRegister(2)
qc = QuantumCircuit(qreg, creg)
qc.h([0, 1])
qc.h([0, 1])
qc.h([0, 1])
qc.measure([0, 1], [0, 1])
with qc.switch(creg) as case:
with case(0): # if the register is '00'
qc.z(0)
with case(1, 2): # if the register is '01' or '10'
qc.cx(0, 1)
with case(case.DEFAULT): # the default case
qc.h(0)
qc.draw("mpl")
from qiskit.compiler import transpile
from qiskit.providers.fake_provider import FakeBelemV2
from qiskit.circuit import SwitchCaseOp, IfElseOp, WhileLoopOp
backend = FakeBelemV2()
backend.target.add_instruction(SwitchCaseOp, name="switch_case")
backend.target.add_instruction(IfElseOp, name="if_else")
backend.target.add_instruction(WhileLoopOp, name="while_loop")
transpile(qc, backend, optimization_level=2, seed_transpiler=671_44).draw('mpl')
transpile(qc, backend, optimization_level=3, seed_transpiler=671_44).draw('mpl')
from qiskit.circuit.classical import expr
qr = QuantumRegister(4)
cr1 = ClassicalRegister(2)
cr2 = ClassicalRegister(2)
qc = QuantumCircuit(qr, cr1, cr2)
qc.h(0)
qc.cx(0, 1)
qc.h(2)
qc.cx(2, 3)
qc.measure([0, 1, 2, 3], [0, 1, 2, 3])
# If the two registers are equal to each other.
with qc.if_test(expr.equal(cr1, cr2)):
qc.x(0)
# While either of two bits are set.
with qc.while_loop(expr.logic_or(cr1[0], cr1[1])):
qc.reset(0)
qc.reset(1)
qc.measure([0, 1], cr1)
qc.draw('mpl')
from qiskit.qasm3 import dumps
print(dumps(transpile(qc, backend, optimization_level=3)))
from qiskit.algorithms import VQE
import itertools
from math import pi
from qiskit.circuit.library import EfficientSU2
from qiskit.providers.fake_provider import FakeSherbrooke
backend = FakeSherbrooke()
circuit = EfficientSU2(127, entanglement='linear', reps=50)
print(f"Number of circuit parameters: {len(circuit.parameters)}")
value_cycle = itertools.cycle([0, pi / 4, pi / 2, 3*pi / 4, pi, 2* pi])
%timeit -n 1 circuit.assign_parameters([x[1] for x in zip(range(len(circuit.parameters)), value_cycle)], inplace=False)
|
https://github.com/Qiskit/feedback
|
Qiskit
|
from qiskit import __qiskit_version__
print(__qiskit_version__)
from qiskit.tools.jupyter import *
%qiskit_version_table
from qiskit import __version__
print(__version__)
from qiskit.version import get_version_info
print(get_version_info())
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import numpy as np
from IPython.display import Latex
from qiskit import QuantumCircuit
from qiskit.quantum_info import Statevector, schmidt_decomposition
ζ = Statevector([1/np.sqrt(2),0,0,1/np.sqrt(2)])
ζ.draw('latex',prefix='|\\zeta\\rangle = ')
ζ_sd = schmidt_decomposition(ζ,[0])
print(ζ_sd)
for i, (s,u,v) in enumerate(ζ_sd):
print('---')
display(Latex(f"$$\\lambda_{i} = {s}$$"))
display(u.draw('latex',prefix=f'|u_{i}\\rangle = '))
display(v.draw('latex',prefix=f'|v_{i}\\rangle = '))
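# Sanity check (a sketch): rebuild |ζ⟩ from its Schmidt components,
# using the same reconstruction convention as for |w⟩ below.
display(Statevector(sum(s * np.kron(u, v) for s, u, v in ζ_sd)).draw('latex'))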
w = Statevector([0,1/np.sqrt(3),1/np.sqrt(3),0,1/np.sqrt(3),0,0,0])
w.draw('latex',prefix='|w\\rangle = ')
w_sd = schmidt_decomposition(w,[0,1])
for i, (s,u,v) in enumerate(w_sd):
print('---')
display(Latex(f"$$\\lambda_{i} = {s}$$"))
display(u.draw('latex',prefix=f'|u_{i}\\rangle = '))
display(v.draw('latex',prefix=f'|v_{i}\\rangle = '))
Statevector(sum([suv[0]*np.kron(suv[1],suv[2]) for suv in w_sd]))
χ = Statevector(np.array([1,0,0,0,1,0,0,0,1])*1/np.sqrt(3),dims=(3,3))
χ.draw('latex', prefix='|\\chi\\rangle = ',max_size=10)
χ_sd = schmidt_decomposition(χ,[0])
for i, (s,u,v) in enumerate(χ_sd):
print('---')
display(Latex(f"$$\\lambda_{i} = {s}$$"))
display(u.draw('latex',prefix=f'|u_{i}\\rangle = '))
display(v.draw('latex',prefix=f'|v_{i}\\rangle = '))
dims = (2,3,4)
k = Statevector(np.arange(np.prod(dims)),dims=dims)
k.draw('latex',max_size=24)
k_sd = schmidt_decomposition(k,[0,2])
for i, (s,u,v) in enumerate(k_sd):
print('---')
display(Latex(f"$$\\lambda_{i} = {s}$$"))
display(u.draw('latex',prefix=f'|u_{i}\\rangle = '))
display(v.draw('latex',prefix=f'|v_{i}\\rangle = '))
Statevector(sum([suv[0]*np.kron(suv[1],suv[2]) for suv in k_sd]),dims=dims).draw('latex',max_size=24)
ξ = Statevector.from_label('++')
ξ.draw('latex',prefix='|\\xi\\rangle = ')
ξ_sd = schmidt_decomposition(ξ,[0])
for i, (s,u,v) in enumerate(ξ_sd):
print('---')
display(Latex(f"$$\\lambda_{i} = {np.round(s,6)}$$"))
display(u.draw('latex',prefix=f'|u_{i}\\rangle = '))
display(v.draw('latex',prefix=f'|v_{i}\\rangle = '))
from schmidt_decomposition_local import schmidt_decomposition as sd_loc
ξ_sd = sd_loc(ξ,[0])
for i, (s,u,v) in enumerate(ξ_sd):
print('---')
display(Latex(f"$$\\lambda_{i} = {np.round(s,6)}$$"))
display(u.draw('latex',prefix=f'|u_{i}\\rangle = '))
display(v.draw('latex',prefix=f'|v_{i}\\rangle = '))
|
https://github.com/Qiskit/feedback
|
Qiskit
|
# imports
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library import PermutationGate
from qiskit.transpiler.passes.synthesis.high_level_synthesis import (
HighLevelSynthesis,
HighLevelSynthesisPluginManager,
HighLevelSynthesisPlugin,
HLSConfig,
)
from qiskit.compiler import transpile
from qiskit.transpiler import CouplingMap
qc = QuantumCircuit(6)
perm = PermutationGate([1, 2, 3, 4, 0])
qc.append(perm, [1, 2, 3, 4, 5])
qc.draw(output='mpl')
qct = transpile(qc)
qct.draw(output='mpl')
qct = HighLevelSynthesis()(qc)
qct.draw(output='mpl')
print(HighLevelSynthesisPluginManager().method_names("permutation"))
config = HLSConfig(permutation=["acg"])
qct = HighLevelSynthesis(hls_config=config)(qc)
qct.draw(output='mpl')
config = HLSConfig(permutation=["kms"])
qct = HighLevelSynthesis(hls_config=config)(qc)
qct.draw(output='mpl')
config = HLSConfig(permutation=["kms"])
qct = transpile(qc, hls_config=config)
qct.draw(output='mpl')
config = HLSConfig(permutation=["token_swapper"])
qct = HighLevelSynthesis(hls_config=config)(qc)
qct.draw(output='mpl')
config = HLSConfig(permutation=["token_swapper"])
coupling_map = CouplingMap([[1, 2], [1, 3], [1, 4], [1, 5]])
qct = HighLevelSynthesis(hls_config=config, coupling_map=coupling_map, use_qubit_indices=True)(qc)
qct.draw(output='mpl')
config = HLSConfig(permutation=["token_swapper"])
coupling_map = CouplingMap([[1, 2], [2, 3], [2, 4], [2, 5]])
qct = HighLevelSynthesis(hls_config=config, coupling_map=coupling_map, use_qubit_indices=True)(qc)
qct.draw(output='mpl')
config = HLSConfig(permutation=["token_swapper"])
coupling_map = CouplingMap.from_ring(6)
qct = HighLevelSynthesis(hls_config=config, coupling_map=coupling_map, use_qubit_indices=True)(qc)
qct.draw(output='mpl')
coupling_map = CouplingMap([[0, 1], [1, 2], [3, 4], [4, 5]])
config = HLSConfig(permutation=["token_swapper"])
qc = QuantumCircuit(6)
qc.append(PermutationGate([1, 0, 3, 2]), [1, 2, 3, 4])
config = HLSConfig(permutation=[("token_swapper", {"trials": 10})])
qct = HighLevelSynthesis(hls_config=config, coupling_map=coupling_map, use_qubit_indices=True)(qc)
qct.draw(output='mpl')
coupling_map = CouplingMap([[0, 1], [1, 2], [3, 4], [4, 5]])
config = HLSConfig(permutation=["token_swapper"])
qc = QuantumCircuit(6)
qc.append(PermutationGate([1, 0, 3, 2]), [1, 4, 2, 3])
config = HLSConfig(permutation=[("token_swapper", {"trials": 10})])
qct = HighLevelSynthesis(hls_config=config, coupling_map=coupling_map, use_qubit_indices=True)(qc)
qct.draw(output='mpl')
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import numpy as np
import qiskit
from qiskit import QuantumCircuit
from qiskit import transpile
from qiskit.providers.fake_provider import FakeManhattanV2
from qiskit.circuit.library import *
from qiskit.synthesis import *
from qiskit.quantum_info import *
from qiskit.synthesis.linear import random_invertible_binary_matrix
cliff = random_clifford(50)
device = FakeManhattanV2() # 65 qubits
num_qubits = device.num_qubits
coupling_map = device.coupling_map
qc = synth_clifford_greedy(cliff)
print(f"original circuit has: "
f"2q-depth {qc.depth(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"2q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"1q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 1)}. ")
qc_trans = transpile(qc, basis_gates=['rz', 'sx', 'cx'], coupling_map=coupling_map, optimization_level=3)
print(f"transpiled circuit has: "
f"2q-depth {qc_trans.depth(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"2q-count {qc_trans.size(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"1q-count {qc_trans.size(filter_function=lambda x: x.operation.num_qubits == 1)}. ")
qc_lnn = synth_clifford_depth_lnn(cliff).decompose()
print(f"LNN circuit has: "
f"2q-depth {qc_lnn.depth(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"2q-count {qc_lnn.size(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"1q-count {qc_lnn.size(filter_function=lambda x: x.operation.num_qubits == 1)}. ")
# layout is a line of connected qubits
layout = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 10,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 26,
37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 38,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 54,
64, 63, 62, 61, 60, 59, 58, 57, 56, 55]
num_qubits = qc_lnn.num_qubits
initial_layout = layout[0:num_qubits]
qc_lnn_trans = transpile(qc_lnn, coupling_map=coupling_map, basis_gates=['rz', 'sx', 'cx'],
optimization_level=1, initial_layout=initial_layout)
print(f"transpiled LNN circuit has: "
f"2q-depth {qc_lnn_trans.depth(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"2q-count {qc_lnn_trans.size(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"1q-count {qc_lnn_trans.size(filter_function=lambda x: x.operation.num_qubits == 1)}. ")
num_qubits = 50
# input: random invertible binary matrix
mat = random_invertible_binary_matrix(num_qubits)
qc = synth_cnot_depth_line_kms(mat)
print(f"Linear circuit has: "
f"2q-depth {qc.depth(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"2q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"1q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 1)}. ")
num_qubits = 50
# input: random permutation
pattern = np.random.permutation(num_qubits)
qc = synth_permutation_depth_lnn_kms(pattern)
print(f"Permutation circuit has: "
f"2q-depth {qc.depth(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"2q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"1q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 1)}. ")
num_qubits = 50
# input: an upper-diagonal matrix representing the CZ circuit; mat[i][j]=1 for i<j represents a CZ(i,j) gate
# (assumed example input below; the original cell reused `mat` from the linear-circuit cell above)
mat = np.triu(np.random.randint(2, size=(num_qubits, num_qubits)), k=1)
qc = synth_cz_depth_line_mr(mat)
print(f"CZ circuit has: "
f"2q-depth {qc.depth(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"2q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"1q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 1)}. ")
cliff = random_clifford(5)
qc = synth_clifford_layers(cliff)
qc.draw('mpl')
cliff = random_clifford(50)
qc = synth_clifford_depth_lnn(cliff).decompose()
print(f"Clifford circuit has: "
f"2q-depth {qc.depth(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"2q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"1q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 1)}. ")
cliff = random_clifford(5)
stab = StabilizerState(cliff)
qc = synth_stabilizer_layers(stab)
qc.draw('mpl')
cliff = random_clifford(50)
stab = StabilizerState(cliff)
qc = synth_stabilizer_depth_lnn(stab).decompose()
print(f"Stabilizer circuit has: "
f"2q-depth {qc.depth(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"2q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 2)}, "
f"1q-count {qc.size(filter_function=lambda x: x.operation.num_qubits == 1)}. ")
|
https://github.com/Qiskit/feedback
|
Qiskit
|
import qiskit
qiskit.__version__
from qiskit.circuit.library import XGate, CZGate, RZGate
from qiskit.circuit import QuantumCircuit
XGate() is XGate() is XGate() is XGate() is XGate() is XGate() is XGate() is XGate() is XGate()
CZGate() is CZGate() is CZGate() is CZGate() is CZGate() is CZGate() is CZGate() is CZGate() is CZGate() is CZGate() is CZGate() is CZGate() is CZGate()
gate = XGate()
print(f"name: {gate.name}")
print(f"params: {gate.params}")
print(f"matrix:\n{gate.to_matrix()}")
print(f"definition:\n{gate.definition}")
gate = XGate()
print(f"name: {gate.name}")
print(f"params: {gate.params}")
print(f"matrix:\n{gate.to_matrix()}")
print(f"definition:\n{gate.definition}")
import math
RZGate(math.pi) is RZGate(math.pi / 2)
RZGate(math.pi) is RZGate(math.pi)
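# Unlike XGate and CZGate above, parameterized gates such as RZGate are not
# cached as singletons, so both identity checks above return False.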
gate = XGate()
try:
gate.label = "My bespoke x gate that gives me perfect results"
except TypeError as e:
print(e)
gate = XGate().to_mutable()
gate.label = "My bespoke x gate that gives me perfect results"
labelled_gate = XGate(label="My bespoke x gate that gives me perfect results")
print(labelled_gate.mutable)
qc = QuantumCircuit(1)
qc.append(gate, [0])
qc.append(labelled_gate, [0])
print(qc)
from qiskit import passmanager
class ToyPassManager(passmanager.BasePassManager):
"""Synthetic example PassManager that takes input "programs" integers operates on them as strings and returns a final integer
The transpiler's passmanager defines ``_passmanager_frontend`` as QuantumCircuit -> DAGCircuit and ``_passmanager_backend``
as DAGCircuit -> QuantumCircuit
"""
def _passmanager_frontend(self, input_program: int, **kwargs) -> str:
"""Convert input int to str "IR"""
return str(input_program)
def _passmanager_backend(self, passmanager_ir: str, in_program: int, **kwargs) -> int:
"""Convert str "IR" to output int"""
return int(passmanager_ir)
class RemoveFive(passmanager.GenericPass):
"""Pass for ToyPassManager that removes the number 5 from a string "IR"""
def run(self, passmanager_ir: str):
return passmanager_ir.replace("5", "")
pm = ToyPassManager(RemoveFive())
pm.run([123456789, 12345, 555552])
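# Expected output (every digit 5 removed, then cast back to int):
# [12346789, 1234, 2]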
from qiskit.providers.fake_provider import FakeNairobiV2
from qiskit import transpile
qc = QuantumCircuit(5)
qc.x(4)
qc.h(range(5))
qc.cx(4, range(4))
tqc = transpile(qc, FakeNairobiV2(), optimization_level=3)
tqc.draw('mpl', fold=-1)
tqc.layout.initial_virtual_layout(filter_ancillas=True)
tqc.layout.final_index_layout()
from qiskit.circuit.library import RealAmplitudes
from qiskit.quantum_info import SparsePauliOp
from qiskit.primitives import BackendEstimator
from qiskit.compiler import transpile
psi = RealAmplitudes(num_qubits=2, reps=2)
H1 = SparsePauliOp.from_list([("II", 1), ("IZ", 2), ("XI", 3)])
backend = FakeNairobiV2()
estimator = BackendEstimator(backend=backend, skip_transpilation=True)
thetas = [0, 1, 1, 2, 3, 5]
transpiled_psi = transpile(psi, backend, optimization_level=3)
# New in 0.45.0 inflate and permute:
print(f"original operator:\n{H1}\n")
permuted_op = H1.apply_layout(transpiled_psi.layout)
print(f"final layout: {transpiled_psi.layout.final_index_layout()}\n")
print(f"operator with layout applied:\n{permuted_op}\n")
res = estimator.run(transpiled_psi, permuted_op, thetas, shots=int(1e5)).result()
print(res)
|
https://github.com/Qiskit/feedback
|
Qiskit
|
!pip install pyRiemann-qiskit
!pip install moabb
from pyriemann_qiskit.pipelines import (
QuantumMDMWithRiemannianPipeline,
)
from moabb.datasets import BI2012
from moabb.paradigms import P300
from sklearn.model_selection import train_test_split
from sklearn.metrics import balanced_accuracy_score
from sklearn.preprocessing import LabelEncoder
from time import time
# Instantiate dataset
# This has to be adapted for the data you used
dataset = BI2012() #selected dataset
# Instantiate paradigm:
# It defines the pre-processing steps for data extraction
# This is specific to the MOABB library
paradigm = P300()
# Get first subject's data
X, y, _ = paradigm.get_data(dataset, subjects=[1])
# Encode class label into numeric value
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
# Let's keep it simple without cross-validation
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100, stratify=y)
# Instantiate our pipeline (inherits from sklearn mixins)
clf = QuantumMDMWithRiemannianPipeline(
convex_metric="distance", quantum=True
)
display(clf._pipe)
start_time = time()
clf.fit(X_train, y_train)
fit_time = time() - start_time
# Perform classification (prediction)
# This uses QAOA and an Aer statevector simulator.
# An account token can also be provided to the estimator
# to run on a real quantum backend.
start_time = time()
y_pred = clf.predict(X_test)
pred_time = time() - start_time
# Compute dataset balance
n_class_0 = y[y == 0].shape[0]
n_class_1 = y[y == 1].shape[0]
# Our dataset is not balanced
balance = n_class_0 / n_class_1
print(f'balance = {balance}')
# Then we will use the balanced-accuracy
# which is a better metric than accuracy in this case:
score = balanced_accuracy_score(y_test, y_pred)
# Do not pay too much attention to the score:
# there is no cross-validation here,
# so it likely just depends on the random_state.
print(f'score = {score}')
# Print execution time
# The fitting is classical, the prediction (here on simulator) is quantum.
# Other versions of the algorithm exist (with fitting quantum and prediction classical)
print("Fit time: {:.3f}s".format(fit_time))
print("Prediction time: {:.3f}s".format(pred_time))
# Time required for the classification of a single trial:
n_train = y_train.shape[0]
n_test = y_test.shape[0]
print("Fit time (1 example): {:.3f}s".format(fit_time / n_train))
print("Prediction time (1 example): {:.3f}s".format(pred_time / n_test))
|
https://github.com/tigerjack/qiskit_grover
|
tigerjack
|
from math import sqrt, pi
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
import oracle_simple
import composed_gates
def get_circuit(n, oracles):
"""
Build the circuit composed by the oracle black box and the other quantum gates.
:param n: The number of qubits (not including the ancillas)
:param oracles: A list of black box (quantum) oracles; each of them selects a specific state
:returns: The proper quantum circuit
:rtype: qiskit.QuantumCircuit
"""
cr = ClassicalRegister(n)
    if n > 3:
        # n qubits for the input value, plus n - 1 ancilla qubits
        # needed by the multi-controlled Z decomposition
        qr = QuantumRegister(n + n - 1)
        qc = QuantumCircuit(qr, cr)
    else:
        # No ancillas needed for n <= 3
        qr = QuantumRegister(n)
        qc = QuantumCircuit(qr, cr)
print("Number of qubits is {0}".format(len(qr)))
print(qr)
# Initial superposition
for j in range(n):
qc.h(qr[j])
    # The length of the oracles list, i.e. how many roots the function has
m = len(oracles)
# Grover's algorithm is a repetition of an oracle box and a diffusion box.
# The number of repetitions is given by the following formula.
print("n is ", n)
r = int(round((pi / 2 * sqrt((2**n) / m) - 1) / 2))
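    # Worked example: for n = 3 and m = 1,
    # r = round((pi/2 * sqrt(8) - 1) / 2) = round(1.72) = 2 repetitions.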
print("Repetition of ORACLE+DIFFUSION boxes required: {0}".format(r))
for j in range(r):
for i in range(len(oracles)):
oracles[i].get_circuit(qr, qc)
diffusion(n, qr, qc)
for j in range(n):
qc.measure(qr[j], cr[j])
return qc, len(qr)
def diffusion(n, qr, qc):
"""
The Grover diffusion operator.
Given the arry of qiskit QuantumRegister qr and the qiskit QuantumCircuit qc, it adds the diffusion operator to the appropriate qubits in the circuit.
"""
for j in range(n):
qc.h(qr[j])
# D matrix, flips state |000> only (instead of flipping all the others)
for j in range(n):
qc.x(qr[j])
# 0..n-2 control bits, n-1 target, n..
if n > 3:
composed_gates.n_controlled_Z_circuit(
qc, [qr[j] for j in range(n - 1)], qr[n - 1],
[qr[j] for j in range(n, n + n - 1)])
else:
composed_gates.n_controlled_Z_circuit(
qc, [qr[j] for j in range(n - 1)], qr[n - 1], None)
for j in range(n):
qc.x(qr[j])
for j in range(n):
qc.h(qr[j])
|
https://github.com/tigerjack/qiskit_grover
|
tigerjack
|
circuit = None
oracle_simple = None
execute = None
Aer = None
IBMQ = None
least_busy = None
def _import_modules():
print("Importing modules")
global circuit, oracle_simple, execute, least_busy
import circuit
import oracle_simple
from qiskit import execute
from qiskit.backends.ibmq import least_busy
# To be used from REPL
def build_and_infos(n, x_stars, real=False, online=False, backend_name=None):
_import_modules()
oracles = []
for i in range(len(x_stars)):
oracles.append(oracle_simple.OracleSimple(n, x_stars[i]))
gc, n_qubits = circuit.get_circuit(n, oracles)
if real:
online = True
backend, max_credits, shots = get_appropriate_backend(
n_qubits, real, online, backend_name)
res = get_compiled_circuit_infos(gc, backend, max_credits, shots)
return res
# To be used from REPL
def build_and_run(n, x_stars, real=False, online=False, backend_name=None):
"""
This just build the grover circuit for the specific n and x_star
and run them on the selected backend.
It may be convenient to use in an interactive shell for quick testing.
"""
_import_modules()
oracles = []
for i in range(len(x_stars)):
oracles.append(oracle_simple.OracleSimple(n, x_stars[i]))
gc, n_qubits = circuit.get_circuit(n, oracles)
if real:
online = True
backend, max_credits, shots = get_appropriate_backend(
n_qubits, real, online, backend_name)
return run_grover_algorithm(gc, backend, max_credits, shots)
def get_appropriate_backend(n, real, online, backend_name):
    # Online, real or simulator?
if (not online):
global Aer
from qiskit import Aer
max_credits = 10
shots = 4098
print("Local simulator backend")
backend = Aer.get_backend('qasm_simulator')
# list of online devices: ibmq_qasm_simulator, ibmqx2, ibmqx4, ibmqx5, ibmq_16_melbourne
else:
global IBMQ
from qiskit import IBMQ
print("Online {0} backend".format("real" if real else "simulator"))
max_credits = 3
shots = 4098
import Qconfig
IBMQ.load_accounts()
if (backend_name is not None):
backend = IBMQ.get_backend(backend_name)
else:
large_enough_devices = IBMQ.backends(
filters=lambda x: x.configuration()['n_qubits'] >= n and x.configuration()[
'simulator'] == (not real)
)
backend = least_busy(large_enough_devices)
print("Backend name is {0}; max_credits = {1}, shots = {2}".format(
backend, max_credits, shots))
return backend, max_credits, shots
def get_compiled_circuit_infos(qc, backend, max_credits, shots):
result = {}
print("Getting infos ... ")
backend_coupling = backend.configuration()['coupling_map']
from qiskit import compile
grover_compiled = compile(
qc, backend=backend, coupling_map=backend_coupling, shots=shots)
grover_compiled_qasm = grover_compiled.experiments[
0].header.compiled_circuit_qasm
result['n_gates'] = len(grover_compiled_qasm.split("\n")) - 4
return result
def run_grover_algorithm(qc, backend, max_credits, shots):
"""
    Run the Grover algorithm, i.e. the quantum circuit qc.
    :param qc: The (qiskit) quantum circuit
    :param backend: The backend to run the circuit on
    :param max_credits: The maximum number of credits to spend on the run
    :param shots: The number of repetitions of the circuit
    :returns: Result of the computation, i.e. the dictionary of result counts for the execution.
    :rtype: dict
"""
_import_modules()
pending_jobs = backend.status()['pending_jobs']
print("Backend has {0} pending jobs".format(pending_jobs))
print("Compiling ... ")
job = execute(qc, backend, shots=shots, max_credits=max_credits)
print("Job id is {0}".format(job.job_id()))
print(
"At this point, if any error occurs, you can always retrieve the job results using the backend name and the job id using the utils/retrieve_job_results.py script"
)
if pending_jobs > 1:
s = input(
"Do you want to wait for the job to go up in the queue list or exit the program(q)? If you exit now you can still retrieve the job results later on using the backend name and the job id w/ the utils/retrieve_job_results.py script"
)
if (s == "q"):
print(
"WARNING: Take note of backend name and job id to retrieve the job"
)
from sys import exit
exit()
result = job.result()
return result.get_counts(qc)
def get_max_key_value(counts):
mx = max(counts.keys(), key=(lambda key: counts[key]))
total = sum(counts.values())
confidence = counts[mx] / total
return mx, confidence
def main():
import argparse
parser = argparse.ArgumentParser(description="Grover algorithm")
parser.add_argument(
'n',
metavar='n',
type=int,
help='the number of bits used to store the oracle data.')
parser.add_argument(
'x_stars',
metavar='x_star',
type=int,
nargs='+',
help='the number(s) for which the oracle returns 1, in the range [0..2**n-1].'
)
parser.add_argument(
'-r',
'--real',
action='store_true',
help='Invoke the real device (implies -o). Default is simulator.')
parser.add_argument(
'-o',
'--online',
action='store_true',
help='Use the online IBMQ devices. Default is local (simulator). This option is automatically set when we want to use a real device (see -r).'
)
parser.add_argument(
'-i',
'--infos',
action='store_true',
help='Print only infos on the circuit built for the specific backend (such as the number of gates) without executing it.'
)
parser.add_argument(
'-b',
'--backend_name',
help="The name of the backend. It makes sense only for online ibmq devices and it's useless otherwise. If not specified, the program automatically choose the least busy ibmq backend."
)
parser.add_argument(
'--img_dir',
help='If you want to store the image of the circuit, you need to specify the directory.'
)
parser.add_argument(
'--plot',
action='store_true',
help='Plot the histogram of the results. Default is false')
args = parser.parse_args()
n = args.n
x_stars = args.x_stars
print("n: {0}, x_stars: {1}".format(n, x_stars))
real = args.real
online = True if real else args.online
infos = args.infos
backend_name = args.backend_name
img_dir = args.img_dir
plot = args.plot
print("real: {0}, online: {1}, infos: {2}, backend_name: {3}".format(
real, online, infos, backend_name))
_import_modules()
oracles = []
for i in range(len(x_stars)):
oracles.append(oracle_simple.OracleSimple(n, x_stars[i]))
gc, n_qubits = circuit.get_circuit(n, oracles)
if (img_dir is not None):
from qiskit.tools.visualization import circuit_drawer
circuit_drawer(
gc, filename=img_dir + "grover_{0}_{1}.png".format(n, x_stars[0]))
backend, max_credits, shots = get_appropriate_backend(
n_qubits, real, online, backend_name)
if (infos):
res = get_compiled_circuit_infos(gc, backend, max_credits, shots)
for k, v in res.items():
print("{0} --> {1}".format(k, v))
else: # execute
counts = run_grover_algorithm(gc, backend, max_credits, shots)
print(counts)
max, confidence = get_max_key_value(counts)
print("Max value: {0}, confidence {1}".format(max, confidence))
if (plot):
from qiskit.tools.visualization import plot_histogram
plot_histogram(counts)
print("END")
# Assumption: if run from console we're inside src/.. dir
if __name__ == "__main__":
main()
|
https://github.com/tigerjack/qiskit_grover
|
tigerjack
|
from qiskit import IBMQ
import sys
def get_job_status(backend, job_id):
backend = IBMQ.get_backend(backend)
print("Backend {0} is operational? {1}".format(
backend.name(),
backend.status()['operational']))
print("Backend was last updated in {0}".format(
backend.properties()['last_update_date']))
print("Backend has {0} pending jobs".format(
backend.status()['pending_jobs']))
ibmq_job = backend.retrieve_job(job_id)
status = ibmq_job.status()
print(status.name)
print(ibmq_job.creation_date())
if (status.name == 'DONE'):
print("EUREKA")
        result = ibmq_job.result()
        counts = result.get_counts()
        print("Result is: {0}".format(counts))
        from qiskit.tools.visualization import plot_histogram
        plot_histogram(counts)
else:
print("... Work in progress ...")
print("Queue position = {0}".format(ibmq_job.queue_position()))
print("Error message = {0}".format(ibmq_job.error_message()))
IBMQ.load_accounts()
print("Account(s) loaded")
if (len(sys.argv) > 2):
get_job_status(sys.argv[1], sys.argv[2])
|
https://github.com/Slimane33/QuantumClassifier
|
Slimane33
|
import numpy as np
from qiskit import *
from qiskit.tools.jupyter import *
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from sklearn.preprocessing import Normalizer
backend = BasicAer.get_backend('qasm_simulator')
def get_angles(x):
    beta0 = 2 * np.arcsin(np.sqrt(x[1] ** 2) / np.sqrt(x[0] ** 2 + x[1] ** 2 + 1e-12))
    beta1 = 2 * np.arcsin(np.sqrt(x[3] ** 2) / np.sqrt(x[2] ** 2 + x[3] ** 2 + 1e-12))
beta2 = 2 * np.arcsin(
np.sqrt(x[2] ** 2 + x[3] ** 2) / np.sqrt(x[0] ** 2 + x[1] ** 2 + x[2] ** 2 + x[3] ** 2)
)
return np.array([beta2, -beta1 / 2, beta1 / 2, -beta0 / 2, beta0 / 2])
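# Note: these are the rotation angles for amplitude-encoding a padded,
# normalized 4-dimensional vector on 2 qubits; `statepreparation` below
# consumes them in this exact order.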
data = np.loadtxt("iris_classes1and2_scaled.txt")
X = data[:, 0:2]
print("First X sample (original) :", X[0])
# pad the vectors to size 2^2 with constant values
padding = 0.3 * np.ones((len(X), 1))
X_pad = np.c_[np.c_[X, padding], np.zeros((len(X), 1))]
print("First X sample (padded) :", X_pad[0])
# normalize each input
normalization = np.sqrt(np.sum(X_pad ** 2, -1))
X_norm = (X_pad.T / normalization).T
print("First X sample (normalized):", X_norm[0])
# angles for state preparation are new features
features = np.array([get_angles(x) for x in X_norm])
print("First features sample :", features[0])
Y = (data[:, -1] + 1) / 2
def statepreparation(a, circuit, target):
a = 2*a
circuit.ry(a[0], target[0])
circuit.cx(target[0], target[1])
circuit.ry(a[1], target[1])
circuit.cx(target[0], target[1])
circuit.ry(a[2], target[1])
circuit.x(target[0])
circuit.cx(target[0], target[1])
circuit.ry(a[3], target[1])
circuit.cx(target[0], target[1])
circuit.ry(a[4], target[1])
circuit.x(target[0])
return circuit
x = X_norm[0]
ang = get_angles(x)
q = QuantumRegister(2)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q,c)
circuit = statepreparation(ang, circuit, [0,1])
circuit.draw(output='mpl')
def u_gate(param, circuit, target):
'''Return the quantum circuit with u3 gate applied on qubit target with param as an iterable'''
circuit.u3(param[0],param[1],param[2],target)
return circuit
def cu_gate(param, circuit, control, target):
    '''Return the quantum circuit with a cu3 gate applied on qubit target, controlled by qubit control, with param as an iterable'''
circuit.cu3(param[0],param[1],param[2], control, target)
return circuit
def circuit_block(param, circuit, target, same_order_x=True):
    '''Return the circuit with one block applied on the target qubits.
    - param : array of parameters for the two u gates
    - target : array of qubit indices on which the u gates are applied
    - if same_order_x == True : cx(target[0], target[1]),
      else : cx(target[1], target[0])'''
circuit = u_gate(param[0], circuit, target[0])
circuit = u_gate(param[1], circuit, target[1])
if same_order_x:
circuit.cx(target[0], target[1])
else:
circuit.cx(target[1], target[0])
return circuit
def c_circuit_block(param, circuit, control, target, same_order_x=True):
    '''Return the circuit with one controlled block applied on the target qubits.
    - param : array of parameters for the two cu gates
    - target : array of qubit indices on which the cu gates are applied
    - if same_order_x == True : ccx(control, target[0], target[1]),
      else : ccx(control, target[1], target[0])'''
circuit = cu_gate(param[0], circuit, control, target[0])
circuit = cu_gate(param[1], circuit, control, target[1])
if same_order_x:
circuit.ccx(control, target[0], target[1])
else:
circuit.ccx(control, target[1], target[0])
return circuit
def create_circuit(param, circuit, target):
order = True
for i in range(param.shape[0]):
circuit = circuit_block(param[i], circuit, target, order)
order = not order
return circuit
def create_c_circuit(param, circuit, control, target):
order = True
for i in range(param.shape[0]):
circuit = c_circuit_block(param[i], circuit, control, target, order)
order = not order
return circuit
x = np.array([0.53896774, 0.79503606, 0.27826503, 0.0])
ang = get_angles(x)
params = np.array([[[np.pi/3,np.pi/3,np.pi/4],
[np.pi/6,np.pi/4,np.pi/6]],
[[np.pi/6,np.pi/4,np.pi/4],
[np.pi/3,np.pi/7,np.pi/6]],
[[np.pi/3,np.pi/3,np.pi/4],
[np.pi/6,np.pi/4,np.pi/6]],
[[np.pi/6,np.pi/4,np.pi/4],
[np.pi/3,np.pi/7,np.pi/6]]])
q = QuantumRegister(2)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q,c)
circuit = statepreparation(ang, circuit, [0,1])
circuit = create_circuit(params, circuit, [0,1])
circuit.measure(0,c)
circuit.draw(output='mpl')
def execute_circuit(params, angles=None, x=None, use_angles=True, bias=0, shots=1000):
if not use_angles:
angles = get_angles(x)
q = QuantumRegister(2)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q,c)
circuit = statepreparation(angles, circuit, [0,1])
circuit = create_circuit(params, circuit, [0,1])
circuit.measure(0,c)
result = execute(circuit,backend,shots=shots).result()
counts = result.get_counts(circuit)
result=np.zeros(2)
for key in counts:
result[int(key,2)]=counts[key]
result/=shots
return result[1] + bias
execute_circuit(params, ang, bias=0.02)
def predict(probas):
return (probas>=0.5)*1
def binary_crossentropy(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
loss = loss - l * np.log(np.max([p,1e-8]))
loss = loss / len(labels)
return loss
def square_loss(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
loss = loss + (l - p) ** 2
loss = loss / len(labels)
return loss
def cost(params, features, labels):
predictions = [execute_circuit(params, angles=f) for f in features]
return binary_crossentropy(labels, predictions)
def accuracy(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
if abs(l - p) < 1e-5:
loss = loss + 1
loss = loss / len(labels)
return loss
def real(param1, param2, angles, shots=1000):
"""Returns Re{<circuit(param2)|sigma_z|circuit(param1)>}"""
q = QuantumRegister(3)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q,c)
circuit.h(q[0])
circuit = statepreparation(angles, circuit, [1,2])
circuit = create_c_circuit(param1, circuit, 0, [1,2])
circuit.cz(q[0], q[1])
circuit.x(q[0])
circuit = create_c_circuit(param2, circuit, 0, [1,2])
circuit.x(q[0])
circuit.h(q[0])
circuit.measure(q[0],c)
result = execute(circuit,backend,shots=shots).result()
counts = result.get_counts(circuit)
result=np.zeros(2)
for key in counts:
result[int(key,2)]=counts[key]
result/=shots
return (2*result[0]-1)
def imaginary(param1, param2, angles, shots=1000):
"""Returns Im{<circuit(param2)|sigma_z|circuit(param1)>}"""
q = QuantumRegister(3)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q,c)
circuit.h(q[0])
circuit = statepreparation(angles, circuit, [1,2])
circuit = create_c_circuit(param1, circuit, 0, [1,2])
circuit.cz(q[0], q[1])
circuit.x(q[0])
circuit = create_c_circuit(param2, circuit, 0, [1,2])
circuit.x(q[0])
circuit.u1(np.pi/2, q[0])
circuit.h(q[0])
circuit.measure(q[0],c)
result = execute(circuit,backend,shots=shots).result()
counts = result.get_counts(circuit)
result=np.zeros(2)
for key in counts:
result[int(key,2)]=counts[key]
result/=shots
return -(2*result[0]-1)
def gradients(params, angles, label, bias=0):
grads = np.zeros_like(params)
imag = imaginary(params, params, angles)
for i in range(params.shape[0]):
for j in range(params.shape[1]):
params_bis = np.copy(params)
params_bis[i,j,0]+=np.pi
grads[i,j,0] = -0.5 * real(params, params_bis, angles)
params_bis[i,j,0]-=np.pi
params_bis[i,j,1]+=np.pi
grads[i,j,1] = 0.5 * (imaginary(params, params_bis, angles) - imag)
params_bis[i,j,1]-=np.pi
params_bis[i,j,2]+=np.pi
grads[i,j,2] = 0.5 * (imaginary(params, params_bis, angles) - imag)
params_bis[i,j,2]-=np.pi
p = execute_circuit(params, angles, bias=bias)
grad_bias = (p - label) / (p * (1 - p))
grads *= grad_bias
return grads, grad_bias
np.random.seed(0)
num_data = len(Y)
num_train = int(0.75 * num_data)
index = np.random.permutation(range(num_data))
feats_train = features[index[:num_train]]
Y_train = Y[index[:num_train]]
feats_val = features[index[num_train:]]
Y_val = Y[index[num_train:]]
# We need these later for plotting
X_train = X[index[:num_train]]
X_val = X[index[num_train:]]
layers = 6
params_init = np.random.randn(layers,2,3) * 0.01
bias_init = 0.01
batch_size = 5
learning_rate = 0.01
momentum = 0.9
var = np.copy(params_init)
bias = bias_init
v = np.zeros_like(var)
v_bias = 0
for it in range(15):
# Update the weights by one optimizer step
batch_index = np.random.randint(0, num_train, (batch_size,))
feats_train_batch = feats_train[batch_index]
Y_train_batch = Y_train[batch_index]
grads = np.zeros_like(var)
grad_bias = 0
var_corrected = var + momentum * v
bias_corrected = bias + momentum * v_bias
for j in range(batch_size):
g, g_bias = gradients(var_corrected, feats_train_batch[j], Y_train_batch[j], bias)
grads += g / batch_size
grad_bias +=g_bias / batch_size
v = momentum * v - learning_rate * grads
v_bias = momentum * v_bias - learning_rate * grad_bias
var += v
bias += v_bias
# Compute predictions on train and validation set
probas_train = np.array([execute_circuit(var, angles=f, bias=bias) for f in feats_train])
probas_val = np.array([execute_circuit(var, angles=f, bias=bias) for f in feats_val])
predictions_train = predict(probas_train)
predictions_val = predict(probas_val)
# Compute accuracy on train and validation set
acc_train = accuracy(Y_train, predictions_train)
acc_val = accuracy(Y_val, predictions_val)
print(
"Iter: {:5d} | Loss: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost(var, features, Y), acc_train, acc_val)
)
plt.figure()
cm = plt.cm.RdBu
# make data for decision regions
xx, yy = np.meshgrid(np.linspace(0.0, 1.5, 20), np.linspace(0.0, 1.5, 20))
X_grid = [np.array([x, y]) for x, y in zip(xx.flatten(), yy.flatten())]
# preprocess grid points like data inputs above
padding = 0.3 * np.ones((len(X_grid), 1))
X_grid = np.c_[np.c_[X_grid, padding], np.zeros((len(X_grid), 1))] # pad each input
normalization = np.sqrt(np.sum(X_grid ** 2, -1))
X_grid = (X_grid.T / normalization).T # normalize each input
features_grid = np.array(
[get_angles(x) for x in X_grid]
) # angles for state preparation are new features
predictions_grid = [execute_circuit(var, angles=f, bias=bias, shots=10000) for f in features_grid]
Z = np.reshape(predictions_grid, xx.shape)
# plot decision regions
cnt = plt.contourf(xx, yy, Z, levels=np.arange(0, 1.1, 0.1), cmap=cm, alpha=0.8, extend="both")
plt.contour(xx, yy, Z, levels=[0.0], colors=("black",), linestyles=("--",), linewidths=(0.8,))
plt.colorbar(cnt, ticks=[0, 0.5, 1])
# plot data
plt.scatter(
X_train[:, 0][Y_train == 1],
X_train[:, 1][Y_train == 1],
c="b",
marker="o",
edgecolors="k",
label="class 1 train",
)
plt.scatter(
X_val[:, 0][Y_val == 1],
X_val[:, 1][Y_val == 1],
c="b",
marker="^",
edgecolors="k",
label="class 1 validation",
)
plt.scatter(
X_train[:, 0][Y_train == 0],
X_train[:, 1][Y_train == 0],
c="r",
marker="o",
edgecolors="k",
label="class 0 train",
)
plt.scatter(
X_val[:, 0][Y_val == 0],
X_val[:, 1][Y_val == 0],
c="r",
marker="^",
edgecolors="k",
label="class 0 validation",
)
plt.legend()
plt.show()
|
https://github.com/HQSquantumsimulations/qoqo-qiskit
|
HQSquantumsimulations
|
from qiskit import Aer, IBMQ, execute
from qiskit import transpile, assemble
from qiskit.tools.monitor import job_monitor
import matplotlib.pyplot as plt
from qiskit.visualization import plot_histogram, plot_state_city
"""
Qiskit backends to execute the quantum circuit
"""
def simulator(qc):
"""
Run on local simulator
:param qc: quantum circuit
:return:
"""
backend = Aer.get_backend("qasm_simulator")
shots = 2048
tqc = transpile(qc, backend)
qobj = assemble(tqc, shots=shots)
job_sim = backend.run(qobj)
qc_results = job_sim.result()
return qc_results, shots
def simulator_state_vector(qc):
"""
Select the StatevectorSimulator from the Aer provider
:param qc: quantum circuit
:return:
statevector
"""
simulator = Aer.get_backend('statevector_simulator')
# Execute and get counts
result = execute(qc, simulator).result()
state_vector = result.get_statevector(qc)
return state_vector
def real_quantum_device(qc):
"""
    Run on a real IBMQ device (ibmq_santiago)
:param qc: quantum circuit
:return:
"""
provider = IBMQ.load_account()
backend = provider.get_backend('ibmq_santiago')
shots = 2048
TQC = transpile(qc, backend)
qobj = assemble(TQC, shots=shots)
job_exp = backend.run(qobj)
job_monitor(job_exp)
QC_results = job_exp.result()
return QC_results, shots
def counts(qc_results):
"""
Get counts representing the wave-function amplitudes
:param qc_results:
:return: dict keys are bit_strings and their counting values
"""
return qc_results.get_counts()
def plot_results(qc_results):
"""
Visualizing wave-function amplitudes in a histogram
:param qc_results: quantum circuit
:return:
"""
plt.show(plot_histogram(qc_results.get_counts(), figsize=(8, 6), bar_labels=False))
def plot_state_vector(state_vector):
"""Visualizing state vector in the density matrix representation"""
plt.show(plot_state_city(state_vector))
|
https://github.com/HQSquantumsimulations/qoqo-qiskit
|
HQSquantumsimulations
|
# Copyright 2022-2023 Ohad Lev.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0,
# or in the root directory of this package("LICENSE.txt").
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
`SATInterface` class.
"""
import os
import json
from typing import List, Tuple, Union, Optional, Dict, Any
from sys import stdout
from datetime import datetime
from hashlib import sha256
from qiskit import transpile, QuantumCircuit, qpy
from qiskit.result.counts import Counts
from qiskit.visualization.circuit.text import TextDrawing
from qiskit.providers.backend import Backend
from qiskit.transpiler.passes import RemoveBarriers
from IPython import display
from matplotlib.figure import Figure
from sat_circuits_engine.util import timer_dec, timestamp, flatten_circuit
from sat_circuits_engine.util.settings import DATA_PATH, TRANSPILE_KWARGS
from sat_circuits_engine.circuit import GroverConstraintsOperator, SATCircuit
from sat_circuits_engine.constraints_parse import ParsedConstraints
from sat_circuits_engine.interface.circuit_decomposition import decompose_operator
from sat_circuits_engine.interface.counts_visualization import plot_histogram
from sat_circuits_engine.interface.translator import ConstraintsTranslator
from sat_circuits_engine.classical_processing import (
find_iterations_unknown,
calc_iterations,
ClassicalVerifier,
)
from sat_circuits_engine.interface.interactive_inputs import (
interactive_operator_inputs,
interactive_solutions_num_input,
interactive_run_input,
interactive_backend_input,
interactive_shots_input,
)
# Local globals for visualization of charts and diagrams
IFRAME_WIDTH = "100%"
IFRAME_HEIGHT = "700"
class SATInterface:
"""
    An interface for building, running, and mining data from quantum circuits for n-SAT problems.
There are 2 options to use this class:
(1) Using an interactive interface (intuitive but somewhat limited) - for this
just initiate a bare instance of this class: `SATInterface()`.
(2) Using the API defined by this class, that includes the following methods:
* The following descriptions are partial, for full annotations see the methods' docstrings.
        - `__init__`: an instance of `SATInterface` must be initiated with exactly 1 combination:
(a) (high_level_constraints_string + high_level_vars) - for constraints
in a high-level format.
(b) (num_input_qubits + constraints_string) - for constraints
            in a low-level format.
        * For format annotations see `constraints_format.ipynb` in the main directory.
- `obtain_grover_operator`: obtains the suitable grover operator for the constraints.
- `save_display_grover_operator`: saves and displays data generated
by the `obtain_grover_operator` method.
- `obtain_overall_circuit`: obtains the suitable overall SAT circuit.
        - `save_display_overall_circuit`: saves and displays data generated
by the `obtain_overall_circuit` method.
- `run_overall_circuit`: executes the overall SAT circuit.
- `save_display_results`: saves and displays data generated
by the `run_overall_circuit` method.
    It is highly recommended to go through `demos.ipynb`, which demonstrates the various optional
    uses of this class, in addition to reading `constraints_format.ipynb`, which is a must for using
    this package properly. Both notebooks are in the main directory.
"""
def __init__(
self,
num_input_qubits: Optional[int] = None,
constraints_string: Optional[str] = None,
high_level_constraints_string: Optional[str] = None,
high_level_vars: Optional[Dict[str, int]] = None,
name: Optional[str] = None,
save_data: Optional[bool] = True,
) -> None:
"""
        Accepts the combination of parameters:
        (high_level_constraints_string + high_level_vars) or (num_input_qubits + constraints_string).
        Exactly one combination is accepted.
        In other cases either an interactive user interface will be called to take the user's inputs,
        or an exception will be raised due to misuse of the API.
Args:
num_input_qubits (Optional[int] = None): number of input qubits.
constraints_string (Optional[str] = None): a string of constraints in a low-level format.
high_level_constraints_string (Optional[str] = None): a string of constraints in a
high-level format.
high_level_vars (Optional[Dict[str, int]] = None): a dictionary that configures
the high-level variables - keys are names and values are bits-lengths.
name (Optional[str] = None): a name for this object; if None, then the
generic name "SAT" is given automatically.
save_data (Optional[bool] = True): if True, saves all data and metadata generated by this
class to a unique data folder (by using the `save_XXX` methods of this class).
Raises:
SyntaxError - if a forbidden combination of arguments has been provided.
"""
if name is None:
name = "SAT"
self.name = name
# Creating a directory for data to be saved
if save_data:
self.time_created = timestamp(datetime.now())
self.dir_path = f"{DATA_PATH}{self.time_created}_{self.name}/"
os.mkdir(self.dir_path)
print(f"Data will be saved into '{self.dir_path}'.")
# Initial metadata, more to be added by this class' `save_XXX` methods
self.metadata = {
"name": self.name,
"datetime": self.time_created,
"num_input_qubits": num_input_qubits,
"constraints_string": constraints_string,
"high_level_constraints_string": high_level_constraints_string,
"high_level_vars": high_level_vars,
}
self.update_metadata()
# Identifying user's platform, for visualization purposes
self.identify_platform()
# In the case of the low-level constraints format, this default value (None) is kept
self.high_to_low_map = None
# Case A - interactive interface
if (num_input_qubits is None or constraints_string is None) and (
high_level_constraints_string is None or high_level_vars is None
):
self.interactive_interface()
# Case B - API
else:
self.high_level_constraints_string = high_level_constraints_string
self.high_level_vars = high_level_vars
# Case B.1 - high-level format constraints inputs
if num_input_qubits is None or constraints_string is None:
self.num_input_qubits = sum(self.high_level_vars.values())
self.high_to_low_map, self.constraints_string = ConstraintsTranslator(
self.high_level_constraints_string, self.high_level_vars
).translate()
# Case B.2 - low-level format constraints inputs
elif num_input_qubits is not None and constraints_string is not None:
self.num_input_qubits = num_input_qubits
self.constraints_string = constraints_string
# Misuse
else:
raise SyntaxError(
"SATInterface accepts the combination of paramters:"
"(high_level_constraints_string + high_level_vars) or "
"(num_input_qubits + constraints_string). "
"Exactly one combination is accepted, not both."
)
self.parsed_constraints = ParsedConstraints(
self.constraints_string, self.high_level_constraints_string
)
def update_metadata(self, update_metadata: Optional[Dict[str, Any]] = None) -> None:
"""
Updates the metadata file (in the unique data folder of a given `SATInterface` instance).
Args:
update_metadata (Optional[Dict[str, Any]] = None):
- If None - just dumps `self.metadata` into the metadata JSON file.
- If defined - updates the `self.metadata` attribute and then dumps it.
"""
if update_metadata is not None:
self.metadata.update(update_metadata)
with open(f"{self.dir_path}metadata.json", "w") as metadata_file:
json.dump(self.metadata, metadata_file, indent=4)
def identify_platform(self) -> None:
"""
Identifies user's platform.
Writes True to `self.jupyter` for Jupyter notebook, False for terminal.
"""
# If True then the platform is a terminal/command line/shell
if stdout.isatty():
self.jupyter = False
# If False, we assume the platform is a Jupyter notebook
else:
self.jupyter = True
def output_to_platform(
self,
*,
title: str,
output_terminal: Union[TextDrawing, str],
output_jupyter: Union[Figure, str],
display_both_on_jupyter: Optional[bool] = False,
) -> None:
"""
Displays output to user's platform.
Args:
title (str): a title for the output.
output_terminal (Union[TextDrawing, str]): text to print for a terminal platform.
output_jupyter (Union[Figure, str]): object to display for a Jupyter notebook platform.
Can handle matplotlib `Figure` objects or strings of paths to IFrame-displayable files,
e.g. PDF files.
display_both_on_jupyter (Optional[bool] = False): if True, displays both
`output_terminal` and `output_jupyter` in a Jupyter notebook platform.
Raises:
TypeError - in the case of misusing the `output_jupyter` argument.
"""
print()
print(title)
if self.jupyter:
if isinstance(output_jupyter, str):
display.display(display.IFrame(output_jupyter, width=IFRAME_WIDTH, height=IFRAME_HEIGHT))
elif isinstance(output_jupyter, Figure):
display.display(output_jupyter)
else:
raise TypeError("output_jupyter must be an str (path to image file) or a Figure object.")
if display_both_on_jupyter:
print(output_terminal)
else:
print(output_terminal)
def interactive_interface(self) -> None:
"""
An interactive CLI that exposes most (but not all) of the package's features.
Uses functions of the form `interactive_XXX_inputs` from the `interactive_inputs.py` module.
Divided into 3 main stages:
1. Obtaining Grover's operator for the SAT problem.
2. Obtaining the overall SAT circuit.
3. Executing the circuit and parsing the results.
The interface is built in a modular manner such that a user can halt at any stage.
The default settings for the interactive user interface are:
1. `name = "SAT"`.
2. `save_data = True`.
3. `display = True`.
4. `transpile_kwargs = {'basis_gates': ['u', 'cx'], 'optimization_level': 3}`.
5. Backends are limited to those defined in the global-constant-like function `BACKENDS`:
- Those are the local `aer_simulator` and the remote `ibmq_qasm_simulator` for now.
Due to these default settings the interactive CLI is somewhat restrictive,
for full flexibility a user should use the API and not the CLI.
"""
# Handling operator part
operator_inputs = interactive_operator_inputs()
self.num_input_qubits = operator_inputs["num_input_qubits"]
self.high_to_low_map = operator_inputs["high_to_low_map"]
self.constraints_string = operator_inputs["constraints_string"]
self.high_level_constraints_string = operator_inputs["high_level_constraints_string"]
self.high_level_vars = operator_inputs["high_level_vars"]
self.parsed_constraints = ParsedConstraints(
self.constraints_string, self.high_level_constraints_string
)
self.update_metadata(
{
"num_input_qubits": self.num_input_qubits,
"constraints_string": self.constraints_string,
"high_level_constraints_string": self.high_level_constraints_string,
"high_level_vars": self.high_level_vars,
}
)
obtain_grover_operator_output = self.obtain_grover_operator()
self.save_display_grover_operator(obtain_grover_operator_output)
# Handling overall circuit part
solutions_num = interactive_solutions_num_input()
if solutions_num is not None:
backend = None
if solutions_num == -1:
backend = interactive_backend_input()
overall_circuit_data = self.obtain_overall_sat_circuit(
obtain_grover_operator_output["operator"], solutions_num, backend
)
self.save_display_overall_circuit(overall_circuit_data)
# Handling circuit execution part
if interactive_run_input():
if backend is None:
backend = interactive_backend_input()
shots = interactive_shots_input()
counts_parsed = self.run_overall_sat_circuit(
overall_circuit_data["circuit"], backend, shots
)
self.save_display_results(counts_parsed)
print()
print(f"Done saving data into '{self.dir_path}'.")
def obtain_grover_operator(
self, transpile_kwargs: Optional[Dict[str, Any]] = None
) -> Dict[str, Union[GroverConstraintsOperator, QuantumCircuit]]:
"""
Obtains the suitable `GroverConstraintsOperator` object for the constraints,
decomposes it using the `circuit_decomposition.py` module and transpiles it
according to `transpile_kwargs`.
Args:
transpile_kwargs (Optional[Dict[str, Any]]): kwargs for Qiskit's transpile function.
The default is the global constant `TRANSPILE_KWARGS`.
Returns:
(Dict[str, Union[GroverConstraintsOperator, QuantumCircuit, Dict[str, List[int]]]]):
- 'operator' (GroverConstraintsOperator): the high-level blocks operator.
- 'decomposed_operator' (QuantumCircuit): the operator decomposed into building blocks.
* For annotations regarding the decomposition method see the
`circuit_decomposition` module.
- 'transpiled_operator' (QuantumCircuit): the transpiled operator.
*** The high-level operator and the decomposed operator are generated with barriers
between constraints by default, for visualization purposes. The barriers are stripped
off before transpiling, so the transpiled operator object contains no barriers. ***
- 'high_level_to_bit_indexes_map' (Optional[Dict[str, List[int]]] = None):
A map of high-level variables with their allocated bit-indexes in the input register.
"""
print()
print(
"The system synthesizes and transpiles a Grover's "
"operator for the given constraints. Please wait.."
)
if transpile_kwargs is None:
transpile_kwargs = TRANSPILE_KWARGS
self.transpile_kwargs = transpile_kwargs
operator = GroverConstraintsOperator(
self.parsed_constraints, self.num_input_qubits, insert_barriers=True
)
decomposed_operator = decompose_operator(operator)
no_barriers_operator = RemoveBarriers()(operator)
transpiled_operator = transpile(no_barriers_operator, **transpile_kwargs)
print("Done.")
return {
"operator": operator,
"decomposed_operator": decomposed_operator,
"transpiled_operator": transpiled_operator,
"high_level_to_bit_indexes_map": self.high_to_low_map,
}
def save_display_grover_operator(
self,
obtain_grover_operator_output: Dict[str, Union[GroverConstraintsOperator, QuantumCircuit]],
display: Optional[bool] = True,
) -> None:
"""
Handles saving and displaying data generated by the `self.obtain_grover_operator` method.
Args:
obtain_grover_operator_output(Dict[str, Union[GroverConstraintsOperator, QuantumCircuit]]):
the dictionary returned upon calling the `self.obtain_grover_operator` method.
display (Optional[bool] = True) - If true, displays objects to user's platform.
"""
# Creating a directory to save operator's data
operator_dir_path = f"{self.dir_path}grover_operator/"
os.mkdir(operator_dir_path)
# Titles for displaying objects, by order of `obtain_grover_operator_output`
titles = [
"The operator diagram - high level blocks:",
"The operator diagram - decomposed:",
f"The transpiled operator diagram saved into '{operator_dir_path}'.\n"
f"It's not presented here due to its complexity.\n"
f"Please note that barriers appear in the high-level diagrams above only for convenient\n"
f"visual separation between constraints.\n"
f"Before transpilation all barriers are removed to avoid redundant inefficiencies.",
]
for index, (op_name, op_obj) in enumerate(obtain_grover_operator_output.items()):
# Generic path and name for files to be saved
files_path = f"{operator_dir_path}{op_name}"
# Generating a circuit diagrams figure
figure_path = f"{files_path}.pdf"
op_obj.draw("mpl", filename=figure_path, fold=-1)
# Generating a QPY serialization file for the circuit object
qpy_file_path = f"{files_path}.qpy"
with open(qpy_file_path, "wb") as qpy_file:
qpy.dump(op_obj, qpy_file)
# Original high-level operator and decomposed operator
if index < 2 and display:
# Displaying to user
self.output_to_platform(
title=titles[index], output_terminal=op_obj.draw("text"), output_jupyter=figure_path
)
# Transpiled operator
elif index == 2:
# Output to user, not including the circuit diagram
print()
print(titles[index])
print()
print(f"The transpilation kwargs are: {self.transpile_kwargs}.")
transpiled_operator_depth = op_obj.depth()
transpiled_operator_gates_count = op_obj.count_ops()
print(f"Transpiled operator depth: {transpiled_operator_depth}.")
print(f"Transpiled operator gates count: {transpiled_operator_gates_count}.")
print(f"Total number of qubits: {op_obj.num_qubits}.")
# Generating a QASM 2.0 file only for the (flattened = no registers) transpiled operator
qasm_file_path = f"{files_path}.qasm"
flatten_circuit(op_obj).qasm(filename=qasm_file_path)
# Index 3 is 'high_level_to_bit_indexes_map' so it's time to break from the loop
break
# Mapping from high-level variables to bit-indexes will be displayed as well
mapping = obtain_grover_operator_output["high_level_to_bit_indexes_map"]
if mapping:
print()
print(f"The high-level variables mapping to bit-indexes:\n{mapping}")
print()
print(
f"Saved into '{operator_dir_path}':\n",
" Circuit diagrams for all levels.\n",
" QPY serialization exports for all levels.\n",
" QASM 2.0 export only for the transpiled level.",
)
with open(f"{operator_dir_path}operator.qpy", "rb") as qpy_file:
operator_qpy_sha256 = sha256(qpy_file.read()).hexdigest()
self.update_metadata(
{
"high_level_to_bit_indexes_map": self.high_to_low_map,
"transpile_kwargs": self.transpile_kwargs,
"transpiled_operator_depth": transpiled_operator_depth,
"transpiled_operator_gates_count": transpiled_operator_gates_count,
"operator_qpy_sha256": operator_qpy_sha256,
}
)
def obtain_overall_sat_circuit(
self,
grover_operator: GroverConstraintsOperator,
solutions_num: int,
backend: Optional[Backend] = None,
) -> Dict[str, SATCircuit]:
"""
Obtains the suitable `SATCircuit` object (= the overall SAT circuit) for the SAT problem.
Args:
grover_operator (GroverConstraintsOperator): Grover's operator for the SAT problem.
solutions_num (int): number of solutions for the SAT problem. In the case the number
of solutions is unknown, specific negative values are accepted:
* '-1' - for launching a classical iterative stochastic process that finds an adequate
number of iterations - by calling the `find_iterations_unknown` function (see its
docstrings for more information).
* '-2' - for generating a dynamic circuit that iterates over Grover's iterator until
a solution is obtained, using weak measurements. TODO - this feature isn't ready yet.
backend (Optional[Backend] = None): in the case of a '-1' value given to `solutions_num`,
a backend object to execute the depicted iterative process upon should be provided.
Returns:
(Dict[str, SATCircuit]):
- 'circuit' key for the overall SAT circuit.
- 'concise_circuit' key for the overall SAT circuit, with only 1 iteration over Grover's
iterator (operator + diffuser). Useful for visualization purposes.
*** The concise circuit is generated with barriers between segments by default,
for visualization purposes. The actual circuit contains no barriers. ***
"""
# -1 = Unknown number of solutions - iterative stochastic process
print()
if solutions_num == -1:
assert backend is not None, "Need to specify a backend if `solutions_num == -1`."
print("Please wait while the system checks various solutions..")
circuit, iterations = find_iterations_unknown(
self.num_input_qubits,
grover_operator,
self.parsed_constraints,
precision=10,
backend=backend,
)
print()
print(f"An adequate number of iterations found = {iterations}.")
# -2 = Unknown number of solutions - implement a dynamic circuit
# TODO this feature isn't fully implemented yet
elif solutions_num == -2:
print("The system builds a dynamic circuit..")
circuit = SATCircuit(self.num_input_qubits, grover_operator, iterations=None)
circuit.add_input_reg_measurement()
iterations = None
# Known number of solutions
else:
print("The system builds the overall circuit..")
iterations = calc_iterations(self.num_input_qubits, solutions_num)
print(f"\nFor {solutions_num} solutions, {iterations} iterations needed.")
circuit = SATCircuit(
self.num_input_qubits, grover_operator, iterations, insert_barriers=False
)
circuit.add_input_reg_measurement()
self.iterations = iterations
# Obtaining a SATCircuit object with one iteration for concise representation
concise_circuit = SATCircuit(
self.num_input_qubits, grover_operator, iterations=1, insert_barriers=True
)
concise_circuit.add_input_reg_measurement()
return {"circuit": circuit, "concise_circuit": concise_circuit}
def save_display_overall_circuit(
self, obtain_overall_sat_circuit_output: Dict[str, SATCircuit], display: Optional[bool] = True
) -> None:
"""
Handles saving and displaying data generated by the `self.obtain_overall_sat_circuit` method.
Args:
obtain_overall_sat_circuit_output(Dict[str, SATCircuit]):
the dictionary returned upon calling the `self.obtain_overall_sat_circuit` method.
display (Optional[bool] = True) - If true, displays objects to user's platform.
"""
circuit = obtain_overall_sat_circuit_output["circuit"]
concise_circuit = obtain_overall_sat_circuit_output["concise_circuit"]
# Creating a directory to save overall circuit's data
overall_circuit_dir_path = f"{self.dir_path}overall_circuit/"
os.mkdir(overall_circuit_dir_path)
# Generating a figure of the overall SAT circuit with just 1 iteration (i.e. "concise")
concise_circuit_fig_path = f"{overall_circuit_dir_path}overall_circuit_1_iteration.pdf"
concise_circuit.draw("mpl", filename=concise_circuit_fig_path, fold=-1)
# Displaying the concise circuit to user
if display:
if self.iterations:
self.output_to_platform(
title=(
f"The high level circuit contains {self.iterations}"
f" iterations of the following form:"
),
output_terminal=concise_circuit.draw("text"),
output_jupyter=concise_circuit_fig_path,
)
# Dynamic circuit case - TODO NOT FULLY IMPLEMENTED YET
else:
dynamic_circuit_fig_path = f"{overall_circuit_dir_path}overall_circuit_dynamic.pdf"
circuit.draw("mpl", filename=dynamic_circuit_fig_path, fold=-1)
self.output_to_platform(
title="The dynamic circuit diagram:",
output_terminal=circuit.draw("text"),
output_jupyter=dynamic_circuit_fig_path,
)
if self.iterations:
transpiled_overall_circuit = transpile(flatten_circuit(circuit), **self.transpile_kwargs)
print()
print(f"The transpilation kwargs are: {self.transpile_kwargs}.")
transpiled_overall_circuit_depth = transpiled_overall_circuit.depth()
transpiled_overall_circuit_gates_count = transpiled_overall_circuit.count_ops()
print(f"Transpiled overall-circuit depth: {transpiled_overall_circuit_depth}.")
print(f"Transpiled overall-circuit gates count: {transpiled_overall_circuit_gates_count}.")
print(f"Total number of qubits: {transpiled_overall_circuit.num_qubits}.")
print()
print("Exporting the full overall SAT circuit object..")
export_files_path = f"{overall_circuit_dir_path}overall_circuit"
with open(f"{export_files_path}.qpy", "wb") as qpy_file:
qpy.dump(circuit, qpy_file)
if self.iterations:
transpiled_overall_circuit.qasm(filename=f"{export_files_path}.qasm")
print()
print(
f"Saved into '{overall_circuit_dir_path}':\n",
" A concised (1 iteration) circuit diagram of the high-level overall SAT circuit.\n",
" QPY serialization export for the full overall SAT circuit object.",
)
if self.iterations:
print(" QASM 2.0 export for the transpiled full overall SAT circuit object.")
metadata_update = {
"num_total_qubits": circuit.num_qubits,
"num_iterations": circuit.iterations,
}
if self.iterations:
metadata_update["transpiled_overall_circuit_depth"] = (transpiled_overall_circuit_depth,)
metadata_update[
"transpiled_overall_circuit_gates_count"
] = transpiled_overall_circuit_gates_count
self.update_metadata(metadata_update)
@timer_dec("Circuit simulation execution time = ")
def run_overall_sat_circuit(
self, circuit: QuantumCircuit, backend: Backend, shots: int
) -> Dict[str, Union[Counts, List[Tuple[Union[str, int]]], List[str], List[Dict[str, int]]]]:
"""
Executes `circuit`, transpiled with respect to `backend`, on `backend` for `shots` shots.
Args:
circuit (QuantumCircuit): `QuantumCircuit` object or child-object (a.k.a `SATCircuit`)
to execute.
backend (Backend): backend to execute `circuit` upon.
shots (int): number of execution shots.
Returns:
(Dict[str, Union[Counts, List[Tuple[Union[str, int]]], List[str], List[Dict[str, int]]]]):
dict object returned by `self.parse_counts` - see this method's docstrings for annotations.
"""
# Defines also instance attributes to use in other methods
self.backend = backend
self.shots = shots
print()
print(f"The system is running the circuit {shots} times on {backend}, please wait..")
print("This process might take a while.")
job = backend.run(transpile(circuit, backend), shots=shots)
counts = job.result().get_counts()
print("Done.")
parsed_counts = self.parse_counts(counts)
return parsed_counts
def parse_counts(
self, counts: Counts
) -> Dict[str, Union[Counts, List[Tuple[Union[str, int]]], List[str], List[Dict[str, int]]]]:
"""
Parses a `Counts` object into several useful data structures (see the 'Returns' section).
Args:
counts (Counts): the `Counts` object to parse.
Returns:
(Dict[str, Union[Counts, List[Tuple[Union[str, int]]], List[str], List[Dict[str, int]]]]):
'counts' (Counts) - the original `Counts` object.
'counts_sorted' (List[Tuple[Union[str, int]]]) - results sorted in descending order.
'distilled_solutions' (List[str]): list of solutions (bitstrings).
'high_level_vars_values' (List[Dict[str, int]]): list of solutions (each solution is a
dictionary with variable-names as keys and their integer values as values).
"""
# Sorting results in descending order
counts_sorted = sorted(counts.items(), key=lambda x: x[1], reverse=True)
# Generating a set of distilled verified-only solutions
verifier = ClassicalVerifier(self.parsed_constraints)
distilled_solutions = set()
for count_item in counts_sorted:
if not verifier.verify(count_item[0]):
break
distilled_solutions.add(count_item[0])
# If the high-level format is in use, translate `distilled_solutions` into integer values
high_level_vars_values = None
if self.high_level_constraints_string and self.high_level_vars:
# Container for dictionaries with variables integer values
high_level_vars_values = []
for solution in distilled_solutions:
# Keys are variable-names and values are their integer values
solution_vars = {}
for var, bits_bundle in self.high_to_low_map.items():
reversed_solution = solution[::-1]
var_bitstring = ""
for bit_index in bits_bundle:
var_bitstring += reversed_solution[bit_index]
# Translating to integer value
solution_vars[var] = int(var_bitstring, 2)
high_level_vars_values.append(solution_vars)
return {
"counts": counts,
"counts_sorted": counts_sorted,
"distilled_solutions": distilled_solutions,
"high_level_vars_values": high_level_vars_values,
}
def save_display_results(
self,
run_overall_sat_circuit_output: Dict[
str, Union[Counts, List[Tuple[Union[str, int]]], List[str], List[Dict[str, int]]]
],
display: Optional[bool] = True,
) -> None:
"""
Handles saving and displaying data generated by the `self.run_overall_sat_circuit` method.
Args:
run_overall_sat_circuit_output (Dict[str, Union[Counts, List[Tuple[Union[str, int]]],
List[str], List[Dict[str, int]]]]): the dictionary returned upon calling
the `self.run_overall_sat_circuit` method.
display (Optional[bool] = True) - If true, displays objects to user's platform.
"""
counts = run_overall_sat_circuit_output["counts"]
counts_sorted = run_overall_sat_circuit_output["counts_sorted"]
distilled_solutions = run_overall_sat_circuit_output["distilled_solutions"]
high_level_vars_values = run_overall_sat_circuit_output["high_level_vars_values"]
# Creating a directory to save results data
results_dir_path = f"{self.dir_path}results/"
os.mkdir(results_dir_path)
# Defining custom dimensions for the custom `plot_histogram` of this package
histogram_fig_width = max((len(counts) * self.num_input_qubits * (10 / 72)), 7)
histogram_fig_height = 5
histogram_figsize = (histogram_fig_width, histogram_fig_height)
histogram_path = f"{results_dir_path}histogram.pdf"
plot_histogram(counts, figsize=histogram_figsize, sort="value_desc", filename=histogram_path)
if display:
# Basic output text
output_text = (
f"All counts:\n{counts_sorted}\n"
f"\nDistilled solutions ({len(distilled_solutions)} total):\n"
f"{distilled_solutions}"
)
# Additional outputs for a high-level constraints format
if high_level_vars_values:
# Mapping from high-level variables to bit-indexes will be displayed as well
output_text += (
f"\n\nThe high-level variables mapping to bit-indexes:" f"\n{self.high_to_low_map}"
)
# Actual integer solutions will be displayed as well
additional_text = ""
for solution_index, solution in enumerate(high_level_vars_values):
additional_text += f"Solution {solution_index + 1}: "
for var_index, (var, value) in enumerate(solution.items()):
additional_text += f"{var} = {value}"
if var_index != len(solution) - 1:
additional_text += ", "
else:
additional_text += "\n"
output_text += f"\n\nHigh-level format solutions: \n{additional_text}"
self.output_to_platform(
title=f"The results for {self.shots} shots are:",
output_terminal=output_text,
output_jupyter=histogram_path,
display_both_on_jupyter=True,
)
results_dict = {
"high_level_to_bit_indexes_map": self.high_to_low_map,
"solutions": list(distilled_solutions),
"high_level_solutions": high_level_vars_values,
"counts": counts_sorted,
}
with open(f"{results_dir_path}results.json", "w") as results_file:
json.dump(results_dict, results_file, indent=4)
self.update_metadata(
{
"num_solutions": len(distilled_solutions),
"backend": str(self.backend),
"shots": self.shots,
}
)
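# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It exercises API combination (a) from the class docstring. The
# constraints string below is only a guess at the high-level syntax (consult
# `constraints_format.ipynb` for the authoritative format), `solutions_num`
# is illustrative, and the Aer import assumes qiskit-aer is installed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from qiskit import Aer

    demo_interface = SATInterface(
        high_level_constraints_string="(x != y)",
        high_level_vars={"x": 2, "y": 2},
        name="demo",
    )
    grover_data = demo_interface.obtain_grover_operator()
    demo_interface.save_display_grover_operator(grover_data)
    circuit_data = demo_interface.obtain_overall_sat_circuit(
        grover_data["operator"], solutions_num=12
    )
    results = demo_interface.run_overall_sat_circuit(
        circuit_data["circuit"], Aer.get_backend("aer_simulator"), shots=1024
    )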
|
https://github.com/HQSquantumsimulations/qoqo-qiskit
|
HQSquantumsimulations
|
# Copyright © 2023 HQS Quantum Simulations GmbH.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Test file for interface.py."""
import sys
from typing import Union
import pytest
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qoqo import Circuit
from qoqo import operations as ops
from qoqo_qiskit.interface import to_qiskit_circuit # type:ignore
def test_basic_circuit() -> None:
"""Test basic circuit conversion."""
circuit = Circuit()
circuit += ops.Hadamard(0)
circuit += ops.PauliX(1)
circuit += ops.Identity(1)
qc = QuantumCircuit(2)
qc.h(0)
qc.x(1)
qc.id(1)
out_circ, sim_dict = to_qiskit_circuit(circuit)
assert out_circ == qc
assert len(sim_dict["MeasurementInfo"]) == 0
def test_qreg_creg_names() -> None:
"""Test qreg and creg qiskit names."""
circuit = Circuit()
circuit += ops.DefinitionBit("cr", 2, is_output=True)
circuit += ops.DefinitionBit("crr", 3, is_output=True)
qr = QuantumRegister(1, "qrg")
cr = ClassicalRegister(2, "cr")
cr2 = ClassicalRegister(3, "crr")
qc = QuantumCircuit(qr, cr, cr2)
out_circ, _ = to_qiskit_circuit(circuit, qubit_register_name="qrg")
assert out_circ == qc
def test_setstatevector() -> None:
"""Test PragmaSetStateVector operation."""
circuit = Circuit()
circuit += ops.PragmaSetStateVector([0, 1])
qc = QuantumCircuit(1)
qc.initialize([0, 1])
out_circ, _ = to_qiskit_circuit(circuit)
assert out_circ == qc
circuit = Circuit()
circuit += ops.PragmaSetStateVector([0, 1])
circuit += ops.RotateX(0, 0.23)
qc = QuantumCircuit(1)
qc.initialize([0, 1])
qc.rx(0.23, 0)
out_circ, _ = to_qiskit_circuit(circuit)
assert out_circ == qc
def test_repeated_measurement() -> None:
"""Test PragmaRepeatedMeasurement operation."""
circuit = Circuit()
circuit += ops.Hadamard(0)
circuit += ops.Hadamard(1)
circuit += ops.DefinitionBit("ri", 2, True)
circuit += ops.PragmaRepeatedMeasurement("ri", 300)
qr = QuantumRegister(2, "q")
cr = ClassicalRegister(2, "ri")
qc = QuantumCircuit(qr, cr)
qc.h(0)
qc.h(1)
qc.measure(qr, cr)
out_circ, sim_dict = to_qiskit_circuit(circuit)
assert out_circ == qc
assert ("ri", 300, None) in sim_dict["MeasurementInfo"]["PragmaRepeatedMeasurement"]
def test_measure_qubit() -> None:
"""Test MeasureQubit operation."""
circuit = Circuit()
circuit += ops.Hadamard(0)
circuit += ops.PauliZ(1)
circuit += ops.DefinitionBit("crg", 1, is_output=True)
circuit += ops.MeasureQubit(0, "crg", 0)
qr = QuantumRegister(2, "q")
cr = ClassicalRegister(1, "crg")
qc = QuantumCircuit(qr, cr)
qc.h(0)
qc.z(1)
qc.measure(0, cr)
out_circ, sim_dict = to_qiskit_circuit(circuit)
assert out_circ == qc
assert (0, "crg", 0) in sim_dict["MeasurementInfo"]["MeasureQubit"]
@pytest.mark.parametrize("repetitions", [0, 2, 4, "test"])
def test_pragma_loop(repetitions: Union[int, str]) -> None:
"""Test PragmaLoop operation."""
inner_circuit = Circuit()
inner_circuit += ops.PauliX(1)
inner_circuit += ops.PauliY(2)
circuit = Circuit()
circuit += ops.Hadamard(0)
circuit += ops.PragmaLoop(repetitions=repetitions, circuit=inner_circuit)
circuit += ops.Hadamard(3)
qc = QuantumCircuit(4)
qc.h(0)
if not isinstance(repetitions, str):
for _ in range(repetitions):
qc.x(1)
qc.y(2)
qc.h(3)
try:
out_circ, _ = to_qiskit_circuit(circuit)
assert out_circ == qc
except ValueError as e:
assert e.args == ("A symbolic PragmaLoop operation is not supported.",)
def test_custom_gates_fix() -> None:
"""Test _custom_gates_fix method."""
qoqo_circuit = Circuit()
qoqo_circuit += ops.PragmaSleep([0, 3], 1.0)
qoqo_circuit += ops.PauliX(2)
qoqo_circuit += ops.PragmaSleep([4], 0.004)
qoqo_circuit += ops.RotateXY(3, 0.1, 0.1)
qr = QuantumRegister(5, "q")
qiskit_circuit = QuantumCircuit(qr)
qiskit_circuit.delay(1.0, qr[0], unit="s")
qiskit_circuit.delay(1.0, qr[3], unit="s")
qiskit_circuit.x(qr[2])
qiskit_circuit.delay(0.004, qr[4], unit="s")
qiskit_circuit.r(0.1, 0.1, qr[3])
out_circ, _ = to_qiskit_circuit(qoqo_circuit)
assert out_circ == qiskit_circuit
def test_simulation_info() -> None:
"""Test SimulationInfo dictionary."""
circuit = Circuit()
circuit += ops.Hadamard(0)
circuit += ops.CNOT(0, 1)
circuit += ops.DefinitionBit("ro", 2, True)
circuit += ops.PragmaGetStateVector("ro", None)
circuit += ops.PragmaGetDensityMatrix("ro", None)
_, sim_dict = to_qiskit_circuit(circuit)
assert sim_dict["SimulationInfo"]["PragmaGetStateVector"]
assert sim_dict["SimulationInfo"]["PragmaGetDensityMatrix"]
# For pytest
if __name__ == "__main__":
pytest.main(sys.argv)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
# check if CUDA is available
import torch
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
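# Hedged sanity check (added for illustration, assuming the qiskit-aer version
# used in this notebook). Note that the class above shadows qiskit's own
# QuantumCircuit, so the bare name refers to the wrapper here. For theta = pi/2
# the ideal expectation is 1, since ry(pi/2) maps the |+> state to |1>.
simulator = qiskit.Aer.get_backend('qasm_simulator')
wrapper_demo = QuantumCircuit(1, simulator, 100)
print('Expectation at theta = pi/2:', wrapper_demo.run([np.pi / 2])[0])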
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
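# Hedged forward-pass check (added for illustration, assuming the same Qiskit
# and Aer versions as the rest of this notebook): feed one scalar through the
# hybrid layer and inspect the expectation value it returns; the input value
# is illustrative.
hybrid_demo = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), shots=100, shift=np.pi / 2)
print('Hybrid layer output:', hybrid_demo(torch.tensor([[np.pi / 4]])))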
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
#cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
@torch.no_grad()
def get_all_preds(model, test_loader):
all_preds = torch.tensor([])
for batch in test_loader:
images, labels = batch
preds = model(images)
all_preds = torch.cat(
(all_preds, preds)
,dim=0
)
return all_preds
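# Hedged usage note (added for illustration): once a trained `model` and a
# `test_loader` exist (both are defined later in this notebook), predictions
# can be collected with, e.g.:
#     test_preds = get_all_preds(model, test_loader)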
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = data_iter.__next__()
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
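# Hedged shape check (added for illustration): the network emits a
# probability-like pair (p, 1 - p) per image, one entry for each of the two
# CIFAR classes kept above, so a single batch should yield shape [1, 2].
with torch.no_grad():
    demo_images, _ = next(iter(train_loader))
    print('Output shape:', Net()(demo_images).shape)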
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
        # Evaluation only: no backward pass or optimizer step on validation data
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
        # Evaluation only: no backward pass or optimizer step on validation data
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(20, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.max_pool2d(x, 2) #Added layer
x = F.relu(self.conv2(x))
x = self.dropout(x) #Added layer
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
        # Evaluation only: no backward pass or optimizer step on validation data
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
        # Evaluation only: no backward pass or optimizer step on validation data
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(20, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = self.dropout(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1) #Added layer
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
        # Evaluation only: no backward pass or optimizer step on validation data
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
        # Evaluation only: no backward pass or optimizer step on validation data
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(20, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = self.dropout(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1) #Added layer
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
        # Evaluation only: no backward pass or optimizer step on validation data
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
        # Evaluation only: no backward pass or optimizer step on validation data
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
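Usage sketch for the helper above: to_numbers unwraps a 1-D tensor (or any sequence of 0-d tensors) into a plain Python list.
print(to_numbers(torch.tensor([0.1, 0.2])))  # [0.10000000149011612, 0.20000000298023224] (float32 round-off)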
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
class QiskitCircuit():
# Specify initial parameters and the quantum circuit
def __init__(self,shots):
self.theta = Parameter('Theta')
self.shots = shots
def create_circuit():
qr = QuantumRegister(1,'q')
cr = ClassicalRegister(1,'c')
ckt = QuantumCircuit(qr,cr)
ckt.h(qr[0])
ckt.barrier()
ckt.ry(self.theta,qr[0])
ckt.barrier()
ckt.measure(qr,cr)
return ckt
self.circuit = create_circuit()
def N_qubit_expectation_Z(self,counts, shots, nr_qubits):
expects = np.zeros(nr_qubits)
for key in counts.keys():
perc = counts[key]/shots
check = np.array([(float(key[i])-1/2)*2*perc for i in range(nr_qubits)])
expects += check
return expects
def bind(self, parameters):
[self.theta] = to_numbers(parameters)
self.circuit.data[2][0]._params = to_numbers(parameters)  # fragile: overwrites the private parameter list of the RY instruction (index 2 in circuit.data)
def run(self, i):
self.bind(i)
backend = Aer.get_backend('qasm_simulator')
job_sim = execute(self.circuit,backend,shots=self.shots)
result_sim = job_sim.result()
counts = result_sim.get_counts(self.circuit)
return self.N_qubit_expectation_Z(counts,self.shots,1)
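As a quick sanity check (a sketch, assuming the QiskitCircuit class above and a local Aer install): N_qubit_expectation_Z maps bit 0 to -1 and bit 1 to +1, so for the H + RY(theta) circuit the expectation is sin(theta).
# Hypothetical smoke test -- not part of the original notebook:
qc_check = QiskitCircuit(shots=1000)
print(qc_check.run(torch.tensor([0.0])))       # roughly [0.], up to shot noise
print(qc_check.run(torch.tensor([np.pi / 2]))) # roughly [1.], up to shot noise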
class TorchCircuit(Function):
@staticmethod
def forward(ctx, i):
if not hasattr(ctx, 'QiskitCirc'):
ctx.QiskitCirc = QiskitCircuit(shots=100)
exp_value = ctx.QiskitCirc.run(i[0])
result = torch.tensor([exp_value]) # store the result as a torch tensor
ctx.save_for_backward(result, i)
return result
@staticmethod
def backward(ctx, grad_output):
s = np.pi/2
forward_tensor, i = ctx.saved_tensors
# Obtain paramaters
input_numbers = to_numbers(i[0])
gradient = []
for k in range(len(input_numbers)):
input_plus_s = input_numbers.copy()  # copy, so shifting one entry does not mutate input_numbers
input_plus_s[k] = input_numbers[k] + s # Shift up by s
exp_value_plus = ctx.QiskitCirc.run(torch.tensor(input_plus_s))[0]
result_plus_s = torch.tensor([exp_value_plus])
input_minus_s = input_numbers.copy()  # copy again for the downward shift
input_minus_s[k] = input_numbers[k] - s # Shift down by s
exp_value_minus = ctx.QiskitCirc.run(torch.tensor(input_minus_s))[0]
result_minus_s = torch.tensor([exp_value_minus])
gradient_result = (result_plus_s - result_minus_s)
gradient.append(gradient_result)
result = torch.tensor([gradient])
return result.float() * grad_output.float()
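For intuition on the backward pass above (a sketch): with the H + RY(theta) circuit the +/-1 expectation is f(theta) = sin(theta), so f(theta + pi/2) - f(theta - pi/2) = 2*cos(theta) = 2*f'(theta). The backward therefore returns an un-normalised parameter-shift gradient (a constant factor of 2 off), which only rescales the effective learning rate.
# Hypothetical numeric check of the shift rule -- not part of the original notebook:
theta = 0.3
circ = QiskitCircuit(shots=2000)
shifted_diff = (circ.run(torch.tensor([theta + np.pi / 2]))[0]
                - circ.run(torch.tensor([theta - np.pi / 2]))[0])
print(shifted_diff, 2 * np.cos(theta))  # should agree up to shot noise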
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
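A quick balance check on the filtered subset (a sketch; the filtered targets are a NumPy array of 0s and 1s):
print(len(cifar_trainset), np.bincount(cifar_trainset.targets))  # expected: 200 [100 100]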
import tensorflow as tf  # only needed by the commented-out alternatives below
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = data_iter.__next__()
#axes[n_samples_show - 1].imshow( tf.shape( tf.squeeze(images[0]) ),cmap='gray' )
#plt.imshow((tf.squeeze(images[0])))
#plt.imshow( tf.shape( tf.squeeze(x_train) ) )
axes[n_samples_show - 1].imshow(images[0][0].numpy(), cmap='gray')  # show the first channel; imshow cannot render a (3, 32, 32) channels-first array
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
#Testing data
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels1 = cifar_testset.targets # get the labels for the data
labels1 = np.array(labels1)
idx1_ae = np.where(labels1 == 0) # filter on aeroplanes
idx2_au = np.where(labels1 == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=50
# concatenate the data indices
idxa = np.concatenate((idx1_ae[0][0:n],idx2_au[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels1[idxa]  # use the *test* labels (labels1), not the training labels
cifar_testset.data = cifar_testset.data[idxa]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
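Note that run() treats the measured bitstrings as the integers 0 and 1 (not the +/-1 eigenvalues of Z), which is why the "rotation pi" check above prints a value near 0.5. Worked by hand with assumed counts (a sketch):
counts = {'0': 21, '1': 79}                            # hypothetical 100-shot result
states = np.array([float(k) for k in counts])          # [0., 1.]
probabilities = np.array(list(counts.values())) / 100  # [0.21, 0.79]
print(np.sum(states * probabilities))                  # 0.79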
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
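Minimal forward-pass sketch for the Hybrid layer (assumes the classes above and a local Aer install):
layer = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), shots=100, shift=np.pi / 2)
print(layer(torch.tensor([[np.pi]])))  # close to 0.5, matching the "rotation pi" check above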
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
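Where the 500 in fc1 comes from (a sketch; building Net() assumes Aer is available): a 3x32x32 CIFAR-10 image becomes 10x28x28 after conv1 (5x5), 10x14x14 after pooling, 20x10x10 after conv2, and 20x5x5 = 500 features after the second pooling.
# Hypothetical shape trace -- not part of the original notebook:
net = Net()
with torch.no_grad():
    dummy = torch.zeros(1, 3, 32, 32)
    feats = F.max_pool2d(F.relu(net.conv2(F.max_pool2d(F.relu(net.conv1(dummy)), 2))), 2)
    print(feats.shape)  # torch.Size([1, 20, 5, 5]) -> 20*5*5 = 500 after flattening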
#qc = TorchCircuit.apply
Ignore this cell (it depends on qc = TorchCircuit.apply, which is commented out above)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.h1 = nn.Linear(500, 500)
self.h2 = nn.Linear(500, 1)
def forward(self,x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 500)
x = F.relu(self.h1(x))
x = F.dropout(x, training=self.training)
x = self.h2(x)
x = qc(x)
x = (x+1)/2 # Map the circuit output from [-1, 1] to [0, 1]
x = torch.cat((x, 1-x), -1)
return x
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
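The same train-then-plot cell is repeated several times below with different settings; a small helper would make those sweeps one-liners. A sketch (the name train_model is hypothetical, not from the original notebook):
def train_model(model, optimizer, loss_func, loader, epochs):
    """Return one average loss per epoch, mirroring the training cells in this notebook."""
    history = []
    model.train()
    for epoch in range(epochs):
        batch_losses = []
        for data, target in loader:
            optimizer.zero_grad()
            loss = loss_func(model(data), target)  # forward pass + loss
            loss.backward()                        # backward pass
            optimizer.step()                       # update the weights
            batch_losses.append(loss.item())
        history.append(sum(batch_losses) / len(batch_losses))
        print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, history[-1]))
    return history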
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
#Testing the quantum hybrid in order to compare it with the classical one
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500,1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 2)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)  # NLLLoss expects log-probabilities; dim made explicit
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 8
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = data_iter.__next__()
images=images.squeeze()
#axes[n_samples_show - 1].imshow( tf.shape( tf.squeeze(images[0]) ),cmap='gray' )
#plt.imshow((tf.squeeze(images[0])))
#plt.imshow( tf.shape( tf.squeeze(x_train) ) )
#axes[n_samples_show - 1].imshow(images[0].numpy().squeeze(), cmap='gray')
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
#Testing data
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels1 = cifar_testset.targets # get the labels for the data
labels1 = np.array(labels1)
idx1_ae = np.where(labels1 == 0) # filter on aeroplanes
idx2_au = np.where(labels1 == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=50
# concatenate the data indices
idxa = np.concatenate((idx1_ae[0][0:n],idx2_au[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels1[idxa]  # use the *test* labels (labels1), not the training labels
cifar_testset.data = cifar_testset.data[idxa]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
#qc = TorchCircuit.apply
Ignore this cell (it depends on qc = TorchCircuit.apply from the earlier notebook version, which is not defined here)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.h1 = nn.Linear(500, 500)
self.h2 = nn.Linear(500, 1)
def forward(self,x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 500)
x = F.relu(self.h1(x))
x = F.dropout(x, training=self.training)
x = self.h2(x)
x = qc(x)
x = (x+1)/2 # Map the circuit output from [-1, 1] to [0, 1]
x = torch.cat((x, 1-x), -1)
return x
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list1 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list1.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list1[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list1)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list2 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list2.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list2[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list2)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list3 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list3.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list3[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list3)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
import torch
import torchvision
import torchvision.transforms as transforms
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
#Testing the quantum hybrid in order to compare it with the classical one
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500,49)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 99)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)  # NLLLoss expects log-probabilities; dim made explicit
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 8
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = data_iter.__next__()
images=images.squeeze()
#axes[n_samples_show - 1].imshow( tf.shape( tf.squeeze(images[0]) ),cmap='gray' )
#plt.imshow((tf.squeeze(images[0])))
#plt.imshow( tf.shape( tf.squeeze(x_train) ) )
#axes[n_samples_show - 1].imshow(images[0].numpy().squeeze(), cmap='gray')
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
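The evaluation cell above is repeated after every training run below; a helper would avoid the copy-paste (a sketch; the name evaluate_model is hypothetical):
def evaluate_model(model, loss_func, loader):
    """Return (mean test loss, accuracy in %) the same way the evaluation cells here do."""
    model.eval()
    losses, correct = [], 0
    with torch.no_grad():
        for data, target in loader:
            output = model(data)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
            losses.append(loss_func(output, target).item())
    return sum(losses) / len(losses), correct / len(loader) * 100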
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list1 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list1.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list1[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list1)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list2 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list2.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list2[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list2)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list3 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list3.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list3[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list3)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list4 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list4.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list4[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list4)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 8
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = data_iter.__next__()
images=images.squeeze()
#axes[n_samples_show - 1].imshow( tf.shape( tf.squeeze(images[0]) ),cmap='gray' )
#plt.imshow((tf.squeeze(images[0])))
#plt.imshow( tf.shape( tf.squeeze(x_train) ) )
#axes[n_samples_show - 1].imshow(images[0].numpy().squeeze(), cmap='gray')
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listA = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listA.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listA[-1]))
#Now plotting the training graph
plt.plot(loss_listA)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listb = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listb.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listb[-1]))
#Now plotting the training graph
plt.plot(loss_listb)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 10
loss_listc = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listc.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listc[-1]))
#Now plotting the training graph
plt.plot(loss_listc)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 15
loss_listd = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listd.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listd[-1]))
#Now plotting the training graph
plt.plot(loss_listd)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
total_loss = []  # reset, so test losses are not mixed with the last training epoch's
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adadelta(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adadelta(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
##Adagrad
model = Net()
optimizer = optim.Adagrad(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listG = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listG.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listG[-1]))
#Now plotting the training graph
plt.plot(loss_listG)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
#RMSprop
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listG = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listG.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listG[-1]))
#Now plotting the training graph
plt.plot(loss_listG)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 20
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
#ADAM 20 epochs
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 20
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
loss.item()
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listD = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listD.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listD[-1]))
#Now plotting the training graph
plt.plot(loss_listD)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listD = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listD.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listD[-1]))
#Now plotting the training graph
plt.plot(loss_listD)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listE = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listE.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listE[-1]))
#Now plotting the training graph
plt.plot(loss_listE)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list1 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list1.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list1[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list1)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list2 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list2.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list2[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list2)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list3 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list3.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list3[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list3)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list4 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list4.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list4[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list4)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
class QuantumCircuit:  # NB: shadows the QuantumCircuit imported from qiskit above; qiskit's own class stays reachable as qiskit.QuantumCircuit
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
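A quick sanity check on what run returns: the keys of get_counts() are the bitstrings '0' and '1' (the astype(float) parsing above only makes sense for a single qubit), so the estimated expectation is simply p(1). For H followed by RY(theta) on |0>,

E[m] = \sum_{s \in \{0,1\}} s\, p(s) = p(1) = \frac{1 + \sin\theta}{2},

so at theta = pi the printed value should be about 0.5, up to shot noise from the 100 shots.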
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T  # stack per-angle differences; shapes assume the (1, 1) input used in this notebook
return torch.tensor([gradients]).float() * grad_output.float(), None, None
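The backward pass above is the finite-difference form of the parameter-shift rule: each angle is shifted by +/-shift (pi/2 in this notebook) and the two expectations are differenced. For the single-qubit circuit here, with E(theta) = (1 + sin theta)/2, the exact rule carries a factor of 1/2:

\frac{\partial E}{\partial \theta} = \frac{E(\theta + \pi/2) - E(\theta - \pi/2)}{2} = \frac{\cos\theta}{2}.

The code omits the 1/2, so its gradients are scaled by 2, a constant factor that the optimizer's learning rate absorbs.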
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
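A minimal usage sketch of the layer on its own (the input value here is illustrative, not from the original): it maps a (1, 1) tensor holding one angle to a 1-element tensor with the estimated expectation.
layer = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), shots=100, shift=np.pi / 2)
x = torch.tensor([[0.3]])   # one angle, the shape fc2 produces below
print(layer(x))             # estimated p(1), a value in [0, 1]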
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 8
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = next(data_iter)
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
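One subtlety to flag before the training runs: the forward pass ends with two probabilities (x, 1 - x), while nn.NLLLoss expects log-probabilities, so the objective below is -p(class) rather than -log p(class). This mirrors the Qiskit textbook example and still trains; if a true negative log likelihood is wanted, a hedged variant of the last line logs the clamped probabilities first:
probs = torch.cat((x, 1 - x), -1)
return torch.log(probs.clamp(min=1e-6))   # genuine log-probabilities for NLLLoss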
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listA = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listA.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listA[-1]))
#Now plotting the training graph
plt.plot(loss_listA)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listb = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listb.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listb[-1]))
#Now plotting the training graph
plt.plot(loss_listb)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 10
loss_listc = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listc.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listc[-1]))
#Now plotting the training graph
plt.plot(loss_listc)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 15
loss_listd = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listd.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listd[-1]))
#Now plotting the training graph
plt.plot(loss_listd)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adadelta(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adadelta(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
##Adagrad
model = Net()
optimizer = optim.Adagrad(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listG = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listG.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listG[-1]))
#Now plotting the training graph
plt.plot(loss_listG)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
#RMSprop
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listG = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listG.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listG[-1]))
#Now plotting the training graph
plt.plot(loss_listG)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 20
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
#ADAM 20 epochs
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 20
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
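Several of the loss-list names above (loss_list, loss_listF, loss_listG, loss_listH, ...) are reused across runs, so each rerun overwrites the previous curve and only the most recent run per name survives in memory. With that caveat, the surviving curves can be overlaid for comparison; a minimal sketch:
for name, curve in [('loss_listF', loss_listF), ('loss_listG', loss_listG), ('loss_listH', loss_listH)]:
    plt.plot(curve, label=name)
plt.title('Training loss by run')
plt.xlabel('Epoch')
plt.ylabel('Neg Log Likelihood Loss')
plt.legend()
plt.show()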
loss.item()
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listD = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listD.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listD[-1]))
#Now plotting the training graph
plt.plot(loss_listD)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listD = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listD.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listD[-1]))
#Now plotting the training graph
plt.plot(loss_listD)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listE = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listE.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listE[-1]))
#Now plotting the training graph
plt.plot(loss_listE)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list1 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list1.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list1[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list1)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list2 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list2.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list2[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list2)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list3 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list3.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list3[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list3)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list4 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list4.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list4[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list4)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adamax(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list4 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list4.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list4[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list4)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Cross Entropy loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
epochs = 10
loss_listK = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listK.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listK[-1]))
#Now plotting the training graph
plt.plot(loss_listK)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Cross Entropy loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
epochs = 15
loss_listL = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listL.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listL[-1]))
#Now plotting the training graph
plt.plot(loss_listL)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Cross Entropy loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.PoissonNLLLoss()
epochs = 5
loss_listK = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listK.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listK[-1]))
#Now plotting the training graph
plt.plot(loss_listK)
plt.title('Negative log likelihood with Poisson')
plt.xlabel('Training Iterations')
plt.ylabel('NLL with Poisson distribution.')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 5
loss_listM = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listM.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listM[-1]))
#Now plotting the training graph
plt.plot(loss_listM)
plt.title('Hybrid NN training convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Negative log Likelihood loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 10
loss_listM = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listM.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listM[-1]))
#Now plotting the training graph
plt.plot(loss_listM)
plt.title('Hybrid NN training convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Negative log Likelihood loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 15
loss_listM = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listM.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listM[-1]))
#Now plotting the training graph
plt.plot(loss_listM)
plt.title('Hybrid NN training convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Negative log Likelihood loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 20
loss_listM = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listM.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listM[-1]))
#Now plotting the training graph
plt.plot(loss_listM)
plt.title('Hybrid NN training convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Negative log Likelihood loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.CrossEntropyLoss()
epochs = 5
loss_listN = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listN.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listN[-1]))
#Now plotting the training graph
plt.plot(loss_listN)
plt.title('Hybrid NN training convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Cross Entropy Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
#changing epochs
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.CrossEntropyLoss()
epochs = 10
loss_listN = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listN.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listN[-1]))
#Now plotting the training graph
plt.plot(loss_listN)
plt.title('Hybrid NN training convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Cross Entropy Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
#epochs to 15
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.CrossEntropyLoss()
epochs = 15
loss_listN = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listN.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listN[-1]))
#Now plotting the training graph
plt.plot(loss_listN)
plt.title('Hybrid NN training convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Cross Entropy Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
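# Note: transpile/assemble and the Aer provider used below follow the older
# Qiskit API this notebook was written against; Qiskit 1.0+ removed assemble()
# and qiskit.Aer in favour of calling backend.run() on transpiled circuits.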
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
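# Note: the helper class below shadows the QuantumCircuit name imported from
# qiskit above, so the qiskit class is referenced as qiskit.QuantumCircuit inside it.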
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
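# For this single-qubit circuit the measured bitstrings are '0' and '1', so the
# value computed here is simply the probability of measuring |1>.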
return np.array([expectation])
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
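# Parameter-shift rule: the gradient of the expectation value with respect to
# theta is estimated from two shifted circuit evaluations,
#   dE/dtheta ~ E(theta + shift) - E(theta - shift),
# which the loop below computes (up to a constant scale factor).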
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
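# A minimal, hypothetical sanity check for the layer above (assumes a local
# qasm_simulator backend; not part of the original notebook):
#   hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), shots=100, shift=np.pi / 2)
#   x = torch.tensor([[0.5]], requires_grad=True)
#   y = hybrid(x)   # expectation value returned by the quantum circuit
#   y.backward()    # gradient obtained via the parameter-shift rule
#   print(x.grad)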
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n = 100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
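# batch_size=1 is required here: HybridFunction.forward only runs the circuit
# for input[0], so larger batches would silently use just the first sample.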
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 8
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = next(data_iter)
images = images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')  # channel 0 of the RGB image, shown in grayscale
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
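# The pair returned below is (p, 1 - p), i.e. probabilities rather than
# log-probabilities; nn.NLLLoss in the training cells therefore acts on raw
# probabilities. Applying torch.log here would give a standard NLL setup.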
return torch.cat((x, 1 - x), -1)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []  # reset so the test loss isn't averaged with training losses
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listA = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listA.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listA[-1]))
#Now plotting the training graph
plt.plot(loss_listA)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listb = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listb.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listb[-1]))
#Now plotting the training graph
plt.plot(loss_listb)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 10
loss_listc = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listc.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listc[-1]))
#Now plotting the training graph
plt.plot(loss_listc)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 15
loss_listd = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listd.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listd[-1]))
#Now plotting the training graph
plt.plot(loss_listd)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adadelta(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adadelta(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listF = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listF.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listF[-1]))
#Now plotting the training graph
plt.plot(loss_listF)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
##Adagrad
model = Net()
optimizer = optim.Adagrad(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listG = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listG.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listG[-1]))
#Now plotting the training graph
plt.plot(loss_listG)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
#RMSprop
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_listG = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listG.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listG[-1]))
#Now plotting the training graph
plt.plot(loss_listG)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 20
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
#ADAM 20 epochs
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 20
loss_listH = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listH.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listH[-1]))
#Now plotting the training graph
plt.plot(loss_listH)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
loss.item()
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100))
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.RMSprop(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listD = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listD.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listD[-1]))
#Now plotting the training graph
plt.plot(loss_listD)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_listD = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listD.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listD[-1]))
#Now plotting the training graph
plt.plot(loss_listD)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_listE = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listE.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listE[-1]))
#Now plotting the training graph
plt.plot(loss_listE)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list1 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list1.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list1[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list1)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list2 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list2.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list2[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list2)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list3 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list3.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list3[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list3)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list4 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list4.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list4[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list4)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adamax(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list4 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list4.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list4[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list4)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Cross Entropy loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
epochs = 10
loss_listK = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listK.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listK[-1]))
#Now plotting the training graph
plt.plot(loss_listK)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Cross Entropy loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
epochs = 15
loss_listL = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listL.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listL[-1]))
#Now plotting the training graph
plt.plot(loss_listL)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Cross Entropy loss')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.PoissonNLLLoss()
epochs = 5
loss_listK = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_listK.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_listK[-1]))
#Now plotting the training graph
plt.plot(loss_listK)
plt.title('Negative log likelihood with Poisson')
plt.xlabel('Training Iterations')
plt.ylabel('NLL with Poisson distribution.')
model.eval()
with torch.no_grad():
total_loss = []
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
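# Sanity check (sketch; the formula below is derived by hand, not taken from
# the repo): for H followed by RY(theta) on one qubit, P(measure 1) =
# (1 + sin(theta)) / 2, which is exactly the bit-value expectation run()
# returns, so theta = pi should give roughly 0.5 up to shot noise.
import numpy as np
def analytic_expectation(theta):
    return (1 + np.sin(theta)) / 2
for theta in (0.0, np.pi / 2, np.pi):
    print(theta, circuit.run([theta])[0], analytic_expectation(theta))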
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
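# Note (sketch): for this one-parameter circuit, the raw difference
# E(theta + pi/2) - E(theta - pi/2) used in HybridFunction.backward equals
# cos(theta), i.e. twice the analytic derivative of (1 + sin(theta)) / 2.
# The backward pass therefore returns a 2x-rescaled gradient, which the
# optimizer's learning rate absorbs. Numeric check of that claim:
import numpy as np
E = lambda t: (1 + np.sin(t)) / 2
theta, s = 0.3, np.pi / 2
print(E(theta + s) - E(theta - s), 2 * np.cos(theta) / 2)  # both ~0.955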
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
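# Alternative (sketch, assuming a freshly loaded cifar_trainset): the same
# 200-image aeroplane/automobile subset via torch.utils.data.Subset, without
# mutating the dataset in place.
from torch.utils.data import Subset
fresh_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
subset = Subset(fresh_trainset, idx.tolist())
train_loader_alt = torch.utils.data.DataLoader(subset, batch_size=1, shuffle=True)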
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 8
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = data_iter.__next__()
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
#Testing data
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels1 = cifar_testset.targets # get the labels for the data
labels1 = np.array(labels1)
idx1_ae = np.where(labels1 == 0) # filter on aeroplanes
idx2_au = np.where(labels1 == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=50
# concatenate the data indices
idxa = np.concatenate((idx1_ae[0][0:n],idx2_au[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels1[idxa]  # use the test-set labels, not the training labels
cifar_testset.data = cifar_testset.data[idxa]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
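# Shape note (sketch): fc1's input size of 500 follows from CIFAR-10's 32x32
# images: conv1 (5x5) -> 28, pool -> 14, conv2 (5x5) -> 10, pool -> 5, so the
# flattened feature vector is 20 * 5 * 5 = 500. A dry run with a random tensor:
print(Net()(torch.rand(1, 3, 32, 32)).shape)  # expected: torch.Size([1, 2])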
#qc = TorchCircuit.apply
# Ignore this cell: an alternate Net definition that calls qc, which is only defined later (qc = TorchCircuit.apply)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.h1 = nn.Linear(500, 500)
self.h2 = nn.Linear(500, 1)
def forward(self,x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 500)
x = F.relu(self.h1(x))
x = F.dropout(x, training=self.training)
x = self.h2(x)
x = qc(x)
x = (x+1)/2 # Rescale the output from [-1, 1] to [0, 1]
x = torch.cat((x, 1-x), -1)
return x
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list1 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list1.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list1[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list1)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list2 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list2.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list2[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list2)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list3 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list3.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list3[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list3)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
import torch
import torchvision
import torchvision.transforms as transforms
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
#Testing the quantum hybrid in order to compare it with the classical one
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500,49)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 99)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.softmax(x, dim=1)  # specify dim explicitly (implicit dim is deprecated)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
class QiskitCircuit():
# Specify initial parameters and the quantum circuit
def __init__(self,shots):
self.theta = Parameter('Theta')
self.shots = shots
def create_circuit():
qr = QuantumRegister(1,'q')
cr = ClassicalRegister(1,'c')
ckt = QuantumCircuit(qr,cr)
ckt.h(qr[0])
ckt.barrier()
ckt.ry(self.theta,qr[0])
ckt.barrier()
ckt.measure(qr,cr)
return ckt
self.circuit = create_circuit()
def N_qubit_expectation_Z(self,counts, shots, nr_qubits):
expects = np.zeros(nr_qubits)
for key in counts.keys():
perc = counts[key]/shots
check = np.array([(float(key[i])-1/2)*2*perc for i in range(nr_qubits)])
expects += check
return expects
def bind(self, parameters):
[self.theta] = to_numbers(parameters)
# Fragile hack: overwrite the ry gate's parameter in place (circuit.data[2] is the ry instruction)
self.circuit.data[2][0]._params = to_numbers(parameters)
def run(self, i):
self.bind(i)
backend = Aer.get_backend('qasm_simulator')
job_sim = execute(self.circuit,backend,shots=self.shots)
result_sim = job_sim.result()
counts = result_sim.get_counts(self.circuit)
return self.N_qubit_expectation_Z(counts,self.shots,1)
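# Alternative (sketch): bind() above pokes circuit.data directly, which is
# brittle. qiskit-terra of this era also exposes QuantumCircuit.bind_parameters,
# so a less fragile run could look like this (assumes a freshly constructed
# QiskitCircuit, since bind() overwrites self.theta with a float):
def run_bound(qiskit_circ, value, shots=100):
    bound = qiskit_circ.circuit.bind_parameters({qiskit_circ.theta: value})
    backend = Aer.get_backend('qasm_simulator')
    counts = execute(bound, backend, shots=shots).result().get_counts()
    return qiskit_circ.N_qubit_expectation_Z(counts, shots, 1)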
class TorchCircuit(Function):
@staticmethod
def forward(ctx, i):
if not hasattr(ctx, 'QiskitCirc'):
ctx.QiskitCirc = QiskitCircuit(shots=100)
exp_value = ctx.QiskitCirc.run(i[0])
result = torch.tensor([exp_value]) # store the result as a torch tensor
ctx.save_for_backward(result, i)
return result
@staticmethod
def backward(ctx, grad_output):
s = np.pi/2
forward_tensor, i = ctx.saved_tensors
# Obtain parameters
input_numbers = to_numbers(i[0])
gradient = []
for k in range(len(input_numbers)):
input_plus_s = list(input_numbers)  # copy so the shift does not mutate the original parameters
input_plus_s[k] = input_numbers[k] + s # Shift up by s
exp_value_plus = ctx.QiskitCirc.run(torch.tensor(input_plus_s))[0]
result_plus_s = torch.tensor([exp_value_plus])
input_minus_s = list(input_numbers)  # copy again for the downward shift
input_minus_s[k] = input_numbers[k] - s # Shift down by s
exp_value_minus = ctx.QiskitCirc.run(torch.tensor(input_minus_s))[0]
result_minus_s = torch.tensor([exp_value_minus])
gradient_result = (result_plus_s - result_minus_s)
gradient.append(gradient_result)
result = torch.tensor([gradient])
return result.float() * grad_output.float()
import torchvision
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
qc = TorchCircuit.apply
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.h1 = nn.Linear(500, 500)
self.h2 = nn.Linear(500, 1)
def forward(self,x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 500)
x = F.relu(self.h1(x))
x = F.dropout(x, training=self.training)
x = self.h2(x)
x = qc(x)
x = (x+1)/2 # Rescale the output from [-1, 1] to [0, 1]
x = torch.cat((x, 1-x), -1)
return x
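# Note (sketch): F.nll_loss used below expects log-probabilities, but this
# forward returns raw probabilities in [0, 1], so the "loss" lies in [-1, 0];
# that is why the plotting code later shifts it by +1. A hypothetical variant
# that feeds true log-probabilities instead (changes the loss scale):
def to_log_probs(p):
    probs = torch.cat((p, 1 - p), -1)
    return torch.log(probs.clamp(min=1e-8))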
network = Net()
#input = input.permute(1,0,2,3)
optimizer = optim.Adam(network.parameters(), lr=0.001)
epochs = 10
loss_list = []
for epoch in range(epochs):
total_loss = []
target_list = []
for batch_idx, (data, target) in enumerate(train_loader):
target_list.append(target.item())
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print(loss_list[-1])
# Shift the loss up by 1 for plotting (these raw-probability "NLL" values lie in [-1, 0])
for i in range(len(loss_list)):
loss_list[i] += 1
# Plot the loss per epoch
plt.plot(loss_list)
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
class QiskitCircuit():
# Specify initial parameters and the quantum circuit
def __init__(self,shots):
self.theta = Parameter('Theta')
self.shots = shots
def create_circuit():
qr = QuantumRegister(1,'q')
cr = ClassicalRegister(1,'c')
ckt = QuantumCircuit(qr,cr)
ckt.h(qr[0])
ckt.barrier()
ckt.ry(self.theta,qr[0])
ckt.barrier()
ckt.measure(qr,cr)
return ckt
self.circuit = create_circuit()
def N_qubit_expectation_Z(self,counts, shots, nr_qubits):
expects = np.zeros(nr_qubits)
for key in counts.keys():
perc = counts[key]/shots
check = np.array([(float(key[i])-1/2)*2*perc for i in range(nr_qubits)])
expects += check
return expects
def bind(self, parameters):
[self.theta] = to_numbers(parameters)
# Fragile hack: overwrite the ry gate's parameter in place (circuit.data[2] is the ry instruction)
self.circuit.data[2][0]._params = to_numbers(parameters)
def run(self, i):
self.bind(i)
backend = Aer.get_backend('qasm_simulator')
job_sim = execute(self.circuit,backend,shots=self.shots)
result_sim = job_sim.result()
counts = result_sim.get_counts(self.circuit)
return self.N_qubit_expectation_Z(counts,self.shots,1)
class TorchCircuit(Function):
@staticmethod
def forward(ctx, i):
if not hasattr(ctx, 'QiskitCirc'):
ctx.QiskitCirc = QiskitCircuit(shots=100)
exp_value = ctx.QiskitCirc.run(i[0])
result = torch.tensor([exp_value]) # store the result as a torch tensor
ctx.save_for_backward(result, i)
return result
@staticmethod
def backward(ctx, grad_output):
s = np.pi/2
forward_tensor, i = ctx.saved_tensors
# Obtain parameters
input_numbers = to_numbers(i[0])
gradient = []
for k in range(len(input_numbers)):
input_plus_s = list(input_numbers)  # copy so the shift does not mutate the original parameters
input_plus_s[k] = input_numbers[k] + s # Shift up by s
exp_value_plus = ctx.QiskitCirc.run(torch.tensor(input_plus_s))[0]
result_plus_s = torch.tensor([exp_value_plus])
input_minus_s = list(input_numbers)  # copy again for the downward shift
input_minus_s[k] = input_numbers[k] - s # Shift down by s
exp_value_minus = ctx.QiskitCirc.run(torch.tensor(input_minus_s))[0]
result_minus_s = torch.tensor([exp_value_minus])
gradient_result = (result_plus_s - result_minus_s)
gradient.append(gradient_result)
result = torch.tensor([gradient])
return result.float() * grad_output.float()
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
qc = TorchCircuit.apply
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.h1 = nn.Linear(500, 500)
self.h2 = nn.Linear(500, 1)
def forward(self,x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 500)
x = F.relu(self.h1(x))
x = F.dropout(x, training=self.training)
x = self.h2(x)
x = qc(x)
x = (x+1)/2 # Rescale the output from [-1, 1] to [0, 1]
x = torch.cat((x, 1-x), -1)
return x
network = Net()
#input = input.permute(1,0,2,3)
optimizer = optim.Adam(network.parameters(), lr=0.001)
epochs = 15
loss_list = []
for epoch in range(epochs):
total_loss = []
target_list = []
for batch_idx, (data, target) in enumerate(train_loader):
target_list.append(target.item())
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print(loss_list[-1])
# Shift the loss up by 1 for plotting (these raw-probability "NLL" values lie in [-1, 0])
for i in range(len(loss_list)):
loss_list[i] += 1
# Plot the loss per epoch
plt.plot(loss_list)
network = Net()
#input = input.permute(1,0,2,3)
optimizer = optim.Adam(network.parameters(), lr=0.001)
epochs = 5
loss_list = []
for epoch in range(epochs):
total_loss = []
target_list = []
for batch_idx, (data, target) in enumerate(train_loader):
target_list.append(target.item())
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print(loss_list[-1])
# Shift the loss up by 1 for plotting (these raw-probability "NLL" values lie in [-1, 0])
for i in range(len(loss_list)):
loss_list[i] += 1
# Plot the loss per epoch
plt.plot(loss_list)
network = Net()
#input = input.permute(1,0,2,3)
optimizer = optim.Adam(network.parameters(), lr=0.001)
epochs = 15
loss_list = []
for epoch in range(epochs):
total_loss = []
target_list = []
for batch_idx, (data, target) in enumerate(train_loader):
target_list.append(target.item())
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print(loss_list[-1])
# Shift the loss up by 1 for plotting (these raw-probability "NLL" values lie in [-1, 0])
for i in range(len(loss_list)):
loss_list[i] += 1
# Plot the loss per epoch
plt.plot(loss_list)
network = Net()
#input = input.permute(1,0,2,3)
optimizer = optim.Adam(network.parameters(), lr=0.001)
epochs = 20
loss_list = []
for epoch in range(epochs):
total_loss = []
target_list = []
for batch_idx, (data, target) in enumerate(train_loader):
target_list.append(target.item())
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print(loss_list[-1])
# Shift the loss up by 1 for plotting (these raw-probability "NLL" values lie in [-1, 0])
for i in range(len(loss_list)):
loss_list[i] += 1
# Plot the loss per epoch
plt.plot(loss_list)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels1 = cifar_testset.targets # get the labels for the data
labels1 = np.array(labels1)
idx1_ae = np.where(labels1 == 0) # filter on aeroplanes
idx2_au = np.where(labels1 == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=50
# concatenate the data indices
idxa = np.concatenate((idx1_ae[0][0:n],idx2_au[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels1[idxa]  # use the test-set labels, not the training labels
cifar_testset.data = cifar_testset.data[idxa]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
qc = TorchCircuit.apply
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.h1 = nn.Linear(500, 500)
self.h2 = nn.Linear(500, 11)
def forward(self,x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 500)
x = F.relu(self.h1(x))
x = F.dropout(x, training=self.training)
x = self.h2(x)
x = qc(x)
x = (x+1)/2 # Rescale the output from [-1, 1] to [0, 1]
x = torch.cat((x, 1-x), -1)
return x
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
# check if CUDA is available
import torch
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
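# Note (sketch): train_on_gpu is checked but never used below; one might act
# on it like this (the hybrid quantum layer still runs on the Qiskit
# simulator, i.e. on CPU, so tensors entering it must return to CPU):
device = torch.device('cuda' if train_on_gpu else 'cpu')
# model = Net().to(device)
# data, target = data.to(device), target.to(device)  # inside the training loop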
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
class QuantumCircuit:
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
#cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
@torch.no_grad()
def get_all_preds(model, test_loader):
all_preds = torch.tensor([])
for batch in test_loader:
images, labels = batch
preds = model(images)
all_preds = torch.cat(
(all_preds, preds)
,dim=0
)
return all_preds
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = data_iter.__next__()
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
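# Usage note (sketch, run after training the model defined below): with
# shuffle=False the loader order matches cifar_testset.targets, so
# get_all_preds above can feed a 2x2 confusion matrix.
all_preds = get_all_preds(model, test_loader)
pred_labels = all_preds.argmax(dim=1)
true_labels = torch.tensor(cifar_testset.targets)
cm = torch.zeros(2, 2, dtype=torch.int64)
for t, p in zip(true_labels, pred_labels):
    cm[t, p] += 1
print(cm)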
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data;
#the loop keeps one validation point per training epoch for the plot below)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if validation_loss.item() <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                validation_loss.item()))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = validation_loss.item()
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
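# Follow-up (sketch): since the lowest-validation-loss weights were
# checkpointed above, one could evaluate those instead of the final weights.
best_model = Net()
best_model.load_state_dict(torch.load('model_cifar.pt'))
best_model.eval()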
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if validation_loss.item() <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                validation_loss.item()))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = validation_loss.item()
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if validation_loss.item() <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                validation_loss.item()))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = validation_loss.item()
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if validation_loss.item() <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                validation_loss.item()))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = validation_loss.item()
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 50
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if validation_loss.item() <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                validation_loss.item()))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = validation_loss.item()
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
###This is CPU run not GPU
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if validation_loss.item() <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                validation_loss.item()))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = validation_loss.item()
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(20, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.max_pool2d(x, 2) #Added layer
x = F.relu(self.conv2(x))
x = self.dropout(x) #Added layer
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
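# Shape note (sketch): the extra pooling shrinks the feature map to one pixel:
# 32 -> 28 (conv1) -> 14 -> 7 (two pools) -> 3 (conv2) -> 1 (pool), so the
# flattened vector is 20 * 1 * 1 = 20, matching fc1's input size.
print(Net()(torch.rand(1, 3, 32, 32)).shape)  # expected: torch.Size([1, 2])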
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if validation_loss.item() <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                validation_loss.item()))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = validation_loss.item()
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if validation_loss.item() <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                validation_loss.item()))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = validation_loss.item()
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(20, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = self.dropout(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1) #Added layer
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
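A quick shape check shows why fc1 takes only 20 input features in this variant (a sketch, assuming the usual 3x32x32 CIFAR-10 input):
# conv1 (k=5): 32 -> 28; pool -> 14; pool -> 7; conv2 (k=5): 7 -> 3; pool -> 1
# flattened activation = 20 channels * 1 * 1 = 20 features, matching nn.Linear(20, 500)
m = Net()
x = torch.randn(1, 3, 32, 32)
x = F.max_pool2d(F.relu(m.conv1(x)), 2)
x = F.max_pool2d(x, 2)
x = F.max_pool2d(F.relu(m.conv2(x)), 2)
print(torch.flatten(x, 1).shape)  # torch.Size([1, 20])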
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
#x = self.dropout(x) omitting this layer
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
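For this variant the flattened size is 500 rather than 20 (a sketch of the arithmetic for a 3x32x32 input): conv1 (k=5) gives 28x28, pooling gives 14x14, conv2 (k=5) gives 10x10, pooling gives 5x5, so 20 channels * 5 * 5 = 500 features, which is why fc1 is nn.Linear(500, 500).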
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 50
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.CrossEntropyLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
# Source: https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline (DRA-chaos)
!pip install qiskit
# check if CUDA is available
import torch
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
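Before wiring the circuit into a network, it can be smoke-tested on its own (a sketch; the `qasm_simulator` backend and 100 shots mirror the values used further down, and `test_circuit` is a throwaway name):
simulator = qiskit.Aer.get_backend('qasm_simulator')
test_circuit = QuantumCircuit(1, simulator, 100)
print(test_circuit.run([np.pi]))  # H then RY(pi) gives P(0)=P(1)=0.5, so the expectation is ~0.5 up to shot noise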
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
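The backward pass above is a parameter-shift estimate: the gradient of the expectation with respect to theta is approximated by two extra circuit runs, dE/dtheta ~ E(theta + s) - E(theta - s) (up to a scale factor). The same idea outside autograd, reusing `test_circuit` from the sketch above:
theta, s = 0.7, np.pi / 2
grad_est = test_circuit.run([theta + s]) - test_circuit.run([theta - s])
print(grad_est)  # shot-noisy estimate of the gradient direction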
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
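In isolation the layer maps one classical feature to one circuit expectation (a sketch; backend, shots and shift match the values Net uses below):
layer = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
print(layer(torch.tensor([[0.5]])))  # a single differentiable expectation value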
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
#cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
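A quick sanity check on the split (a sketch): the filtered set holds 2 * n = 200 images, divided 150/50 between training and validation:
print(len(cifar_trainset), len(valid))  # 150 50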
@torch.no_grad()
def get_all_preds(model, test_loader):
all_preds = torch.tensor([])
for batch in test_loader:
images, labels = batch
preds = model(images)
all_preds = torch.cat(
(all_preds, preds)
,dim=0
)
return all_preds
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = next(data_iter)
images = images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
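The get_all_preds helper defined earlier can then gather every test prediction in one pass (a sketch; it assumes a trained `model`, which is only built in the next cell):
all_preds = get_all_preds(model, test_loader)
print(all_preds.argmax(dim=1))  # predicted class per test image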
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 50
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
### This run is on the CPU, not the GPU
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(20, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.max_pool2d(x, 2) #Added layer
x = F.relu(self.conv2(x))
x = self.dropout(x) #Added layer
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass
output = model(data)
# Calculating loss only -- validation must not run backward() or optimizer.step()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(20, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = self.dropout(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1) #Added layer
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
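# Shape check for this variant, assuming CIFAR-10 input of shape (3, 32, 32):
# conv1 (k=5) -> (10, 28, 28); pool -> (10, 14, 14); pool -> (10, 7, 7);
# conv2 (k=5) -> (20, 3, 3); pool -> (20, 1, 1); flatten -> 20 features,
# which matches fc1 = nn.Linear(20, 500).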
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
#x = self.dropout(x) omitting this layer
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
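# Shape check for this variant, assuming CIFAR-10 input of shape (3, 32, 32):
# conv1 (k=5) -> (10, 28, 28); pool -> (10, 14, 14);
# conv2 (k=5) -> (20, 10, 10); pool -> (20, 5, 5); flatten -> 500 features,
# which matches fc1 = nn.Linear(500, 500).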
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 40
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 50
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.CrossEntropyLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
pip install qiskit
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from matplotlib import pyplot as plt
%matplotlib inline
# Note: "time" is part of the Python standard library; no pip install is required.
pip install qiskit_machine_learning
import numpy as np
import matplotlib.pyplot as plt
from torch import Tensor
from torch.nn import Linear, CrossEntropyLoss, MSELoss
from torch.optim import LBFGS
from qiskit import QuantumCircuit
from qiskit.utils import algorithm_globals
from qiskit.circuit import Parameter
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit_machine_learning.neural_networks import SamplerQNN, EstimatorQNN
from qiskit_machine_learning.connectors import TorchConnector
# Set seed for random generators
algorithm_globals.random_seed = 12
# Generate random dataset
# Select dataset dimension (num_inputs) and size (num_samples)
num_inputs = 2
num_samples = 20
# Generate random input coordinates (X) and binary labels (y)
X = 2 * algorithm_globals.random.random([num_samples, num_inputs]) - 1
y01 = 1 * (np.sum(X, axis=1) >= 0) # in { 0, 1}, y01 will be used for SamplerQNN example
y = 2 * y01 - 1 # in {-1, +1}, y will be used for EstimatorQNN example
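# e.g. a sample with x0 + x1 >= 0 gets y01 = 1 (and y = +1); otherwise y01 = 0 (y = -1)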
# Convert to torch Tensors
X_ = Tensor(X)
y01_ = Tensor(y01).reshape(len(y)).long()
y_ = Tensor(y).reshape(len(y), 1)
# Plot dataset
for x, y_target in zip(X, y):
if y_target == 1:
plt.plot(x[0], x[1], "bo")
else:
plt.plot(x[0], x[1], "go")
plt.plot([-1, 1], [1, -1], "--", color="black")
plt.show()
pip install pylatexenc
pip install matplotlib --upgrade
# Note: "MatplotlibDrawer" is not a separate pip package; qc.draw("mpl") needs only matplotlib and pylatexenc (installed above).
# Set up a circuit
feature_map = ZZFeatureMap(num_inputs)
ansatz = RealAmplitudes(num_inputs)
qc = QuantumCircuit(num_inputs)
qc.compose(feature_map, inplace=True)
qc.compose(ansatz, inplace=True)
#qc.draw("mpl")
print(qc)
# Setup QNN
qnn1 = EstimatorQNN(
circuit=qc, input_params=feature_map.parameters, weight_params=ansatz.parameters
)
# Set up PyTorch module
# Note: If we don't explicitly declare the initial weights
# they are chosen uniformly at random from [-1, 1].
initial_weights = 0.1 * (2 * algorithm_globals.random.random(qnn1.num_weights) - 1)
model1 = TorchConnector(qnn1, initial_weights=initial_weights)
print("Initial weights: ", initial_weights)
#single input test
model1(X_[0, :])
# Define optimizer and loss
optimizer = LBFGS(model1.parameters())
f_loss = MSELoss(reduction="sum")
# Start training
model1.train() # set model to training mode
# Note from (https://pytorch.org/docs/stable/optim.html):
# Some optimization algorithms such as LBFGS need to
# reevaluate the function multiple times, so you have to
# pass in a closure that allows them to recompute your model.
# The closure should clear the gradients, compute the loss,
# and return it.
def closure():
optimizer.zero_grad() # Initialize/clear gradients
loss = f_loss(model1(X_), y_) # Evaluate loss function
loss.backward() # Backward pass
print(loss.item()) # Print loss
return loss
# Run optimizer step
optimizer.step(closure)
# Evaluate model and compute accuracy
y_predict = []
for x, y_target in zip(X, y):
output = model1(Tensor(x))
y_predict += [np.sign(output.detach().numpy())[0]]
print("Accuracy:", sum(y_predict == y) / len(y))
# Plot results
# red == wrongly classified
for x, y_target, y_p in zip(X, y, y_predict):
if y_target == 1:
plt.plot(x[0], x[1], "bo")
else:
plt.plot(x[0], x[1], "go")
if y_target != y_p:
plt.scatter(x[0], x[1], s=200, facecolors="none", edgecolors="r", linewidths=2)
plt.plot([-1, 1], [1, -1], "--", color="black")
plt.show()
# Define feature map and ansatz
feature_map = ZZFeatureMap(num_inputs)
ansatz = RealAmplitudes(num_inputs, entanglement="linear", reps=1)
# Define quantum circuit of num_qubits = input dim
# Append feature map and ansatz
qc = QuantumCircuit(num_inputs)
qc.compose(feature_map, inplace=True)
qc.compose(ansatz, inplace=True)
# Define SamplerQNN and initial setup
parity = lambda x: "{:b}".format(x).count("1") % 2 # optional interpret function
output_shape = 2 # parity = 0, 1
qnn2 = SamplerQNN(
circuit=qc,
input_params=feature_map.parameters,
weight_params=ansatz.parameters,
interpret=parity,
output_shape=output_shape,
)
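# Sanity check of the parity interpret function: it maps a measured
# basis-state index to the parity of its bitstring, e.g.
for i in range(4):
    print(i, "{:b}".format(i), parity(i))  # 0 -> 0, 1 -> 1, 10 -> 1, 11 -> 0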
# Set up PyTorch module
# Reminder: If we don't explicitly declare the initial weights
# they are chosen uniformly at random from [-1, 1].
initial_weights = 0.1 * (2 * algorithm_globals.random.random(qnn2.num_weights) - 1)
print("Initial weights: ", initial_weights)
model2 = TorchConnector(qnn2, initial_weights)
# Define model, optimizer, and loss
optimizer = LBFGS(model2.parameters())
f_loss = CrossEntropyLoss() # Our output will be in the [0,1] range
# Start training
model2.train()
# Define LBFGS closure method (explained in previous section)
def closure():
optimizer.zero_grad(set_to_none=True) # Initialize gradient
loss = f_loss(model2(X_), y01_) # Calculate loss
loss.backward() # Backward pass
print(loss.item()) # Print loss
return loss
# Run optimizer (LBFGS requires closure)
optimizer.step(closure);
# Evaluate model and compute accuracy
y_predict = []
for x in X:
output = model2(Tensor(x))
y_predict += [np.argmax(output.detach().numpy())]
print("Accuracy:", sum(y_predict == y01) / len(y01))
# plot results
# red == wrongly classified
for x, y_target, y_ in zip(X, y01, y_predict):
if y_target == 1:
plt.plot(x[0], x[1], "bo")
else:
plt.plot(x[0], x[1], "go")
if y_target != y_:
plt.scatter(x[0], x[1], s=200, facecolors="none", edgecolors="r", linewidths=2)
plt.plot([-1, 1], [1, -1], "--", color="black")
plt.show()
# Additional torch-related imports
import torch
from torch import cat, no_grad, manual_seed
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.optim as optim
from torch.nn import (
Module,
Conv2d,
Linear,
Dropout2d,
NLLLoss,
MaxPool2d,
Flatten,
Sequential,
ReLU,
)
import torch.nn.functional as F
# Use the torchvision API to load a subset of the MNIST dataset and define torch DataLoaders for train and test.
# Train Dataset
# -------------
# Set train shuffle seed (for reproducibility)
manual_seed(42)
batch_size = 1
n_samples = 100 # We will concentrate on the first 100 samples
# Use pre-defined torchvision function to load MNIST train data
X_train = datasets.MNIST(
root="./data", train=True, download=True, transform=transforms.Compose([transforms.ToTensor()])
)
# Filter out labels (originally 0-9), leaving only labels 0 and 1
idx = np.append(
np.where(X_train.targets == 0)[0][:n_samples], np.where(X_train.targets == 1)[0][:n_samples]
)
X_train.data = X_train.data[idx]
X_train.targets = X_train.targets[idx]
# Define torch dataloader with filtered data
train_loader = DataLoader(X_train, batch_size=batch_size, shuffle=True)
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
while n_samples_show > 0:
images, targets = data_iter.__next__()
axes[n_samples_show - 1].imshow(images[0, 0].numpy().squeeze(), cmap="gray")
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets[0].item()))
n_samples_show -= 1
# Test Dataset
# -------------
# Set test shuffle seed (for reproducibility)
# manual_seed(5)
n_samples = 50
# Use pre-defined torchvision function to load MNIST test data
X_test = datasets.MNIST(
root="./data", train=False, download=True, transform=transforms.Compose([transforms.ToTensor()])
)
# Filter out labels (originally 0-9), leaving only labels 0 and 1
idx = np.append(
np.where(X_test.targets == 0)[0][:n_samples], np.where(X_test.targets == 1)[0][:n_samples]
)
X_test.data = X_test.data[idx]
X_test.targets = X_test.targets[idx]
# Define torch dataloader with filtered data
test_loader = DataLoader(X_test, batch_size=batch_size, shuffle=True)
#Defining QNN and the Hybrid
# Define and create QNN
def create_qnn():
feature_map = ZZFeatureMap(2)
ansatz = RealAmplitudes(2, reps=1)
qc = QuantumCircuit(2)
qc.compose(feature_map, inplace=True)
qc.compose(ansatz, inplace=True)
# REMEMBER TO SET input_gradients=True FOR ENABLING HYBRID GRADIENT BACKPROP
qnn = EstimatorQNN(
circuit=qc,
input_params=feature_map.parameters,
weight_params=ansatz.parameters,
input_gradients=True,
)
return qnn
qnn4 = create_qnn()
# Define torch NN module
class Net(Module):
def __init__(self, qnn):
super().__init__()
self.conv1 = Conv2d(1, 2, kernel_size=5)
self.conv2 = Conv2d(2, 16, kernel_size=5)
self.dropout = Dropout2d()
self.fc1 = Linear(256, 64)
self.fc2 = Linear(64, 2) # 2-dimensional input to QNN
self.qnn = TorchConnector(qnn) # Apply torch connector, weights chosen
# uniformly at random from interval [-1,1].
self.fc3 = Linear(1, 1) # 1-dimensional output from QNN
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(x.shape[0], -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.qnn(x) # apply QNN
x = self.fc3(x)
return cat((x, 1 - x), -1)
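# Note: cat((x, 1 - x), -1) packs the scalar QNN output into a two-entry
# vector for NLLLoss below; strictly, NLLLoss expects log-probabilities,
# so treating (x, 1 - x) that way is this tutorial's binary simplification.
# Shape check with a hypothetical MNIST-sized input:
# Net(create_qnn())(torch.rand(1, 1, 28, 28)).shape -> torch.Size([1, 2])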
model4 = Net(qnn4)
# Define model, optimizer, and loss function
optimizer = optim.Adam(model4.parameters(), lr=0.001)
loss_func = NLLLoss()
# Start training
epochs = 10 # Set number of epochs
loss_list = [] # Store loss history
model4.train() # Set model to training mode
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad(set_to_none=True) # Initialize gradient
output = model4(data) # Forward pass
loss = loss_func(output, target) # Calculate loss
loss.backward() # Backward pass
optimizer.step() # Optimize weights
total_loss.append(loss.item()) # Store loss
loss_list.append(sum(total_loss) / len(total_loss))
print("Training [{:.0f}%]\tLoss: {:.4f}".format(100.0 * (epoch + 1) / epochs, loss_list[-1]))
# Plot loss convergence
plt.plot(loss_list)
plt.title("Hybrid NN Training Convergence")
plt.xlabel("Training Iterations")
plt.ylabel("Neg. Log Likelihood Loss")
plt.show()
torch.save(model4.state_dict(), "model4.pt")
# to execute on real hardware, change the backend
qnn5 = create_qnn()
model5 = Net(qnn5)
model5.load_state_dict(torch.load("model4.pt"))
model5.eval() # set model to evaluation mode
with no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model5(data)
if len(output.shape) == 1:
output = output.reshape(1, *output.shape)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print(
"Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%".format(
sum(total_loss) / len(total_loss), correct / len(test_loader) / batch_size * 100
)
)
# Plot predicted labels
n_samples_show = 6
count = 0
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
model5.eval()
with no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
if count == n_samples_show:
break
output = model5(data[0:1])
if len(output.shape) == 1:
output = output.reshape(1, *output.shape)
pred = output.argmax(dim=1, keepdim=True)
axes[count].imshow(data[0].numpy().squeeze(), cmap="gray")
axes[count].set_xticks([])
axes[count].set_yticks([])
axes[count].set_title("Predicted {}".format(pred.item()))
count += 1
# Define model, optimizer, and loss function
optimizer = optim.Adam(model4.parameters(), lr=0.001)
loss_func = NLLLoss()
# Start training
epochs = 5 # Set number of epochs
loss_list = [] # Store loss history
model4.train() # Set model to training mode
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad(set_to_none=True) # Initialize gradient
output = model4(data) # Forward pass
loss = loss_func(output, target) # Calculate loss
loss.backward() # Backward pass
optimizer.step() # Optimize weights
total_loss.append(loss.item()) # Store loss
loss_list.append(sum(total_loss) / len(total_loss))
print("Training [{:.0f}%]\tLoss: {:.4f}".format(100.0 * (epoch + 1) / epochs, loss_list[-1]))
# to execute on real hardware, change the backend
qnn5 = create_qnn()
model5 = Net(qnn5)
model5.load_state_dict(torch.load("model4.pt"))
model5.eval() # set model to evaluation mode
with no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model5(data)
if len(output.shape) == 1:
output = output.reshape(1, *output.shape)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print(
"Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%".format(
sum(total_loss) / len(total_loss), correct / len(test_loader) / batch_size * 100
)
)
# Define model, optimizer, and loss function
optimizer = optim.Adam(model4.parameters(), lr=0.001)
loss_func = NLLLoss()
# Start training
epochs = 5 # Set number of epochs
loss_list = [] # Store loss history
model4.train() # Set model to training mode
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad(set_to_none=True) # Initialize gradient
output = model4(data) # Forward pass
loss = loss_func(output, target) # Calculate loss
loss.backward() # Backward pass
optimizer.step() # Optimize weights
total_loss.append(loss.item()) # Store loss
loss_list.append(sum(total_loss) / len(total_loss))
print("Training [{:.0f}%]\tLoss: {:.4f}".format(100.0 * (epoch + 1) / epochs, loss_list[-1]))
# to execute on real hardware, change the backend
qnn5 = create_qnn()
model5 = Net(qnn5)
model5.load_state_dict(torch.load("model4.pt"))
model5.eval() # set model to evaluation mode
with no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model5(data)
if len(output.shape) == 1:
output = output.reshape(1, *output.shape)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print(
"Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%".format(
sum(total_loss) / len(total_loss), correct / len(test_loader) / batch_size * 100
)
)
print(qc)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
# check if CUDA is available
import torch
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
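# For this 1-qubit circuit the measured labels are "0" and "1", so the
# weighted sum above is simply P(measure 1). A hedged usage example,
# assuming the Aer qasm simulator as backend:
# qc1 = QuantumCircuit(1, qiskit.Aer.get_backend('qasm_simulator'), 100)
# qc1.run([0.0])  # H|0> followed by RY(0) leaves |+>, so expect roughly [0.5]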
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
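# This backward pass is a parameter-shift style estimate:
#   dE/dtheta_i ~ E(theta_i + shift) - E(theta_i - shift),
# with shift = pi/2 supplied when the Hybrid layer is constructed below.
# The conventional parameter-shift rule for RY divides this difference by 2;
# the omitted constant factor is effectively absorbed into the learning rate.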
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
#cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
@torch.no_grad()
def get_all_preds(model, test_loader):
all_preds = torch.tensor([])
for batch in test_loader:
images, labels = batch
preds = model(images)
all_preds = torch.cat(
(all_preds, preds)
,dim=0
)
return all_preds
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = data_iter.__next__()
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the test set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our test set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
len(cifar_testset)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
model.load_state_dict(torch.load('model_cifar.pt'))
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
if (validation_loss)<=(valid_loss_min):
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
validation_loss))
torch.save(model.state_dict(), 'model_cifar.pt')
valid_loss_min = validation_loss
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 25
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if loss_list_V[-1] <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                loss_list_V[-1]))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
n_samples_show = 6
count = 0
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
model.eval()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
if count == n_samples_show:
break
output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        # Show the first image in the batch with its predicted label
        axes[count].imshow(data[0].squeeze().numpy(), cmap='gray')
        axes[count].set_xticks([])
        axes[count].set_yticks([])
        axes[count].set_title('Predicted {}'.format(pred[0].item()))
        count += 1
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 7
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 8
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 9
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 6
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0004)
loss_func = nn.NLLLoss()
epochs = 6
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0003)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 7
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 7
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
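# --- Added sketch (not in the original notebook): the next cell calls an EarlyStopping
# helper and a `patience` value that are not defined in this part of the notebook. A
# minimal stand-in with the same call signature, assuming lower validation loss is
# better; the checkpoint filename 'checkpoint.pt' is an assumption:
class EarlyStopping:
    def __init__(self, patience=5, verbose=False):
        self.patience = patience      # epochs to wait after the last improvement
        self.verbose = verbose
        self.counter = 0
        self.best_loss = None
        self.early_stop = False
    def __call__(self, val_loss, model):
        if self.best_loss is None or val_loss < self.best_loss:
            # Improvement: reset the counter and keep the best weights
            self.best_loss = val_loss
            self.counter = 0
            torch.save(model.state_dict(), 'checkpoint.pt')
            if self.verbose:
                print('Validation loss improved to {:.6f}. Saving model ...'.format(val_loss))
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True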
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
patience = 5  # assumed value; the original cell leaves `patience` undefined
early_stopping = EarlyStopping(patience=patience, verbose=True)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        early_stopping(loss_list_V[-1], model)  # pass the epoch-average loss, not the raw list
        if early_stopping.early_stop:
            print("Early stopping")
            break
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]  # reset so losses from the previous cell do not accumulate
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 8
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if loss_list_V[-1] <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                loss_list_V[-1]))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
        if loss_list_V[-1] <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                loss_list_V[-1]))
            torch.save(model.state_dict(), 'model_cifar.pt')
            valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data;
# the per-batch and per-epoch loss appends were missing in the original cell)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no weight updates on validation data)
model.eval()
with torch.no_grad():
    for epoch in range(epochs):
        valid_loss = []
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
        loss_list_V.append(sum(valid_loss)/len(valid_loss))
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch+1, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader.dataset) * 100)
    )
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
    with torch.no_grad():  # validation: forward passes only, no gradient updates
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
    # compare the epoch-average validation loss, not the last mini-batch's tensor
    if loss_list_V[-1] <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            loss_list_V[-1]))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
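# The checkpointing above keeps the weights with the lowest validation loss in
# 'model_cifar.pt'; reloading them before the test pass above would evaluate the
# best model rather than the last epoch's, e.g.:
# model.load_state_dict(torch.load('model_cifar.pt'))
# model.eval()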
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.CrossEntropyLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
    with torch.no_grad():  # validation: forward passes only, no gradient updates
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
    # compare the epoch-average validation loss, not the last mini-batch's tensor
    if loss_list_V[-1] <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            loss_list_V[-1]))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
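# A caveat on pairing nn.CrossEntropyLoss with this network: CrossEntropyLoss
# applies log-softmax internally and expects raw logits, but Net emits
# probabilities (x, 1 - x) bounded in [0, 1], which caps how confident the model
# can look to the loss. Emitting a logit pair instead, e.g.
# torch.cat((x, -x), -1), would match the loss's assumptions more closely.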
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
    with torch.no_grad():  # validation: forward passes only, no gradient updates
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch+1, loss_list[epoch], loss_list_V[-1]))
    # compare the epoch-average validation loss, not the last mini-batch's tensor
    if loss_list_V[-1] <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            loss_list_V[-1]))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
import qiskit
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute  # note: the wrapper class below shadows this QuantumCircuit import
from qiskit import transpile, assemble  # used by the wrapper's run() method
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
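# A minimal usage sketch of the wrapper above, assuming the same Aer
# 'qasm_simulator' backend the Hybrid layer further down relies on (the names
# simulator and demo_circuit are illustrative). For this H-then-RY(theta)
# circuit the measured-bit expectation is (1 + sin(theta)) / 2, so the value
# printed should be close to 1 up to shot noise.
simulator = qiskit.Aer.get_backend('qasm_simulator')
demo_circuit = QuantumCircuit(1, simulator, 100)
print(demo_circuit.run([np.pi / 2]))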
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
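# HybridFunction.backward above is a parameter-shift-style gradient: with
# shift s = pi/2 the exact rule for this circuit is
#     dE/dtheta = (E(theta + pi/2) - E(theta - pi/2)) / 2,
# and the code omits the 1/2 factor, which only rescales the gradient and is
# absorbed by the learning rate. A minimal check against the analytic
# derivative dE/dtheta = cos(theta) / 2, reusing the illustrative demo_circuit
# from the sketch above:
shift = np.pi / 2
e_plus = demo_circuit.run([0.0 + shift])
e_minus = demo_circuit.run([0.0 - shift])
print((e_plus - e_minus) / 2)  # ~0.5 at theta = 0, up to shot noise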
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1)
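# Note that random_split is unseeded here, so the 150/50 train/validation split
# differs between runs. A seeded generator would make the runs below repeatable;
# a sketch of what would replace the split call above:
# cifar_trainset, valid = random_split(cifar_trainset, [150, 50],
#                                      generator=torch.Generator().manual_seed(42))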
@torch.no_grad()
def get_all_preds(model, train_loader):
all_preds = torch.tensor([])
for batch in train_loader:
images, labels = batch
preds = model(images)
all_preds = torch.cat(
(all_preds, preds)
,dim=0
)
return all_preds
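# A usage sketch for collecting predictions: because train_loader shuffles,
# predictions and targets must be gathered in the same pass to stay aligned, so
# pairing a get_all_preds result with a second pass over the loader would
# mismatch labels. Assuming a constructed model (Net is defined further down):
# all_preds, all_targets = [], []
# with torch.no_grad():
#     for images, targets in train_loader:
#         all_preds.append(model(images).argmax(dim=1))
#         all_targets.append(targets)
# print((torch.cat(all_preds) == torch.cat(all_targets)).float().mean())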
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = next(data_iter)
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
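# Why fc1 takes 500 inputs: CIFAR images are 3x32x32, so conv1 (kernel 5) gives
# 10x28x28, max-pool 10x14x14, conv2 (kernel 5) 20x10x10, max-pool 20x5x5, and
# x.view(1, -1) flattens 20 * 5 * 5 = 500 features. A quick shape check (it runs
# the quantum layer once, so it needs the Aer backend):
# print(Net()(torch.rand(1, 3, 32, 32)).shape)  # torch.Size([1, 2])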
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
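# One caveat on the loss: nn.NLLLoss expects log-probabilities, while Net returns
# raw probabilities (x, 1 - x). Minimizing -p_target is still a sensible
# objective, but the printed values are not true negative log-likelihoods;
# taking logs first would make them comparable, e.g.:
# loss = loss_func(torch.log(output + 1e-10), target)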
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list_V = []
model.train()  # note: this cell optimizes a *fresh* model on the validation split; see the held-out pattern sketched below
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list_V)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Validation Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(valid_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
print('Performance on test data:\n\tValidation Loss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(valid_loss) / len(valid_loss),
correct / len(valid_loader) * 100)
)
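# The cell above fits a fresh model on the validation split, so its curve is a
# second training curve rather than a held-out measurement. A sketch of the
# conventional pattern, keeping one model and alternating a training pass with a
# no-grad validation pass each epoch:
# for epoch in range(epochs):
#     model.train()
#     for data, target in train_loader:
#         optimizer.zero_grad()
#         loss = loss_func(model(data), target)
#         loss.backward()
#         optimizer.step()
#     model.eval()
#     with torch.no_grad():
#         epoch_val = [loss_func(model(data), target).item()
#                      for data, target in valid_loader]
#     loss_list_V.append(sum(epoch_val) / len(epoch_val))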
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
@torch.no_grad()
def get_all_preds(model, train_loader):
all_preds = torch.tensor([])
for batch in train_loader:
images, labels = batch
preds = model(images)
all_preds = torch.cat(
(all_preds, preds)
,dim=0
)
return all_preds
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = next(data_iter)
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list_V = []
model.train()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list_V)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Validation Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(valid_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
print('Performance on test data:\n\tValidation Loss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(valid_loss) / len(valid_loss),
correct / len(valid_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.CrossEntropyLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.CrossEntropyLoss()
epochs = 5
loss_list_V = []
model.train()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
validation_loss = loss_func(output, target)
# Backward pass
validation_loss.backward()
# Optimize the weights
optimizer.step()
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list_V)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Validation Loss')
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(valid_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
print('Performance on test data:\n\tValidation Loss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(valid_loss) / len(valid_loss),
correct / len(valid_loader) * 100)
)
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[160,40])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
# check if CUDA is available
import torch
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
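# The CUDA check above is never acted on: nothing is moved to the GPU. If it
# were, the model and every batch would need explicit transfers (the quantum
# layer still runs on the CPU-side simulator either way), e.g.:
# device = torch.device('cuda' if train_on_gpu else 'cpu')
# model = Net().to(device)
# data, target = data.to(device), target.to(device)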
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
#cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
@torch.no_grad()
def get_all_preds(model, train_loader):
all_preds = torch.tensor([])
for batch in train_loader:
images, labels = batch
preds = model(images)
all_preds = torch.cat(
(all_preds, preds)
,dim=0
)
return all_preds
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = next(data_iter)
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
len(cifar_testset)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
    with torch.no_grad():  # validation: forward passes only, no gradient updates
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
# check if CUDA is available
import torch
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
#cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
@torch.no_grad()
def get_all_preds(model, train_loader):
all_preds = torch.tensor([])
for batch in train_loader:
images, labels = batch
preds = model(images)
all_preds = torch.cat(
(all_preds, preds)
,dim=0
)
return all_preds
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = next(data_iter)
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
len(cifar_testset)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
    with torch.no_grad():  # validation: forward passes only, no gradient updates
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
    with torch.no_grad():  # validation: forward passes only, no gradient updates
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
n_samples_show = 6
count = 0
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
model.eval()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
if count == n_samples_show:
break
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
data=data.squeeze()
axes[count].imshow(data[0].numpy().squeeze(), cmap='gray')
axes[count].set_xticks([])
axes[count].set_yticks([])
axes[count].set_title('Predicted {}'.format(pred.item()))
count += 1
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
    with torch.no_grad():  # validation: forward passes only, no gradient updates
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
    with torch.no_grad():  # validation: forward passes only, no gradient updates
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
    with torch.no_grad():  # validation: forward passes only, no gradient updates
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
    with torch.no_grad():  # validation: forward passes only, no gradient updates
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 7
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
# check if CUDA is available
import torch
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
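A quick usage note for the helper above (illustrative):
# to_numbers([torch.tensor(1.0), torch.tensor(2.0)]) returns [1.0, 2.0]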
class QuantumCircuit:
"""
A thin wrapper that builds a single-parameter H/RY circuit and evaluates it on a backend.
Note: this class shadows the qiskit.QuantumCircuit imported above, which is why the
qiskit class is only referenced below through its qualified name qiskit.QuantumCircuit.
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
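A minimal smoke test for the wrapper above (a sketch; the simulator and test_circuit names are illustrative). run returns the expectation of the measured bit value, estimated from the requested number of shots:
simulator = qiskit.Aer.get_backend('qasm_simulator')
test_circuit = QuantumCircuit(1, simulator, 100)
print('Expectation for theta = pi:', test_circuit.run([np.pi])[0])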
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
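The backward pass above is a parameter-shift style estimate: every input parameter is re-evaluated at theta + shift and theta - shift (shift = pi/2 in this notebook) and the difference of the two expectations is used as the gradient. A scalar sketch of that rule, assuming a run callable like QuantumCircuit.run (the textbook parameter-shift rule also carries a factor of 1/2, which this implementation omits; the learning rate absorbs constant scaling):

def parameter_shift_gradient(run, theta, shift=np.pi / 2):
    # two extra circuit executions per parameter: E(theta + s) - E(theta - s)
    return run([theta + shift])[0] - run([theta - shift])[0]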
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
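A forward-only check of the layer (a sketch; hybrid_layer is an illustrative name):

hybrid_layer = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), shots=100, shift=np.pi / 2)
print(hybrid_layer(torch.tensor([[0.5]])))  # one shot-noise-limited expectation estimate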
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
#cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
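Because batch_size=1, loader lengths equal sample counts, so the 150/50 split can be verified directly (illustrative):
print(len(train_loader), len(valid_loader))  # expected: 150 50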
@torch.no_grad()
def get_all_preds(model, train_loader):
all_preds = torch.tensor([])
for batch in train_loader:
images, labels = batch
preds = model(images)
all_preds = torch.cat(
(all_preds, preds)
,dim=0
)
return all_preds
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = next(data_iter)
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')  # only the first (red) channel of the RGB image is shown, in grayscale
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
len(cifar_testset)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
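A shape check for the network (a sketch): a 3x32x32 image flows conv1+pool -> 10x14x14, conv2+pool -> 20x5x5 (= 500 features), through the two dense layers into the one-qubit hybrid layer, and out as the pair (p, 1 - p). Note that nn.NLLLoss, used below, formally expects log-probabilities; feeding probabilities as this notebook does still trains, but it minimizes -p(target) rather than the usual cross-entropy.
print(Net()(torch.rand(1, 3, 32, 32)).shape)  # torch.Size([1, 2])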
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
n_samples_show = 6
count = 0
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
model.eval()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
if count == n_samples_show:
break
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
data=data.squeeze()
axes[count].imshow(data[0].numpy().squeeze(), cmap='gray')  # only the first (red) channel of the RGB image is shown, in grayscale
axes[count].set_xticks([])
axes[count].set_yticks([])
axes[count].set_title('Predicted {}'.format(pred.item()))
count += 1
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 7
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 8
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 9
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 6
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0004)
loss_func = nn.NLLLoss()
epochs = 6
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0003)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 7
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 7
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model
model.eval()
for epoch in range(epochs):
valid_loss = []
for batch_idx, (data, target) in enumerate(valid_loader):
# Forward pass only: validation must not call backward() or update the weights
output = model(data)
validation_loss = loss_func(output, target)
valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[epoch], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
|
https://github.com/DRA-chaos/Quantum-Classical-Hyrid-Neural-Network-for-binary-image-classification-using-PyTorch-Qiskit-pipeline
|
DRA-chaos
|
!pip install qiskit
# check if CUDA is available
import torch
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
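# Editorial note (hedged): none of the code below moves the model or tensors to
# the GPU (no model.to('cuda') / data.to('cuda') calls), so training runs on the
# CPU even when CUDA is reported as available here.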
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
import qiskit
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit import Aer, transpile, assemble
from qiskit.circuit import Parameter
from qiskit.visualization import *
from tqdm import tqdm
%matplotlib inline
def to_numbers(tensor_list):
num_list = []
for tensor in tensor_list:
num_list += [tensor.item()]
return num_list
class QuantumCircuit:
    """
    This class provides a simple interface for interaction with the quantum
    circuit. Note that it shadows the qiskit.QuantumCircuit name imported
    above; the wrapped qiskit circuit is always referenced as qiskit.QuantumCircuit.
    """
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts()
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
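# Quick smoke test of the wrapper above -- a minimal sketch assuming the Aer
# qasm_simulator backend (imported above) is available. It estimates the
# Z-expectation of the one-qubit circuit at a single rotation angle.
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expectation value for theta = pi: {}'.format(circuit.run([np.pi])[0]))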
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
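# Note (hedged): the backward pass above is a finite-shift estimate in the style
# of the parameter-shift rule, d<Z>/dtheta ~ <Z>(theta + s) - <Z>(theta - s) at
# s = pi/2, with any scale factor absorbed into the learning rate. A minimal
# gradient sanity check, exercising the same call path the training loops below
# use (assumes the QuantumCircuit wrapper and simulator defined above):
theta = torch.tensor([[0.3]], requires_grad=True)
out = HybridFunction.apply(theta, QuantumCircuit(1, qiskit.Aer.get_backend('qasm_simulator'), 1000), np.pi / 2)
out.backward()
print(theta.grad)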
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
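# Usage sketch for the hybrid layer (assumes the classes above): a single scalar
# activation goes in, and the circuit's Z-expectation comes back as a tensor.
hybrid_layer = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), shots=100, shift=np.pi / 2)
print(hybrid_layer(torch.tensor([[0.5]])))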
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
len(cifar_trainset)
from torch.utils.data import DataLoader, random_split
#cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
cifar_trainset, valid = random_split(cifar_trainset,[150,50])
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=1, shuffle=True)
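# Sanity check (sketch): the 200 filtered images are split 150/50 between
# training and validation, and with batch_size=1 each loader's length equals
# its number of images.
print(len(cifar_trainset), len(valid), len(train_loader), len(valid_loader))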
@torch.no_grad()
def get_all_preds(model, train_loader):
all_preds = torch.tensor([])
for batch in train_loader:
images, labels = batch
preds = model(images)
all_preds = torch.cat(
(all_preds, preds)
,dim=0
)
return all_preds
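# Usage sketch for the helper above (run only after a model has been trained below):
# all_preds = get_all_preds(model, train_loader)  # shape (n_images, 2): [p, 1 - p] rows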
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
    images, targets = next(data_iter)
images=images.squeeze()
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels = cifar_testset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_testset.targets = labels[idx]
cifar_testset.data = cifar_testset.data[idx]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=False)
len(cifar_testset)
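# Sanity check (sketch): the filtered test set should be balanced, 100 aeroplanes
# (label 0) and 100 automobiles (label 1).
unique, counts = np.unique(cifar_testset.targets, return_counts=True)
print(dict(zip(unique, counts)))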
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(500, 500)
self.fc2 = nn.Linear(500, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
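# Shape sketch (hedged) for the 3x32x32 CIFAR input: conv1 (k=5) maps 32 -> 28,
# pooling -> 14, conv2 (k=5) -> 10, pooling -> 5, so the flattened feature vector
# is 20 * 5 * 5 = 500, matching fc1 above. The network returns probabilities
# [p, 1 - p]; note that nn.NLLLoss below is applied to these probabilities
# directly, as in the original notebook.
print(Net()(torch.randn(1, 3, 32, 32)))  # expected shape: (1, 2)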
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
    if loss_list_V[-1] <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            loss_list_V[-1]))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
model.load_state_dict(torch.load('model_cifar.pt'))
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
    if loss_list_V[-1] <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            loss_list_V[-1]))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
    if loss_list_V[-1] <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            loss_list_V[-1]))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
model.load_state_dict(torch.load('model_cifar.pt'))
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 25
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
    if loss_list_V[-1] <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            loss_list_V[-1]))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
valid_loss_min = np.Inf # track change in validation loss
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
    if loss_list_V[-1] <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            loss_list_V[-1]))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = loss_list_V[-1]
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
%matplotlib inline
import matplotlib.pyplot as plt
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
n_samples_show = 6
count = 0
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
model.eval()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
if count == n_samples_show:
break
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
data=data.squeeze()
axes[count].imshow(data[0].numpy().squeeze(), cmap='gray')
axes[count].set_xticks([])
axes[count].set_yticks([])
axes[count].set_title('Predicted {}'.format(pred.item()))
count += 1
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 7
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 8
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 9
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 6
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0004)
loss_func = nn.NLLLoss()
epochs = 6
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0003)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 7
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0005)
loss_func = nn.NLLLoss()
epochs = 7
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 15
loss_list = []
loss_list_V = []
#training the model
model.train()
for epoch in range(epochs):
train_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
train_loss.append(loss.item())
loss_list.append(sum(train_loss)/len(train_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Validate the model (evaluation only: no gradient updates on the validation data)
model.eval()
for epoch in range(epochs):
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            # Forward pass
            output = model(data)
            # Calculating loss
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
loss_list_V.append(sum(valid_loss)/len(valid_loss))
#print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list_V[-1]))
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader.dataset) * 100)  # accuracy over samples; len(test_loader) counts batches
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
loss_list_V = []
# Train the model, computing the validation loss after every epoch
for epoch in range(epochs):
    model.train()
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    # Validate the model (no backward pass or optimizer step, so the
    # validation data never updates the weights)
    model.eval()
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            output = model(data)
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader.dataset) * 100)  # accuracy over samples; len(test_loader) counts batches
)
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
loss_func = nn.NLLLoss()
epochs = 3
loss_list = []
loss_list_V = []
# Train the model, computing the validation loss after every epoch
for epoch in range(epochs):
    model.train()
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Forward pass
        output = model(data)
        # Calculating loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        # Optimize the weights
        optimizer.step()
        train_loss.append(loss.item())
    loss_list.append(sum(train_loss)/len(train_loss))
    # Validate the model (no backward pass or optimizer step, so the
    # validation data never updates the weights)
    model.eval()
    valid_loss = []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            output = model(data)
            validation_loss = loss_func(output, target)
            valid_loss.append(validation_loss.item())
    loss_list_V.append(sum(valid_loss)/len(valid_loss))
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch, loss_list[-1], loss_list_V[-1]))
#Now plotting the training graph
plt.plot(loss_list,label='Training Loss')
plt.plot(loss_list_V,label='Validation Loss')
plt.legend()
plt.show()
total_loss=[]
model.eval()
with torch.no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader.dataset) * 100)  # accuracy over samples; len(test_loader) counts batches
)
|
https://github.com/quantum-melbourne/qiskit-challenge-22
|
quantum-melbourne
|
#Let us begin by importing necessary libraries.
from qiskit import Aer
from qiskit.algorithms import VQE, QAOA, NumPyMinimumEigensolver
from qiskit.algorithms.optimizers import *
from qiskit.circuit.library import TwoLocal
from qiskit.utils import QuantumInstance
from qiskit.utils import algorithm_globals
from qiskit_finance import QiskitFinanceError
from qiskit_finance.applications.optimization import PortfolioOptimization
from qiskit_finance.data_providers import *
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit_optimization.applications import OptimizationApplication
from qiskit_optimization.converters import QuadraticProgramToQubo
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import datetime
import warnings
from sympy.utilities.exceptions import SymPyDeprecationWarning
warnings.simplefilter("ignore", SymPyDeprecationWarning)
# Set parameters for assets and risk factor
num_assets = 4 # set number of assets to 4
q = 0.5 # set risk factor to 0.5
budget = 2 # set budget as defined in the problem
seed = 132 # set random seed
# Generate time series data
stocks = [("STOCK%s" % i) for i in range(num_assets)]
data = RandomDataProvider(tickers=stocks,
start=datetime.datetime(1955,11,5),
end=datetime.datetime(1985,10,26),
seed=seed)
data.run()
# Let's plot our financial data
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.xlabel('days')
plt.ylabel('stock value')
plt.show()
#Let's calculate the expected return for our problem data
mu = data.get_period_return_mean_vector() # Returns a vector containing the mean value of each asset's expected return.
print(mu)
# Let's plot our covariance matrix Σ(sigma)
sigma = data.get_period_return_covariance_matrix() #Returns the covariance matrix of the four assets
print(sigma)
fig, ax = plt.subplots(1,1)
im = plt.imshow(sigma, extent=[-1,1,-1,1])
x_label_list = ['stock3', 'stock2', 'stock1', 'stock0']
y_label_list = ['stock3', 'stock2', 'stock1', 'stock0']
ax.set_xticks([-0.75,-0.25,0.25,0.75])
ax.set_yticks([0.75,0.25,-0.25,-0.75])
ax.set_xticklabels(x_label_list)
ax.set_yticklabels(y_label_list)
plt.colorbar()
plt.clim(-0.000002, 0.00001)
plt.show()
##############################
# Provide your code here
# One possible completion (sketch), mirroring the PortfolioOptimization usage elsewhere in this repo
portfolio = PortfolioOptimization(expected_returns=mu, covariances=sigma, risk_factor=q, budget=budget)
qp = portfolio.to_quadratic_program()
##############################
print(qp)
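# (For reference: the printed QuadraticProgram encodes the mean-variance objective
#  minimize q * x^T Σ x - μ^T x over binary x, subject to the budget constraint sum(x_i) = budget.)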
# Check your answer and submit using the following code
from qc_grader.challenges.unimelb_2022 import grade_ex1a
grade_ex1a(qp)
exact_mes = NumPyMinimumEigensolver()
exact_eigensolver = MinimumEigenOptimizer(exact_mes)
result = exact_eigensolver.solve(qp)
print(result)
optimizer = SLSQP(maxiter=1000)
algorithm_globals.random_seed = 1234
backend = Aer.get_backend('statevector_simulator')
##############################
# Provide your code here
# One possible completion (sketch), mirroring the TwoLocal/VQE pattern used later in this repo
vqe = VQE(ansatz=TwoLocal(num_assets, 'ry', 'cz', reps=3, entanglement='full'),
          optimizer=optimizer,
          quantum_instance=QuantumInstance(backend=backend, seed_simulator=seed, seed_transpiler=seed))
##############################
vqe_meo = MinimumEigenOptimizer(vqe) #please do not change this code
result = vqe_meo.solve(qp) #please do not change this code
print(result) #please do not change this code
# Check your answer and submit using the following code
from qc_grader.challenges.unimelb_2022 import grade_ex1b
grade_ex1b(vqe, qp)
#Step 1: Let us begin by importing necessary libraries
import qiskit
from qiskit import Aer
from qiskit.algorithms import VQE, QAOA, NumPyMinimumEigensolver
from qiskit.algorithms.optimizers import *
from qiskit.circuit.library import TwoLocal
from qiskit.utils import QuantumInstance
from qiskit.utils import algorithm_globals
from qiskit_finance import QiskitFinanceError
from qiskit_finance.applications.optimization import *
from qiskit_finance.data_providers import *
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit_optimization.applications import OptimizationApplication
from qiskit_optimization.converters import QuadraticProgramToQubo
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import datetime
import warnings
from sympy.utilities.exceptions import SymPyDeprecationWarning
warnings.simplefilter("ignore",SymPyDeprecationWarning)
# Step 2. Generate time series data for four assets.
# Do not change start/end dates specified to generate problem data.
seed = 132
num_assets = 4
stocks = [("STOCK%s" % i) for i in range(num_assets)]
data = RandomDataProvider(tickers=stocks,
start=datetime.datetime(1955,11,5),
end=datetime.datetime(1985,10,26),
seed=seed)
data.run()
# Let's plot our financial data (we are generating the same time series data as in the previous example)
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.xlabel('days')
plt.ylabel('stock value')
plt.show()
# Step 3. Calculate mu and sigma for this problem
mu2 = data.get_period_return_mean_vector() #Returns a vector containing the mean value of each asset.
sigma2 = data.get_period_return_covariance_matrix() #Returns the covariance matrix associated with the assets.
print(mu2, sigma2)
# Step 4. Set parameters and constraints based on this challenge 1c
##############################
# Provide your code here
q2 = 0.5     # risk factor, set to 0.5 as specified
budget2 = 3  # budget, set to 3 as specified
##############################
# Step 5. Complete code to generate the portfolio instance
##############################
# Provide your code here
portfolio2 = PortfolioOptimization(expected_returns=mu2, covariances=sigma2, risk_factor=q2, budget=budget2)
qp2 = portfolio2.to_quadratic_program()
##############################
# Step 6. Now let's use QAOA to solve this problem.
optimizer = SLSQP(maxiter=1000)
algorithm_globals.random_seed = 1234
backend = Aer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend=backend, seed_simulator=seed, seed_transpiler=seed)
##############################
# Provide your code here
# One possible completion (sketch); reps=1 is an assumption
qaoa = QAOA(optimizer=optimizer, reps=1, quantum_instance=quantum_instance)
##############################
qaoa_meo = MinimumEigenOptimizer(qaoa) #please do not change this code
result2 = qaoa_meo.solve(qp2) #please do not change this code
print(result2) #please do not change this code
# Check your answer and submit using the following code
from qc_grader.challenges.unimelb_2022 import grade_ex1c
grade_ex1c(qaoa, qp2)
|
https://github.com/quantum-melbourne/qiskit-challenge-22
|
quantum-melbourne
|
from qiskit_nature.drivers import Molecule
from qiskit_nature.drivers.second_quantization import ElectronicStructureDriverType, ElectronicStructureMoleculeDriver
# PSPCz molecule
geometry = [['C', [ -0.2316640, 1.1348450, 0.6956120]],
['C', [ -0.8886300, 0.3253780, -0.2344140]],
['C', [ -0.1842470, -0.1935670, -1.3239330]],
['C', [ 1.1662930, 0.0801450, -1.4737160]],
['C', [ 1.8089230, 0.8832220, -0.5383540]],
['C', [ 1.1155860, 1.4218050, 0.5392780]],
['S', [ 3.5450920, 1.2449890, -0.7349240]],
['O', [ 3.8606900, 1.0881590, -2.1541690]],
['C', [ 4.3889120, -0.0620730, 0.1436780]],
['O', [ 3.8088290, 2.4916780, -0.0174650]],
['C', [ 4.6830900, 0.1064460, 1.4918230]],
['C', [ 5.3364470, -0.9144080, 2.1705280]],
['C', [ 5.6895490, -2.0818670, 1.5007820]],
['C', [ 5.4000540, -2.2323130, 0.1481350]],
['C', [ 4.7467230, -1.2180160, -0.5404770]],
['N', [ -2.2589180, 0.0399120, -0.0793330]],
['C', [ -2.8394600, -1.2343990, -0.1494160]],
['C', [ -4.2635450, -1.0769890, 0.0660760]],
['C', [ -4.5212550, 0.2638010, 0.2662190]],
['C', [ -3.2669630, 0.9823890, 0.1722720]],
['C', [ -2.2678900, -2.4598950, -0.3287380]],
['C', [ -3.1299420, -3.6058560, -0.3236210]],
['C', [ -4.5179520, -3.4797390, -0.1395160]],
['C', [ -5.1056310, -2.2512990, 0.0536940]],
['C', [ -5.7352450, 1.0074800, 0.5140960]],
['C', [ -5.6563790, 2.3761270, 0.6274610]],
['C', [ -4.4287740, 3.0501460, 0.5083650]],
['C', [ -3.2040560, 2.3409470, 0.2746950]],
['H', [ -0.7813570, 1.5286610, 1.5426490]],
['H', [ -0.7079140, -0.7911480, -2.0611600]],
['H', [ 1.7161320, -0.2933710, -2.3302930]],
['H', [ 1.6308220, 2.0660550, 1.2427990]],
['H', [ 4.4214900, 1.0345500, 1.9875450]],
['H', [ 5.5773000, -0.7951290, 3.2218590]],
['H', [ 6.2017810, -2.8762260, 2.0345740]],
['H', [ 5.6906680, -3.1381740, -0.3739110]],
['H', [ 4.5337010, -1.3031330, -1.6001680]],
['H', [ -1.1998460, -2.5827750, -0.4596910]],
['H', [ -2.6937370, -4.5881470, -0.4657540]],
['H', [ -5.1332290, -4.3740010, -0.1501080]],
['H', [ -6.1752900, -2.1516170, 0.1987120]],
['H', [ -6.6812260, 0.4853900, 0.6017680]],
['H', [ -6.5574610, 2.9529350, 0.8109620]],
['H', [ -4.3980410, 4.1305040, 0.5929440]],
['H', [ -2.2726630, 2.8838620, 0.1712760]]]
molecule = Molecule(geometry=geometry, charge=0, multiplicity=1)
driver = ElectronicStructureMoleculeDriver(molecule=molecule,
basis='631g*',
driver_type=ElectronicStructureDriverType.PYSCF)
num_ao = {
'C': 14,
'H': 2,
'N': 14,
'O': 14,
'S': 18,
}
##############################
# Provide your code here
# Count each element in the geometry list defined above
from collections import Counter
element_counts = Counter(atom for atom, _ in geometry)
num_C_atom = element_counts['C']
num_H_atom = element_counts['H']
num_N_atom = element_counts['N']
num_O_atom = element_counts['O']
num_S_atom = element_counts['S']
num_atoms_total = sum(element_counts.values())
num_AO_total = sum(num_ao[element] * count for element, count in element_counts.items())
num_MO_total = num_AO_total  # one molecular orbital per atomic orbital in this basis
##############################
answer_ex2a ={
'C': num_C_atom,
'H': num_H_atom,
'N': num_N_atom,
'O': num_O_atom,
'S': num_S_atom,
'atoms': num_atoms_total,
'AOs': num_AO_total,
'MOs': num_MO_total
}
print(answer_ex2a)
# Check your answer and submit using the following code
from qc_grader.challenges.unimelb_2022 import grade_ex2a
grade_ex2a(answer_ex2a)
from qiskit_nature.drivers.second_quantization import HDF5Driver
driver_reduced = HDF5Driver("resources/PSPCz_reduced.hdf5")
properties = driver_reduced.run()
from qiskit_nature.properties.second_quantization.electronic import ElectronicEnergy
electronic_energy = properties.get_property(ElectronicEnergy)
print(electronic_energy)
from qiskit_nature.properties.second_quantization.electronic import ParticleNumber
##############################
# Provide your code here
# One possible completion (sketch), following the get_property pattern above
particle_number = properties.get_property(ParticleNumber)
num_electron = particle_number.num_alpha + particle_number.num_beta
num_MO = particle_number.num_spin_orbitals // 2
num_SO = particle_number.num_spin_orbitals
num_qubits = num_SO  # one qubit per spin orbital under the Jordan-Wigner mapping
##############################
answer_ex2b = {
'electrons': num_electron,
'MOs': num_MO,
'SOs': num_SO,
'qubits': num_qubits
}
print(answer_ex2b)
# Check your answer and submit using the following code
from qc_grader.challenges.unimelb_2022 import grade_ex2b
grade_ex2b(answer_ex2b)
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
##############################
# Provide your code here
es_problem = ElectronicStructureProblem(driver_reduced)
##############################
second_q_op = es_problem.second_q_ops()
print(second_q_op[0])
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper, ParityMapper, BravyiKitaevMapper
##############################
# Provide your code here
# One plausible choice (sketch): parity mapping with two-qubit reduction, which
# needs the particle number at conversion time (same pattern as elsewhere in this repo)
qubit_converter = QubitConverter(ParityMapper(), two_qubit_reduction=True)
##############################
qubit_op = qubit_converter.convert(second_q_op[0], num_particles=es_problem.num_particles)
print(qubit_op)
from qiskit_nature.circuit.library import HartreeFock
##############################
# Provide your code here
# One possible completion (sketch)
init_state = HartreeFock(es_problem.num_spin_orbitals, es_problem.num_particles, qubit_converter)
##############################
init_state.draw()
from qiskit.circuit.library import EfficientSU2, TwoLocal, NLocal, PauliTwoDesign
from qiskit_nature.circuit.library import UCCSD, PUCCD, SUCCD
##############################
# Provide your code here
# One plausible ansatz (sketch); other choices (UCCSD, EfficientSU2, ...) also work
ansatz = TwoLocal(qubit_op.num_qubits, rotation_blocks=['ry', 'rz'],
                  entanglement_blocks='cx', entanglement='linear', reps=2)
##############################
ansatz.decompose().draw()
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit_nature.algorithms import GroundStateEigensolver
##############################
# Provide your code here
numpy_solver = NumPyMinimumEigensolver()
numpy_ground_state_solver = GroundStateEigensolver(qubit_converter, numpy_solver)
numpy_results = numpy_ground_state_solver.solve(es_problem)
##############################
exact_energy = numpy_results.computed_energies[0]
print(f"Exact electronic energy: {exact_energy:.6f} Hartree\n")
print(numpy_results)
# Check your answer and submit using the following code
from qc_grader.challenges.unimelb_2022 import grade_ex2c
grade_ex2c(numpy_results)
from qiskit.providers.aer import StatevectorSimulator, QasmSimulator
from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SPSA, SLSQP
##############################
# Provide your code here
# One plausible choice (sketch): exact statevector simulation with COBYLA (maxiter is an assumption)
backend = StatevectorSimulator()
optimizer = COBYLA(maxiter=500)
##############################
from qiskit.algorithms import VQE
from qiskit_nature.algorithms import VQEUCCFactory, GroundStateEigensolver
from jupyterplot import ProgressPlot
import numpy as np
error_threshold = 10 # mHartree
np.random.seed(5) # fix seed for reproducibility
initial_point = np.random.random(ansatz.num_parameters)
# for live plotting
pp = ProgressPlot(plot_names=['Energy'],
line_names=['Runtime VQE', f'Target + {error_threshold}mH', 'Target'])
intermediate_info = {
'nfev': [],
'parameters': [],
'energy': [],
'stddev': []
}
def callback(nfev, parameters, energy, stddev):
intermediate_info['nfev'].append(nfev)
intermediate_info['parameters'].append(parameters)
intermediate_info['energy'].append(energy)
intermediate_info['stddev'].append(stddev)
pp.update([[energy, exact_energy+error_threshold/1000, exact_energy]])
##############################
# Provide your code here
# One possible completion (sketch), wiring in the callback and initial point defined above
vqe = VQE(ansatz=ansatz, optimizer=optimizer, initial_point=initial_point,
          callback=callback, quantum_instance=backend)
vqe_ground_state_solver = GroundStateEigensolver(qubit_converter, vqe)
vqe_results = vqe_ground_state_solver.solve(es_problem)
##############################
print(vqe_results)
error = (vqe_results.computed_energies[0] - exact_energy) * 1000 # mHartree
print(f'Error is: {error:.3f} mHartree')
# Check your answer and submit using the following code
from qc_grader.challenges.unimelb_2022 import grade_ex2d
grade_ex2d(vqe_results)
from qiskit_nature.algorithms import QEOM
##############################
# Provide your code here
# One possible completion (sketch): qEOM on top of the VQE ground-state solver,
# using single and double excitations
qeom_excited_state_solver = QEOM(vqe_ground_state_solver, 'sd')
qeom_results = qeom_excited_state_solver.solve(es_problem)
##############################
print(qeom_results)
# Check your answer and submit using the following code
from qc_grader.challenges.unimelb_2022 import grade_ex2e
grade_ex2e(qeom_results)
bandgap = qeom_results.computed_energies[1] - qeom_results.computed_energies[0]
bandgap # in Hartree
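# A small sketch (not part of the original exercise): express the gap in eV,
# using 1 Hartree ≈ 27.2114 eV.
bandgap_in_ev = bandgap * 27.2114
bandgap_in_ev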
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q-education', group='ibm-4', project='qiskit-hackathon')
backend = provider.get_backend('ibmq_qasm_simulator')
backend
from qiskit_nature.runtime import VQEProgram
error_threshold = 10 # mHartree
# for live plotting
pp = ProgressPlot(plot_names=['Energy'],
line_names=['Runtime VQE', f'Target + {error_threshold}mH', 'Target'])
intermediate_info = {
'nfev': [],
'parameters': [],
'energy': [],
'stddev': []
}
def callback(nfev, parameters, energy, stddev):
intermediate_info['nfev'].append(nfev)
intermediate_info['parameters'].append(parameters)
intermediate_info['energy'].append(energy)
intermediate_info['stddev'].append(stddev)
pp.update([[energy,exact_energy+error_threshold/1000, exact_energy]])
##############################
# Provide your code here
optimizer = {
'name': 'QN-SPSA', # leverage the Quantum Natural SPSA
# 'name': 'SPSA', # set to ordinary SPSA
'maxiter': 100,
}
# One possible completion (sketch), mirroring the VQEClient usage elsewhere in this repo
runtime_vqe = VQEProgram(ansatz=ansatz,
                         optimizer=optimizer,
                         initial_point=initial_point,
                         provider=provider,
                         backend=backend,
                         shots=1024,
                         measurement_error_mitigation=True,
                         callback=callback)
##############################
# Submit a runtime job using the following code
from qc_grader.challenges.unimelb_2022 import prepare_ex2f
runtime_job = prepare_ex2f(runtime_vqe, qubit_converter, es_problem)
# Check your answer and submit using the following code
from qc_grader.challenges.unimelb_2022 import grade_ex2f
grade_ex2f(runtime_job)
print(runtime_job.result().get("eigenvalue"))
# Please change backend before running the following code
runtime_job_real_device = prepare_ex2f(runtime_vqe, qubit_converter, es_problem, real_device=True)
print(runtime_job_real_device.result().get("eigenvalue"))
|
https://github.com/quantum-melbourne/qiskit-challenge-22
|
quantum-melbourne
|
# General imports
import os
import gzip
import numpy as np
import matplotlib.pyplot as plt
from pylab import cm
import warnings
warnings.filterwarnings("ignore")
# scikit-learn imports
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Qiskit imports
from qiskit import Aer, execute
from qiskit.circuit import QuantumCircuit, Parameter, ParameterVector
from qiskit.circuit.library import PauliFeatureMap, ZFeatureMap, ZZFeatureMap
from qiskit.circuit.library import TwoLocal, NLocal, RealAmplitudes, EfficientSU2
from qiskit.circuit.library import HGate, RXGate, RYGate, RZGate, CXGate, CRXGate, CRZGate
from qiskit_machine_learning.kernels import QuantumKernel
# Load MNIST dataset
DATA_PATH = './resources/ch3_part1.npz'
data = np.load(DATA_PATH)
sample_train = data['sample_train']
labels_train = data['labels_train']
sample_test = data['sample_test']
# Split train data
sample_train, sample_val, labels_train, labels_val = train_test_split(
sample_train, labels_train, test_size=0.2, random_state=42)
# Visualize samples
fig = plt.figure()
LABELS = [4, 9]
num_labels = len(LABELS)
for i in range(num_labels):
ax = fig.add_subplot(1, num_labels, i+1)
img = sample_train[labels_train==LABELS[i]][0].reshape((28, 28))
ax.imshow(img, cmap="Greys")
# Standardize
ss = StandardScaler()
sample_train = ss.fit_transform(sample_train)
sample_val = ss.transform(sample_val)
sample_test = ss.transform(sample_test)
# Reduce dimensions
N_DIM = 5
pca = PCA(n_components=N_DIM)
sample_train = pca.fit_transform(sample_train)
sample_val = pca.transform(sample_val)
sample_test = pca.transform(sample_test)
# Normalize
mms = MinMaxScaler((-1, 1))
sample_train = mms.fit_transform(sample_train)
sample_val = mms.transform(sample_val)
sample_test = mms.transform(sample_test)
# 3 features, depth 2
map_z = ZFeatureMap(feature_dimension=3, reps=2)
map_z.decompose().draw('mpl')
# 3 features, depth 1, linear entanglement
map_zz = ZZFeatureMap(feature_dimension=3, reps=1, entanglement='linear')
map_zz.decompose().draw('mpl')
# 3 features, depth 1, circular entanglement
map_zz = ZZFeatureMap(feature_dimension=3, reps=1, entanglement='circular')
map_zz.decompose().draw('mpl')
# 3 features, depth 1
map_pauli = PauliFeatureMap(feature_dimension=3, reps=1, paulis = ['X', 'Y', 'ZZ'])
map_pauli.decompose().draw('mpl')
twolocal = TwoLocal(num_qubits=3, reps=2, rotation_blocks=['ry','rz'],
entanglement_blocks='cx', entanglement='circular', insert_barriers=True)
twolocal.decompose().draw('mpl')
twolocaln = NLocal(num_qubits=3, reps=2,
rotation_blocks=[RYGate(Parameter('a')), RZGate(Parameter('a'))],
entanglement_blocks=CXGate(),
entanglement='circular', insert_barriers=True)
twolocaln.decompose().draw('mpl')
print(f'First training data: {sample_train[0]}')
encode_map = PauliFeatureMap(feature_dimension=N_DIM, reps=1, paulis = ['X', 'Y', 'ZZ'])
encode_circuit = encode_map.bind_parameters(sample_train[0])
encode_circuit.decompose().draw(output='mpl')
##############################
# Provide your code here
ex3a_fmap = ZZFeatureMap(feature_dimension=5, reps=3, entanglement='circular')
##############################
# Check your answer and submit using the following code
from qc_grader import grade_ex3a
grade_ex3a(ex3a_fmap)
pauli_map = PauliFeatureMap(feature_dimension=N_DIM, reps=1, paulis = ['X', 'Y', 'ZZ'])
pauli_kernel = QuantumKernel(feature_map=pauli_map, quantum_instance=Aer.get_backend('statevector_simulator'))
print(f'First training data : {sample_train[0]}')
print(f'Second training data: {sample_train[1]}')
pauli_circuit = pauli_kernel.construct_circuit(sample_train[0], sample_train[1])
pauli_circuit.decompose().decompose().draw(output='mpl')
backend = Aer.get_backend('qasm_simulator')
job = execute(pauli_circuit, backend, shots=8192,
seed_simulator=1024, seed_transpiler=1024)
counts = job.result().get_counts(pauli_circuit)
print(f"Transition amplitude: {counts['0'*N_DIM]/sum(counts.values())}")
matrix_train = pauli_kernel.evaluate(x_vec=sample_train)
matrix_val = pauli_kernel.evaluate(x_vec=sample_val, y_vec=sample_train)
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(np.asmatrix(matrix_train),
interpolation='nearest', origin='upper', cmap='Blues')
axs[0].set_title("training kernel matrix")
axs[1].imshow(np.asmatrix(matrix_val),
interpolation='nearest', origin='upper', cmap='Reds')
axs[1].set_title("validation kernel matrix")
plt.show()
x = [-0.5, -0.4, 0.3, 0, -0.9]
y = [0, -0.7, -0.3, 0, -0.4]
##############################
# Provide your code here
zz = ZZFeatureMap(feature_dimension=N_DIM, reps=3, entanglement='circular')
zz_kernel = QuantumKernel(feature_map=zz, quantum_instance=Aer.get_backend('statevector_simulator'))
zz_circuit = zz_kernel.construct_circuit(x, y)
backend = Aer.get_backend('qasm_simulator')
job = execute(zz_circuit, backend, shots=8192,
seed_simulator=1024, seed_transpiler=1024)
counts = job.result().get_counts(zz_circuit)
ex3b_amp = counts['0'*N_DIM]/sum(counts.values())
##############################
# Check your answer and submit using the following code
from qc_grader import grade_ex3b
grade_ex3b(ex3b_amp)
pauli_svc = SVC(kernel='precomputed')
pauli_svc.fit(matrix_train, labels_train)
pauli_score = pauli_svc.score(matrix_val, labels_val)
print(f'Precomputed kernel classification test score: {pauli_score*100}%')
# Load MNIST dataset
DATA_PATH = './resources/ch3_part2.npz'
data = np.load(DATA_PATH)
sample_train = data['sample_train']
labels_train = data['labels_train']
sample_test = data['sample_test']
# Split train data
sample_train, sample_val, labels_train, labels_val = train_test_split(
sample_train, labels_train, test_size=0.2, random_state=42)
# Visualize samples
fig = plt.figure()
LABELS = [0, 2, 3]
num_labels = len(LABELS)
for i in range(num_labels):
ax = fig.add_subplot(1, num_labels, i+1)
img = sample_train[labels_train==LABELS[i]][0].reshape((28, 28))
ax.imshow(img, cmap="Greys")
# Standardize
standard_scaler = StandardScaler()
sample_train = standard_scaler.fit_transform(sample_train)
sample_val = standard_scaler.transform(sample_val)
sample_test = standard_scaler.transform(sample_test)
# Reduce dimensions
N_DIM = 5
pca = PCA(n_components=N_DIM)
sample_train = pca.fit_transform(sample_train)
sample_val = pca.transform(sample_val)
sample_test = pca.transform(sample_test)
# Normalize
min_max_scaler = MinMaxScaler((-1, 1))
sample_train = min_max_scaler.fit_transform(sample_train)
sample_val = min_max_scaler.transform(sample_val)
sample_test = min_max_scaler.transform(sample_test)
labels_train_0 = np.where(labels_train==0, 1, 0)
labels_val_0 = np.where(labels_val==0, 1, 0)
print(f'Original validation labels: {labels_val}')
print(f'Validation labels for 0 vs Rest: {labels_val_0}')
labels_train_2 = np.where(labels_train==2, 1, 0)
labels_val_2 = np.where(labels_val==2, 1, 0)
print(f'Original validation labels: {labels_val}')
print(f'Validation labels for 2 vs Rest: {labels_val_2}')
labels_train_3 = np.where(labels_train==3, 1, 0)
labels_val_3 = np.where(labels_val==3, 1, 0)
print(f'Original validation labels: {labels_val}')
print(f'Validation labels for 3 vs Rest: {labels_val_3}')
pauli_map_0 = ZZFeatureMap(feature_dimension=N_DIM, reps=1, entanglement = 'linear')
pauli_kernel_0 = QuantumKernel(feature_map=pauli_map_0, quantum_instance=Aer.get_backend('statevector_simulator'))
pauli_svc_0 = SVC(kernel='precomputed', probability=True)
matrix_train_0 = pauli_kernel_0.evaluate(x_vec=sample_train)
pauli_svc_0.fit(matrix_train_0, labels_train_0)
matrix_val_0 = pauli_kernel_0.evaluate(x_vec=sample_val, y_vec=sample_train)
pauli_score_0 = pauli_svc_0.score(matrix_val_0, labels_val_0)
print(f'Accuracy of discriminating between label 0 and others: {pauli_score_0*100}%')
pauli_map_2 = ZZFeatureMap(feature_dimension=N_DIM, reps=1, entanglement='linear')
pauli_kernel_2 = QuantumKernel(feature_map=pauli_map_2, quantum_instance=Aer.get_backend('statevector_simulator'))
pauli_svc_2 = SVC(kernel='precomputed', probability=True)
matrix_train_2 = pauli_kernel_2.evaluate(x_vec=sample_train)
pauli_svc_2.fit(matrix_train_2, labels_train_2)
matrix_val_2 = pauli_kernel_2.evaluate(x_vec=sample_val, y_vec=sample_train)
pauli_score_2 = pauli_svc_2.score(matrix_val_2, labels_val_2)
print(f'Accuracy of discriminating between label 2 and others: {pauli_score_2*100}%')
pauli_map_3 = ZZFeatureMap(feature_dimension=N_DIM, reps=1, entanglement = 'linear')
pauli_kernel_3 = QuantumKernel(feature_map=pauli_map_3, quantum_instance=Aer.get_backend('statevector_simulator'))
pauli_svc_3 = SVC(kernel='precomputed', probability=True)
matrix_train_3 = pauli_kernel_3.evaluate(x_vec=sample_train)
pauli_svc_3.fit(matrix_train_3, labels_train_3)
matrix_val_3 = pauli_kernel_3.evaluate(x_vec=sample_val, y_vec=sample_train)
pauli_score_3 = pauli_svc_3.score(matrix_val_3, labels_val_3)
print(f'Accuracy of discriminating between label 3 and others: {pauli_score_3*100}%')
matrix_test_0 = pauli_kernel_0.evaluate(x_vec=sample_test, y_vec=sample_train)
pred_0 = pauli_svc_0.predict_proba(matrix_test_0)[:, 1]
print(f'Probability of label 0: {np.round(pred_0, 2)}')
matrix_test_2 = pauli_kernel_2.evaluate(x_vec=sample_test, y_vec=sample_train)
pred_2 = pauli_svc_2.predict_proba(matrix_test_2)[:, 1]
print(f'Probability of label 2: {np.round(pred_2, 2)}')
matrix_test_3 = pauli_kernel_3.evaluate(x_vec=sample_test, y_vec=sample_train)
pred_3 = pauli_svc_3.predict_proba(matrix_test_3)[:, 1]
print(f'Probability of label 3: {np.round(pred_3, 2)}')
##############################
# Provide your code here
pred_2 = pauli_svc_2.predict_proba(matrix_test_2)[:, 1]
##############################
##############################
# Provide your code here
pred_3 = pauli_svc_3.predict_proba(matrix_test_3)[:, 1]
##############################
sample_pred = np.load('./resources/ch3_part2_sub.npy')
print(f'Sample prediction: {sample_pred}')
pred_2_ex = np.array([0.7])
pred_3_ex = np.array([0.2])
pred_test_ex = np.where((pred_2_ex > pred_3_ex), 2, 3)
print(f'Prediction: {pred_test_ex}')
pred_2_ex = np.array([0.7, 0.1])
pred_3_ex = np.array([0.2, 0.6])
pred_test_ex = np.where((pred_2_ex > pred_3_ex), 2, 3)
print(f'Prediction: {pred_test_ex}')
##############################
# Provide your code here
prob_0 = np.array(np.round(pred_0,2))
prob_2 = np.array(np.round(pred_2,2))
prob_3 = np.array(np.round(pred_3,2))
def pred(pred_0, pred_2, pred_3):
prediction=[]
for i in range(len(pred_0)):
if pred_0[i]>pred_2[i] and pred_0[i]>pred_3[i]:
prediction.append(0)
elif pred_2[i]>pred_0[i] and pred_2[i]>pred_3[i]:
prediction.append(2)
else:
prediction.append(3)
return np.array(prediction)
test = pred(prob_0, prob_2, prob_3)
pred_test = np.array(test)
print(pred_test)
##############################
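# A more compact alternative (sketch): stack the three probability vectors and pick
# the label with the highest probability per sample; equivalent to the loop above.
stacked_probs = np.vstack([prob_0, prob_2, prob_3])
label_choices = np.array([0, 2, 3])
pred_test_alt = label_choices[np.argmax(stacked_probs, axis=0)]
print(pred_test_alt)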
print(f'Sample prediction: {sample_pred}')
# Check your answer and submit using the following code
from qc_grader import grade_ex3c
grade_ex3c(pred_test, sample_train,
standard_scaler, pca, min_max_scaler,
pauli_kernel_0, pauli_kernel_2, pauli_kernel_3,
pauli_svc_0, pauli_svc_2, pauli_svc_3)
|
https://github.com/quantum-melbourne/qiskit-challenge-22
|
quantum-melbourne
|
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit import Aer
from qiskit.utils import algorithm_globals, QuantumInstance
from qiskit.algorithms import QAOA, NumPyMinimumEigensolver
import numpy as np
val = [5,6,7,8,9]
wt = [4,5,6,7,8]
W = 18
def dp(W, wt, val, n):
k = [[0 for x in range(W + 1)] for x in range(n + 1)]
for i in range(n + 1):
for w in range(W + 1):
if i == 0 or w == 0:
k[i][w] = 0
elif wt[i-1] <= w:
k[i][w] = max(val[i-1] + k[i-1][w-wt[i-1]], k[i-1][w])
else:
k[i][w] = k[i-1][w]
picks=[0 for x in range(n)]
volume=W
for i in range(n, 0, -1):  # stop at 1 — i == 0 would compare against k[-1], i.e. wrap around
if (k[i][volume]>k[i-1][volume]):
picks[i-1]=1
volume -= wt[i-1]
return k[n][W],picks
n = len(val)
optimal_value, picks = dp(W, wt, val, n)  # run the DP once and reuse both outputs
print("optimal value:", optimal_value)
print('\n index of the chosen items:')
for i in range(n):
    if picks[i]:
        print(i, end=' ')
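# (This classical solver runs in O(n * W) time and memory — the baseline against
#  which the QUBO/QAOA formulation below can be compared.)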
# import packages necessary for application classes.
from qiskit_optimization.applications import Knapsack
def knapsack_quadratic_program():
# Put values, weights and max_weight parameter for the Knapsack()
##############################
# Provide your code here
prob = Knapsack(val, wt, W)
#
##############################
# to_quadratic_program generates a corresponding QuadraticProgram of the instance of the knapsack problem.
kqp = prob.to_quadratic_program()
return prob, kqp
prob, quadratic_program = knapsack_quadratic_program()
quadratic_program
# Numpy Eigensolver
meo = MinimumEigenOptimizer(min_eigen_solver=NumPyMinimumEigensolver())
result = meo.solve(quadratic_program)
print('result:\n', result)
print('\n index of the chosen items:', prob.interpret(result))
# QAOA
seed = 123
algorithm_globals.random_seed = seed
qins = QuantumInstance(backend=Aer.get_backend('qasm_simulator'), shots=1000, seed_simulator=seed, seed_transpiler=seed)
meo = MinimumEigenOptimizer(min_eigen_solver=QAOA(reps=1, quantum_instance=qins))
result = meo.solve(quadratic_program)
print('result:\n', result)
print('\n index of the chosen items:', prob.interpret(result))
# Check your answer and submit using the following code
from qc_grader import grade_ex4a
grade_ex4a(quadratic_program)
L1 = [5,3,3,6,9,7,1]
L2 = [8,4,5,12,10,11,2]
C1 = [1,1,2,1,1,1,2]
C2 = [3,2,3,2,4,3,3]
C_max = 16
def knapsack_argument(L1, L2, C1, C2, C_max):
##############################
# Provide your code here
    # Choosing option 2 for job i adds (L2[i]-L1[i]) revenue at an extra cost of
    # (C2[i]-C1[i]); the baseline cost sum(C1) is always paid, so the knapsack
    # capacity is the leftover budget C_max - sum(C1).
    values = [l2 - l1 for l1, l2 in zip(L1, L2)]
    weights = [c2 - c1 for c1, c2 in zip(C1, C2)]
    max_weight = C_max - sum(C1)
#
##############################
return values, weights, max_weight
values, weights, max_weight = knapsack_argument(L1, L2, C1, C2, C_max)
print(values, weights, max_weight)
prob = Knapsack(values = values, weights = weights, max_weight = max_weight)
qp = prob.to_quadratic_program()
qp
# Check your answer and submit using the following code
from qc_grader import grade_ex4b
grade_ex4b(knapsack_argument)
# QAOA
seed = 123
algorithm_globals.random_seed = seed
qins = QuantumInstance(backend=Aer.get_backend('qasm_simulator'), shots=1000, seed_simulator=seed, seed_transpiler=seed)
meo = MinimumEigenOptimizer(min_eigen_solver=QAOA(reps=1, quantum_instance=qins))
result = meo.solve(qp)
print('result:', result.x)
item = np.array(result.x)
revenue=0
for i in range(len(item)):
if item[i]==0:
revenue+=L1[i]
else:
revenue+=L2[i]
print('total revenue:', revenue)
instance_examples = [
{
'L1': [3, 7, 3, 4, 2, 6, 2, 2, 4, 6, 6],
'L2': [7, 8, 7, 6, 6, 9, 6, 7, 6, 7, 7],
'C1': [2, 2, 2, 3, 2, 4, 2, 2, 2, 2, 2],
'C2': [4, 3, 3, 4, 4, 5, 3, 4, 4, 3, 4],
'C_max': 33
},
{
'L1': [4, 2, 2, 3, 5, 3, 6, 3, 8, 3, 2],
'L2': [6, 5, 8, 5, 6, 6, 9, 7, 9, 5, 8],
'C1': [3, 3, 2, 3, 4, 2, 2, 3, 4, 2, 2],
'C2': [4, 4, 3, 5, 5, 3, 4, 5, 5, 3, 5],
'C_max': 38
},
{
'L1': [5, 4, 3, 3, 3, 7, 6, 4, 3, 5, 3],
'L2': [9, 7, 5, 5, 7, 8, 8, 7, 5, 7, 9],
'C1': [2, 2, 4, 2, 3, 4, 2, 2, 2, 2, 2],
'C2': [3, 4, 5, 4, 4, 5, 3, 3, 5, 3, 5],
'C_max': 35
}
]
from typing import List, Union
import math
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, assemble
from qiskit.compiler import transpile
from qiskit.circuit import Gate
from qiskit.circuit.library.standard_gates import *
from qiskit.circuit.library import QFT
def phase_return(index_qubits: int, gamma: float, L1: list, L2: list, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qc = QuantumCircuit(qr_index)
##############################
### U_1(gamma * (lambda2 - lambda1)) for each qubit ###
    # One plausible completion (sketch): one single-qubit phase gate per index qubit,
    # matching the U_1(gamma * (lambda2 - lambda1)) described above
    for i in range(index_qubits):
        qc.p(gamma * (L2[i] - L1[i]), qr_index[i])
##############################
return qc.to_gate(label=" phase return ") if to_gate else qc
def subroutine_add_const(data_qubits: int, const: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qc = QuantumCircuit(data_qubits)
##############################
### Phase Rotation ###
# Provide your code here
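    # (Hint, left as a sketch: in the Fourier basis, adding a constant is diagonal —
    #  a product of single-qubit phase gates, one per data qubit, in the style of a
    #  Draper adder; the exact angles depend on the QFT convention used below.)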
##############################
return qc.to_gate(label=" [+"+str(const)+"] ") if to_gate else qc
def const_adder(data_qubits: int, const: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qc = QuantumCircuit(qr_data)
##############################
### QFT ###
# Provide your code here
##############################
##############################
### Phase Rotation ###
# Use `subroutine_add_const`
##############################
##############################
### IQFT ###
# Provide your code here
##############################
return qc.to_gate(label=" [ +" + str(const) + "] ") if to_gate else qc
def cost_calculation(index_qubits: int, data_qubits: int, list1: list, list2: list, to_gate = True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qc = QuantumCircuit(qr_index, qr_data)
for i, (val1, val2) in enumerate(zip(list1, list2)):
##############################
### Add val2 using const_adder controlled by i-th index register (set to 1) ###
# Provide your code here
##############################
qc.x(qr_index[i])
##############################
### Add val1 using const_adder controlled by i-th index register (set to 0) ###
# Provide your code here
##############################
qc.x(qr_index[i])
return qc.to_gate(label=" Cost Calculation ") if to_gate else qc
def constraint_testing(data_qubits: int, C_max: int, to_gate = True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
##############################
### Set the flag register for indices with costs larger than C_max ###
# Provide your code here
##############################
return qc.to_gate(label=" Constraint Testing ") if to_gate else qc
def penalty_dephasing(data_qubits: int, alpha: float, gamma: float, to_gate = True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
##############################
### Phase Rotation ###
# Provide your code here
##############################
return qc.to_gate(label=" Penalty Dephasing ") if to_gate else qc
def reinitialization(index_qubits: int, data_qubits: int, C1: list, C2: list, C_max: int, to_gate = True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_index, qr_data, qr_f)
##############################
### Reinitialization Circuit ###
# Provide your code here
##############################
return qc.to_gate(label=" Reinitialization ") if to_gate else qc
def mixing_operator(index_qubits: int, beta: float, to_gate = True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qc = QuantumCircuit(qr_index)
##############################
### Mixing Operator ###
    # One plausible completion (sketch): the standard transverse-field QAOA mixer,
    # e^{-i beta X} on every index qubit
    for i in range(index_qubits):
        qc.rx(2 * beta, qr_index[i])
##############################
return qc.to_gate(label=" Mixing Operator ") if to_gate else qc
def solver_function(L1: list, L2: list, C1: list, C2: list, C_max: int) -> QuantumCircuit:
# the number of qubits representing answers
index_qubits = len(L1)
# the maximum possible total cost
max_c = sum([max(l0, l1) for l0, l1 in zip(C1, C2)])
# the number of qubits representing data values can be defined using the maximum possible total cost as follows:
data_qubits = math.ceil(math.log(max_c, 2)) + 1 if not max_c & (max_c - 1) == 0 else math.ceil(math.log(max_c, 2)) + 2
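    # (Both branches amount to one extra qubit beyond the minimum needed to represent
    #  max_c, giving headroom to flag costs above C_max; `max_c & (max_c - 1) == 0`
    #  is the standard power-of-two test.)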
### Phase Operator ###
# return part
def phase_return():
##############################
### TODO ###
### Paste your code from above cells here ###
##############################
# penalty part
def subroutine_add_const():
##############################
### TODO ###
### Paste your code from above cells here ###
##############################
# penalty part
def const_adder():
##############################
### TODO ###
### Paste your code from above cells here ###
##############################
# penalty part
def cost_calculation():
##############################
### TODO ###
### Paste your code from above cells here ###
##############################
# penalty part
def constraint_testing():
##############################
### TODO ###
### Paste your code from above cells here ###
##############################
# penalty part
def penalty_dephasing():
##############################
### TODO ###
### Paste your code from above cells here ###
##############################
# penalty part
def reinitialization():
##############################
### TODO ###
### Paste your code from above cells here ###
##############################
### Mixing Operator ###
def mixing_operator():
##############################
### TODO ###
### Paste your code from above cells here ###
##############################
qr_index = QuantumRegister(index_qubits, "index") # index register
qr_data = QuantumRegister(data_qubits, "data") # data register
qr_f = QuantumRegister(1, "flag") # flag register
cr_index = ClassicalRegister(index_qubits, "c_index") # classical register storing the measurement result of index register
qc = QuantumCircuit(qr_index, qr_data, qr_f, cr_index)
### initialize the index register with uniform superposition state ###
qc.h(qr_index)
### DO NOT CHANGE THE CODE BELOW
p = 5
alpha = 1
for i in range(p):
### set fixed parameters for each round ###
beta = 1 - (i + 1) / p
gamma = (i + 1) / p
### return part ###
qc.append(phase_return(index_qubits, gamma, L1, L2), qr_index)
### step 1: cost calculation ###
qc.append(cost_calculation(index_qubits, data_qubits, C1, C2), qr_index[:] + qr_data[:])
### step 2: Constraint testing ###
qc.append(constraint_testing(data_qubits, C_max), qr_data[:] + qr_f[:])
### step 3: penalty dephasing ###
qc.append(penalty_dephasing(data_qubits, alpha, gamma), qr_data[:] + qr_f[:])
### step 4: reinitialization ###
qc.append(reinitialization(index_qubits, data_qubits, C1, C2, C_max), qr_index[:] + qr_data[:] + qr_f[:])
### mixing operator ###
qc.append(mixing_operator(index_qubits, beta), qr_index)
### measure the index ###
### since the default measurement outcome is shown in big endian, it is necessary to reverse the classical bits in order to unify the endian ###
qc.measure(qr_index, cr_index[::-1])
return qc
# Execute your circuit with following prepare_ex4c() function.
# The prepare_ex4c() function works like the execute() function with only QuantumCircuit as an argument.
from qc_grader import prepare_ex4c
job = prepare_ex4c(solver_function)
result = job.result()
# Check your answer and submit using the following code
from qc_grader import grade_ex4c
grade_ex4c(job)
|
https://github.com/quantum-melbourne/qiskit-challenge-22
|
quantum-melbourne
|
#Let us first import necessary libraries
from qiskit import Aer
from qiskit.algorithms import VQE, QAOA, NumPyMinimumEigensolver
from qiskit.algorithms.optimizers import COBYLA
from qiskit.circuit.library import TwoLocal
from qiskit.utils import QuantumInstance
from qiskit_finance.applications.optimization import PortfolioOptimization
from qiskit_finance.data_providers import RandomDataProvider
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit_optimization.applications import OptimizationApplication
from qiskit_optimization.converters import QuadraticProgramToQubo
import numpy as np
import matplotlib.pyplot as plt
import datetime
# set number of assets (= number of qubits)
num_assets = 4
seed = 103
# Generate expected return and covariance matrix from (random) time-series
stocks = [("TICKER%s" % i) for i in range(num_assets)]
data = RandomDataProvider(tickers=stocks,
start=datetime.datetime(2021,1,1),
end=datetime.datetime(2022,6,30),
seed=seed)
data.run()
mu = data.get_period_return_mean_vector()
sigma = data.get_period_return_covariance_matrix()
# Let's plot our financial data
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.xlabel('days')
plt.ylabel('stock value')
plt.show()
# Let's plot our covariance matrix Σ(sigma)
sigma = data.get_period_return_covariance_matrix() #Returns the covariance matrix of the four assets
print(sigma)
fig, ax = plt.subplots(1,1)
im = plt.imshow(sigma, extent=[-1,1,-1,1])
x_label_list = ['stock3', 'stock2', 'stock1', 'stock0']
y_label_list = ['stock3', 'stock2', 'stock1', 'stock0']
ax.set_xticks([-0.75,-0.25,0.25,0.75])
ax.set_yticks([0.75,0.25,-0.25,-0.75])
ax.set_xticklabels(x_label_list)
ax.set_yticklabels(y_label_list)
plt.colorbar()
plt.clim(-0.000002, 0.00001)
plt.show()
q = 0.5 # set risk factor
budget = num_assets // 2 # set budget
penalty = num_assets # set parameter to scale the budget penalty term
portfolio = PortfolioOptimization(expected_returns=mu, covariances=sigma, risk_factor=q, budget=budget)
qp = portfolio.to_quadratic_program()
qp
def index_to_selection(i, num_assets):
s = "{0:b}".format(i).rjust(num_assets)
x = np.array([1 if s[i]=='1' else 0 for i in reversed(range(num_assets))])
return x
def print_result(result):
selection = result.x
value = result.fval
print('Optimal: selection {}, value {:.4f}'.format(selection, value))
# eigenstate = result.min_eigen_solver_result.eigenstate
# eigenvector = eigenstate if isinstance(eigenstate, np.ndarray) else eigenstate.to_matrix()
# probabilities = np.abs(eigenvector)**2
# i_sorted = reversed(np.argsort(probabilities))
# print('\n----------------- Full result ---------------------')
# print('selection\tvalue\t\tprobability')
# print('---------------------------------------------------')
# for i in i_sorted:
# x = index_to_selection(i, num_assets)
# value = QuadraticProgramToQubo().convert(qp).objective.evaluate(x)
# #value = portfolio.to_quadratic_program().objective.evaluate(x)
# probability = probabilities[i]
# print('%10s\t%.4f\t\t%.4f' %(x, value, probability))
exact_mes = NumPyMinimumEigensolver()
exact_eigensolver = MinimumEigenOptimizer(exact_mes)
result = exact_eigensolver.solve(qp)
print_result(result)
from qiskit.utils import algorithm_globals
algorithm_globals.random_seed = 1234
backend = Aer.get_backend('statevector_simulator')
cobyla = COBYLA()
cobyla.set_options(maxiter=500)
ry = TwoLocal(num_assets, 'ry', 'cz', reps=3, entanglement='full')
quantum_instance = QuantumInstance(backend=backend, seed_simulator=seed, seed_transpiler=seed)
vqe_mes = VQE(ry, optimizer=cobyla, quantum_instance=quantum_instance)
vqe = MinimumEigenOptimizer(vqe_mes)
result = vqe.solve(qp)
print_result(result)
algorithm_globals.random_seed = 1234
backend = Aer.get_backend('statevector_simulator')
cobyla = COBYLA()
cobyla.set_options(maxiter=250)
quantum_instance = QuantumInstance(backend=backend, seed_simulator=seed, seed_transpiler=seed)
qaoa_mes = QAOA(optimizer=cobyla, reps=3, quantum_instance=quantum_instance)
qaoa = MinimumEigenOptimizer(qaoa_mes)
result = qaoa.solve(qp)
print_result(result)
|
https://github.com/quantum-melbourne/qiskit-challenge-22
|
quantum-melbourne
|
import matplotlib.pyplot as plt
import numpy as np
from qiskit.utils import algorithm_globals
seed = 12345
algorithm_globals.random_seed = seed
from qiskit_machine_learning.datasets import ad_hoc_data
train_features, train_labels, test_features, test_labels, adhoc_total = ad_hoc_data(
training_size=20,
test_size=5,
n=2,
gap=0.3,
plot_data=False, one_hot=False, include_sample_total=True
)
def plot_adhoc_dataset(train_features, train_labels, test_features, test_labels, adhoc_total):
plt.figure(figsize=(8, 8))
plt.ylim(0, 2 * np.pi)
plt.xlim(0, 2 * np.pi)
plt.imshow(np.asmatrix(adhoc_total).T, interpolation='nearest',
origin='lower', cmap='RdBu', extent=[0, 2 * np.pi, 0, 2 * np.pi])
plt.scatter(train_features[np.where(train_labels[:] == 0), 0], train_features[np.where(train_labels[:] == 0), 1],
marker='s', facecolors='w', edgecolors='b', label="A train")
plt.scatter(train_features[np.where(train_labels[:] == 1), 0], train_features[np.where(train_labels[:] == 1), 1],
marker='o', facecolors='w', edgecolors='r', label="B train")
plt.scatter(test_features[np.where(test_labels[:] == 0), 0], test_features[np.where(test_labels[:] == 0), 1],
marker='s', facecolors='b', edgecolors='w', label="A test")
plt.scatter(test_features[np.where(test_labels[:] == 1), 0], test_features[np.where(test_labels[:] == 1), 1],
marker='o', facecolors='r', edgecolors='w', label="B test")
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.title("Ad hoc dataset for classification")
plt.show()
plot_adhoc_dataset(train_features, train_labels, test_features, test_labels, adhoc_total)
print(train_features[0], train_labels[0])
print(train_features[20], train_labels[20])
from qiskit.circuit.library import EfficientSU2
circuit = EfficientSU2(num_qubits=3, reps=1, insert_barriers=True)
circuit.decompose().draw(output='mpl')
x = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1,1.2]
encode = circuit.bind_parameters(x)
encode.decompose().draw(output='mpl')
from qiskit.circuit.library import ZZFeatureMap
circuit = ZZFeatureMap(3, reps=1, insert_barriers=True)
circuit.decompose().draw(output='mpl')
x = [0.1,0.2,0.3]
encode = circuit.bind_parameters(x)
encode.decompose().draw(output='mpl')
from qiskit import Aer
from qiskit.utils import QuantumInstance
quantum_instance = QuantumInstance(Aer.get_backend('qasm_simulator'), shots=1024,
seed_simulator=seed, seed_transpiler=seed)
from qiskit.circuit.library import ZZFeatureMap
from qiskit_machine_learning.kernels import QuantumKernel
zz_feature_map = ZZFeatureMap(feature_dimension=2, reps=2)
zz_kernel = QuantumKernel(feature_map=zz_feature_map, quantum_instance=quantum_instance)
train_matrix = zz_kernel.evaluate(x_vec=train_features)
test_matrix = zz_kernel.evaluate(x_vec=test_features, y_vec=train_features)
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(np.asmatrix(train_matrix),
interpolation='nearest', origin='upper', cmap='Blues')
axs[0].set_title("training kernel matrix")
axs[1].imshow(np.asmatrix(test_matrix),
interpolation='nearest', origin='upper', cmap='Reds')
axs[1].set_title("testing kernel matrix")
plt.show()
from sklearn.svm import SVC
zzpc_svc = SVC(kernel='precomputed')
zzpc_svc.fit(train_matrix, train_labels)
zzpc_score = zzpc_svc.score(test_matrix, test_labels)
print(f'Precomputed kernel classification test score: {zzpc_score}')
zzcb_svc = SVC(kernel=zz_kernel.evaluate)
zzcb_svc.fit(train_features, train_labels)
zzcb_score = zzcb_svc.score(test_features, test_labels)
print(f'Callable kernel classification test score: {zzcb_score}')
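# (Design note: the callable-kernel SVC re-evaluates quantum kernel entries on demand
#  during fit/score, so precomputing the Gram matrices as above is typically faster
#  when the same data is reused.)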
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
train_labels_oh = encoder.fit_transform(train_labels.reshape(-1, 1)).toarray()
test_labels_oh = encoder.transform(test_labels.reshape(-1, 1)).toarray()  # transform only; the encoder is already fitted on the training labels
feature_map = ZZFeatureMap(feature_dimension=2, reps=2)
feature_map.decompose().draw('mpl')
from qiskit.circuit.library import RealAmplitudes
var_form = RealAmplitudes(2, reps=2)
var_form.decompose().draw('mpl')
# Optimizer callback function for plotting purposes
def store_intermediate_result(evaluation, parameter, cost, stepsize, accept):
evaluations.append(evaluation)
parameters.append(parameter)
costs.append(cost)
#initial_point = np.random.random(var_form.num_parameters)
initial_point = np.array([0.78969092, 0.50252316, 0.45244703,
0.83243915, 0.79595478, 0.42169888])
from qiskit_machine_learning.algorithms.classifiers import VQC
from qiskit.algorithms.optimizers import SPSA
vqc = VQC(feature_map=feature_map,
ansatz=var_form,
loss='cross_entropy',
optimizer=SPSA(callback=store_intermediate_result),
initial_point=initial_point,
quantum_instance=Aer.get_backend('statevector_simulator'))
parameters = []
costs = []
evaluations = []
vqc.fit(train_features, train_labels_oh)
fig = plt.figure()
plt.plot(evaluations, costs)
plt.xlabel('Steps')
plt.ylabel('Cost')
plt.show()
vqc.score(test_features, test_labels_oh)
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/quantum-melbourne/qiskit-challenge-22
|
quantum-melbourne
|
import warnings
from h5py.h5py_warnings import H5pyDeprecationWarning
warnings.filterwarnings(action="ignore", category=H5pyDeprecationWarning)
from qiskit_nature.drivers import Molecule
molecule = Molecule(
# coordinates are given in Angstrom
geometry=[
["O", [0.0, 0.0, 0.115]],
["H", [0.0, 0.754, -0.459]],
["H", [0.0, -0.754, -0.459]],
],
multiplicity=1, # = 2*spin + 1
charge=0,
)
from qiskit_nature.drivers.second_quantization import ElectronicStructureMoleculeDriver, ElectronicStructureDriverType
driver = ElectronicStructureMoleculeDriver(
molecule=molecule,
basis="sto3g",
driver_type=ElectronicStructureDriverType.PYSCF,
)
from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem
problem = ElectronicStructureProblem(driver)
# this will call driver.run() internally
second_q_ops = problem.second_q_ops()
from qiskit_nature.operators.second_quantization import FermionicOp
# we increase the truncation value of the FermionicOp applied while printing
FermionicOp.set_truncation(500)
hamiltonian = second_q_ops[0]
print(hamiltonian)
from qiskit_nature.transformers.second_quantization.electronic import ActiveSpaceTransformer
transformer = ActiveSpaceTransformer(
num_electrons=2,
num_molecular_orbitals=3,
)
problem_reduced = ElectronicStructureProblem(driver, [transformer])
second_q_ops_reduced = problem_reduced.second_q_ops()
hamiltonian_reduced = second_q_ops_reduced[0]
print(hamiltonian_reduced)
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
jw_mapper = JordanWignerMapper()
jw_converter = QubitConverter(jw_mapper)
qubit_op_jw = jw_converter.convert(hamiltonian_reduced)
print(qubit_op_jw)
from qiskit_nature.mappers.second_quantization import ParityMapper
parity_mapper = ParityMapper()
parity_converter = QubitConverter(parity_mapper, two_qubit_reduction=True)
qubit_op_parity = parity_converter.convert(hamiltonian_reduced, num_particles=problem_reduced.num_particles)
print(qubit_op_parity)
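# (Note: the parity mapping with two_qubit_reduction exploits particle-number symmetry,
#  so the operator above acts on two fewer qubits than its Jordan-Wigner counterpart.)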
from qiskit.algorithms.optimizers import SLSQP
from qiskit.providers.aer import StatevectorSimulator, QasmSimulator
from qiskit_nature.algorithms.ground_state_solvers.minimum_eigensolver_factories import VQEUCCFactory
vqe_factory = VQEUCCFactory(
quantum_instance=StatevectorSimulator(),
#quantum_instance=QasmSimulator(),
optimizer=SLSQP(),
)
from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver
solver = GroundStateEigensolver(parity_converter, vqe_factory)
result = solver.solve(problem_reduced)
print(result)
from urllib3.connection import SystemTimeWarning
warnings.filterwarnings(action="ignore", category=SystemTimeWarning)
import numpy as np
np.random.seed(42)
from qiskit.circuit.library import EfficientSU2
ansatz = EfficientSU2(num_qubits=qubit_op_parity.num_qubits, reps=1, entanglement='linear', insert_barriers=True)
ansatz.decompose().draw('mpl', style='iqx')
from qiskit.providers.ibmq import IBMQ, IBMQAccountError
try:
IBMQ.load_account()
provider = IBMQ.get_provider(group="open")
except IBMQAccountError:
print("No IBMQ account credentials available")
provider = None
else:
print("Provider supports runtime: ", provider.has_service("runtime"))
backend = provider.get_backend("ibmq_qasm_simulator")
from qiskit_nature.runtime import VQEClient
optimizer = {
"name": "SPSA",
"maxiter": 50,
}
initial_point = np.random.random(ansatz.num_parameters)
if provider is not None:
runtime_vqe = VQEClient(
ansatz=ansatz,
optimizer=optimizer,
initial_point=initial_point,
provider=provider,
backend=backend,
shots=1024,
measurement_error_mitigation=True,
)
if provider is not None:
runtime_vqe_solver = GroundStateEigensolver(parity_converter, runtime_vqe)
if provider is not None:
runtime_result = runtime_vqe_solver.solve(problem_reduced)
if provider is not None:
print(runtime_result)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/quantum-melbourne/qiskit-challenge-22
|
quantum-melbourne
|
from qiskit_optimization import QuadraticProgram
# Define QuadraticProgram
qp = QuadraticProgram()
# Add variables
qp.binary_var('x')
qp.binary_var('y')
qp.integer_var(lowerbound=0, upperbound=7, name='z')
# Add an objective function
qp.maximize(linear={'x': 2, 'y': 1, 'z': 1})
# Add a constraint
qp.linear_constraint(linear={'x': 1, 'y': 1, 'z': 1}, sense='LE', rhs=2, name='xyz_leq')
print(qp.export_as_lp_string())
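# A minimal sketch (not part of the original snippet): this small program can be solved
# exactly with the same MinimumEigenOptimizer pattern used for Max-cut below.
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit_optimization.algorithms import MinimumEigenOptimizer
exact_result = MinimumEigenOptimizer(NumPyMinimumEigensolver()).solve(qp)
print(exact_result)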
import networkx as nx
# Make a graph with degree=2 and #node=5
graph = nx.random_regular_graph(d=2, n=5, seed=111)
pos = nx.spring_layout(graph, seed=111)
# Application class for a Max-cut problem
# Make a Max-cut problem from the graph
from qiskit_optimization.applications import Maxcut
maxcut = Maxcut(graph)
maxcut.draw(pos=pos)
# Make a QuadraticProgram by calling to_quadratic_program()
qp = maxcut.to_quadratic_program()
print(qp)
from qiskit import Aer
from qiskit.utils import QuantumInstance
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit.algorithms import QAOA, NumPyMinimumEigensolver
qins = QuantumInstance(backend=Aer.get_backend('qasm_simulator'), shots=1000, seed_simulator=123)
# Define QAOA solver
meo = MinimumEigenOptimizer(min_eigen_solver=QAOA(reps=1, quantum_instance=qins))
result = meo.solve(qp)
print('result:\n', result)
print('\nsolution:\n', maxcut.interpret(result))
print('\ntime:', result.min_eigen_solver_result.optimizer_time)
maxcut.draw(result, pos=pos)
# Numpy Eigensolver
meo = MinimumEigenOptimizer(min_eigen_solver=NumPyMinimumEigensolver())
result = meo.solve(qp)
print('result:\n', result)
print('\nsolution:\n', maxcut.interpret(result))
maxcut.draw(result, pos=pos)
|
https://github.com/entropicalabs/OpenQAOA-Challenge---Qiskit-Fall-Fest-2022-Mexico
|
entropicalabs
|
from openqaoa.problems.problem import NumberPartition
np = NumberPartition(numbers=[1,2,3])
np_qubo = np.get_qubo_problem()
# visualize the QUBO form on a graph
from openqaoa.utilities import plot_graph, graph_from_hamiltonian
# extract the Hamiltonian
cost_hamil = np_qubo.hamiltonian
#convert Hamiltonian to graph
cost_gr = graph_from_hamiltonian(cost_hamil)
#plot the graph
plot_graph(cost_gr)
from openqaoa.workflows.optimizer import QAOA
# Initialise QAOA using the default values
q = QAOA()
# Compile the QAOA workflow
q.compile(np_qubo)
# Optimise the problem!
q.optimize()
q.results.plot_probabilities()
# Initialise QAOA
q = QAOA()
q.compile(np_qubo)
q.variate_params
# Initialise QAOA
q = QAOA()
q.set_circuit_properties(p=1, param_type='standard', init_type='rand')
q.compile(np_qubo)
q.variate_params
import numpy
def my_submission_function(p):
"""
Note that the number of betas and gammas depends on the number of layers `p`!
Also, OpenQAOA offers several parametrisation techniques which change the structure
of the return value of `my_submission_function`.
For more details: https://el-openqaoa.readthedocs.io/en/latest/notebooks/05_advanced_parameterization.html
"""
return { 'betas' : [numpy.random.randn() for i in range(p)],
'gammas' : [numpy.random.randn() for i in range(p)]}
p = 2
q_submission = QAOA()
q_submission.set_circuit_properties(p=p,
param_type='standard',
init_type='custom',
variational_params_dict=my_submission_function(2))
my_submission_function(2)
q_submission.compile(np_qubo)
q_submission.optimize()
q_submission.results.plot_cost()
q_submission.results.plot_probabilities()
np_input = [numpy.random.randint(30) for i in range(10)]
np_input
|
https://github.com/entropicalabs/OpenQAOA-Challenge---Qiskit-Fall-Fest-2022-Mexico
|
entropicalabs
|
%load_ext autoreload
%autoreload 2
from IPython.display import clear_output
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import numpy as np
import networkx as nx
# Design the graph for the maxcut problem
g = nx.Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 3)
g.add_edge(1, 4)
g.add_edge(2, 4)
g.add_edge(3, 4)
#import problem classes from OQ for easy problem creation
from openqaoa.problems.problem import MaximumCut
#import the QAOA workflow model
from openqaoa.workflows.optimizer import QAOA
#import method to specify the device
from openqaoa.devices import create_device
# Import the method to plot the graph
from openqaoa.utilities import plot_graph
plot_graph(g)
#Use the MaximumCut class to instantiate the problem.
maxcut_prob = MaximumCut(g)
# The QUBO terms and coefficients can be accessed via the `asdict()` method.
maxcut_qubo = maxcut_prob.get_qubo_problem()
maxcut_qubo.asdict()
# Initialise the QAOA object; by default the backend is the vectorised simulator
q = QAOA()
q.local_simulators, q.cloud_provider
from openqaoa.devices import create_device
sim = create_device(location='local', name='vectorized')
q.set_device(sim)
q.compile(maxcut_qubo)
q.optimize()
q.results.most_probable_states['solutions_bitstrings']
q.results.most_probable_states['bitstring_energy']
q.results.get_counts(q.results.optimized['optimized measurement outcomes'])
q.results.plot_cost()
|
https://github.com/entropicalabs/OpenQAOA-Challenge---Qiskit-Fall-Fest-2022-Mexico
|
entropicalabs
|
# Methods to use in the notebook
%load_ext autoreload
%autoreload 2
from IPython.display import clear_output
# Libraries to manipulate, print, plot, and save the data.
import matplotlib.pyplot as plt
import numpy as np
# Random generator for the problem
from random import seed,randrange, randint
# OpenQAOA libraries to design and execute the algorithm on different backends and with different optimisers
from openqaoa.problems.problem import Knapsack
from openqaoa.workflows.optimizer import QAOA
from openqaoa.devices import create_device
from openqaoa.optimizers.qaoa_optimizer import available_optimizers
from openqaoa.utilities import ground_state_hamiltonian
# Import the method to plot the graph
from openqaoa.utilities import plot_graph
np.random.seed(1)
num_items = 3 # number of items
weight_capacity = 10 # max weight of a bin
penalty = 1.0
values = np.random.randint(1, weight_capacity, num_items)
weights = np.random.randint(1, weight_capacity, num_items)
print(values,weights,weight_capacity,penalty)
qubo = Knapsack(values, weights, weight_capacity,penalty).get_qubo_problem()
qubo.asdict()
# In OpenQAOA you can use different devices, either local simulators
# or cloud backends such as Qiskit and PyQuil.
sim = create_device(location='local', name='vectorized')
# Initialise the QAOA object; certain of its properties can be modified
q = QAOA()
q.set_device(sim)
q.compile(qubo)
# Execute the quantum algorithm
q.optimize()
print("solution bitstring: ",q.results.most_probable_states['solutions_bitstrings'])
print("ground state: ",ground_state_hamiltonian(q.cost_hamil)[1])
optimisers = available_optimizers()['scipy']+ available_optimizers()['custom_scipy_gradient']
optimisers
jac = ['finite_difference', 'param_shift','grad_spsa', 'stoch_param_shift']
hess = jac[0]
|
https://github.com/entropicalabs/OpenQAOA-Challenge---Qiskit-Fall-Fest-2022-Mexico
|
entropicalabs
|
# Methods to use in the notebook
%load_ext autoreload
%autoreload 2
from IPython.display import clear_output
# Libraries to manipulate, print, plot, and save the data.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Random generator for the problem
from random import seed,randrange, randint
# OpenQAOA libraries to design and execute the algorithm on different backends and with different optimisers
from openqaoa.problems.problem import NumberPartition
from openqaoa.workflows.optimizer import QAOA
from openqaoa.devices import create_device
from openqaoa.optimizers.qaoa_optimizer import available_optimizers
from openqaoa.utilities import ground_state_hamiltonian
# To make the results reproducible and to ensure the instance has optimal
# states for the bipartition problem, the following seed is used
seed(9)
# Generate four random numbers in the range [1, 5) and call the variable in_list
in_list = [randrange(1, 5, 1) for i in range(4)]
in_list
qubo = NumberPartition(in_list).get_qubo_problem()
qubo.asdict()
# In OpenQAOA you can use different devices, either local simulators
# or cloud backends such as Qiskit and PyQuil.
sim = create_device(location='local', name='vectorized')
# Initialise the QAOA object; certain of its properties can be modified
q = QAOA()
q.set_device(sim)
q.compile(qubo)
# Execute the quantum algorithm
q.optimize()
print("solution bitstring: ",q.results.most_probable_states['solutions_bitstrings'])
print("ground state: ",ground_state_hamiltonian(q.cost_hamil)[1])
optimisers = available_optimizers()['scipy']+ available_optimizers()['custom_scipy_gradient']
optimisers
jac = ['finite_difference', 'param_shift','grad_spsa', 'stoch_param_shift']
hess = jac[0]
|
https://github.com/QuCO-CSAM/Solving-Combinatorial-Optimisation-Problems-Using-Quantum-Algorithms
|
QuCO-CSAM
|
#In case you don't have qiskit, install it now
%pip install qiskit --quiet
#Installing/upgrading pylatexenc seems to have fixed my mpl issue
#If you try this and it doesn't work, try also restarting the runtime/kernel
%pip install pylatexenc --quiet
!pip install -Uqq ipdb
!pip install qiskit_optimization
import networkx as nx
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import BasicAer
from qiskit.compiler import transpile
from qiskit.quantum_info.operators import Operator, Pauli
from qiskit.quantum_info import process_fidelity
from qiskit.extensions.hamiltonian_gate import HamiltonianGate
from qiskit.extensions import RXGate, XGate, CXGate
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, execute
import numpy as np
from qiskit.visualization import plot_histogram
import ipdb
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
#quadratic optimization
from qiskit_optimization import QuadraticProgram
from qiskit_optimization.converters import QuadraticProgramToQubo
%pdb on
# def ApplyCost(qc, gamma):
# Ix = np.array([[1,0],[0,1]])
# Zx= np.array([[1,0],[0,-1]])
# Xx = np.array([[0,1],[1,0]])
# Temp = (Ix-Zx)/2
# T = Operator(Temp)
# I = Operator(Ix)
# Z = Operator(Zx)
# X = Operator(Xx)
# FinalOp=-2*(T^I^T)-(I^T^T)-(T^I^I)+2*(I^T^I)-3*(I^I^T)
# ham = HamiltonianGate(FinalOp,gamma)
# qc.append(ham,[0,1,2])
task = QuadraticProgram(name = 'QUBO on QC')
task.binary_var(name = 'x')
task.binary_var(name = 'y')
task.binary_var(name = 'z')
task.minimize(linear = {"x":-1,"y":2,"z":-3}, quadratic = {("x", "z"): -2, ("y", "z"): -1})
qubo = QuadraticProgramToQubo().convert(task) #convert to QUBO
operator, offset = qubo.to_ising()
print(operator)
# ham = HamiltonianGate(operator,0)
# print(ham)
Ix = np.array([[1,0],[0,1]])
Zx= np.array([[1,0],[0,-1]])
Xx = np.array([[0,1],[1,0]])
Temp = (Ix-Zx)/2
T = Operator(Temp)
I = Operator(Ix)
Z = Operator(Zx)
X = Operator(Xx)
FinalOp=-2*(T^I^T)-(I^T^T)-(T^I^I)+2*(I^T^I)-3*(I^I^T)
ham = HamiltonianGate(FinalOp,0)
print(ham)
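# Side note (added for clarity): Temp = (I - Z)/2 is the projector onto |1>,
# i.e. it maps a binary variable x in {0, 1} to the operator (I - Z)/2 --
# the standard substitution used to turn a QUBO objective into an Ising
# Hamiltonian, which is why FinalOp above mirrors the minimized objective.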
#define PYBIND11_DETAILED_ERROR_MESSAGES
def compute_expectation(counts):
"""
Computes expectation value based on measurement results
Args:
counts: dict
key as bitstring, val as count
G: networkx graph
Returns:
avg: float
expectation value
"""
avg = 0
sum_count = 0
for bitstring, count in counts.items():
x = int(bitstring[2])
y = int(bitstring[1])
z = int(bitstring[0])
obj = -2*x*z-y*z-x+2*y-3*z
avg += obj * count
sum_count += count
return avg/sum_count
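# Worked example (hypothetical counts, added as a sanity check): note the
# bit-index convention above reads z from bitstring[0] and x from bitstring[2].
# obj('011'): x=1, y=1, z=0 -> -0 - 0 - 1 + 2 - 0 = 1
# obj('100'): x=0, y=0, z=1 -> -0 - 0 - 0 + 0 - 3 = -3
sample_counts = {'011': 600, '100': 424}
print(compute_expectation(sample_counts))  # (600*1 + 424*(-3)) / 1024 = -0.65625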
# We will also bring the different circuit components that
# build the qaoa circuit under a single function
def create_qaoa_circ(theta):
"""
Creates a parametrized qaoa circuit
Args:
G: networkx graph
theta: list
unitary parameters
Returns:
qc: qiskit circuit
"""
nqubits = 3
n,m=3,3
p = len(theta)//2 # number of alternating unitaries
qc = QuantumCircuit(nqubits,nqubits)
Ix = np.array([[1,0],[0,1]])
Zx= np.array([[1,0],[0,-1]])
Xx = np.array([[0,1],[1,0]])
Temp = (Ix-Zx)/2
T = Operator(Temp)
I = Operator(Ix)
Z = Operator(Zx)
X = Operator(Xx)
FinalOp=-2*(Z^I^Z)-(I^Z^Z)-(Z^I^I)+2*(I^Z^I)-3*(I^I^Z)
beta = theta[:p]
gamma = theta[p:]
# initial_state
for i in range(0, nqubits):
qc.h(i)
for irep in range(0, p):
#ipdb.set_trace(context=6)
# problem unitary
# for pair in list(G.edges()):
# qc.rzz(2 * gamma[irep], pair[0], pair[1])
#ApplyCost(qc,2*0)
ham = HamiltonianGate(operator,2 * gamma[irep])
qc.append(ham,[0,1,2])
# mixer unitary
for i in range(0, nqubits):
qc.rx(2 * beta[irep], i)
qc.measure(qc.qubits[:n],qc.clbits[:m])
return qc
# Finally we write a function that executes the circuit on the chosen backend
def get_expectation(shots=512):
"""
Runs parametrized circuit
Args:
G: networkx graph
p: int,
Number of repetitions of unitaries
"""
backend = Aer.get_backend('qasm_simulator')
backend.shots = shots
def execute_circ(theta):
qc = create_qaoa_circ(theta)
# ipdb.set_trace(context=6)
counts = {}
job = execute(qc, backend, shots=shots)  # honor the caller's shots (was hardcoded to 1024)
result = job.result()
counts=result.get_counts(qc)
return compute_expectation(counts)
return execute_circ
from scipy.optimize import minimize
expectation = get_expectation()
res = minimize(expectation, [1, 1], method='COBYLA')
expectation = get_expectation()
res = minimize(expectation, res.x, method='COBYLA')
res
from qiskit.visualization import plot_histogram
backend = Aer.get_backend('aer_simulator')
backend.shots = 512
qc_res = create_qaoa_circ(res.x)
backend = Aer.get_backend('qasm_simulator')
job = execute(qc_res, backend, shots=1024)
result = job.result()
counts=result.get_counts(qc_res)
plot_histogram(counts)
|
https://github.com/QuCO-CSAM/Solving-Combinatorial-Optimisation-Problems-Using-Quantum-Algorithms
|
QuCO-CSAM
|
#Assign these values as per your requirements.
global min_qubits,max_qubits,skip_qubits,max_circuits,num_shots,Noise_Inclusion
min_qubits=4
max_qubits=15 # reference files go up to 12 qubits only
skip_qubits=2
max_circuits=3
num_shots=4092
gate_counts_plots = True
Noise_Inclusion = False
saveplots = False
Memory_utilization_plot = True
Type_of_Simulator = "built_in" #Inputs are "built_in" or "FAKE" or "FAKEV2"
backend_name = "FakeGuadalupeV2" #Can refer to the README files for the available backends
#Change your Specification of Simulator in Declaring Backend Section
#By Default : built_in -> qasm_simulator and FAKE -> FakeSantiago() and FAKEV2 -> FakeSantiagoV2()
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, transpile, execute
from qiskit.opflow import PauliTrotterEvolution, Suzuki
from qiskit.opflow.primitive_ops import PauliSumOp
import time,os,json
import matplotlib.pyplot as plt
# Import from Qiskit Aer noise module
from qiskit_aer.noise import (NoiseModel, QuantumError, ReadoutError,pauli_error, depolarizing_error, thermal_relaxation_error,reset_error)
# Benchmark Name
benchmark_name = "VQE Simulation"
# Selection of basis gate set for transpilation
# Note: selector 1 is a hardware agnostic gate set
basis_selector = 1
basis_gates_array = [
[],
['rx', 'ry', 'rz', 'cx'], # a common basis set, default
['cx', 'rz', 'sx', 'x'], # IBM default basis set
['rx', 'ry', 'rxx'], # IonQ default basis set
['h', 'p', 'cx'], # another common basis set
['u', 'cx'] # general unitaries basis gates
]
np.random.seed(0)
def get_QV(backend):
import json
# Assuming backend.conf_filename is the filename and backend.dirname is the directory path
conf_filename = backend.dirname + "/" + backend.conf_filename
# Open the JSON file
with open(conf_filename, 'r') as file:
# Load the JSON data
data = json.load(file)
# Extract the quantum_volume parameter
QV = data.get('quantum_volume', None)
return QV
def checkbackend(backend_name,Type_of_Simulator):
if Type_of_Simulator == "built_in":
available_backends = []
for i in Aer.backends():
available_backends.append(i.name)
if backend_name in available_backends:
platform = backend_name
return platform
else:
print(f"incorrect backend name or backend not available. Using qasm_simulator by default !!!!")
print(f"available backends are : {available_backends}")
platform = "qasm_simulator"
return platform
elif Type_of_Simulator == "FAKE" or Type_of_Simulator == "FAKEV2":
import qiskit.providers.fake_provider as fake_backends
if hasattr(fake_backends,backend_name) is True:
print(f"Backend {backend_name} is available for type {Type_of_Simulator}.")
backend_class = getattr(fake_backends,backend_name)
backend_instance = backend_class()
return backend_instance
else:
print(f"Backend {backend_name} is not available or incorrect for type {Type_of_Simulator}. Executing with FakeSantiago!!!")
if Type_of_Simulator == "FAKEV2":
backend_class = getattr(fake_backends,"FakeSantiagoV2")
else:
backend_class = getattr(fake_backends,"FakeSantiago")
backend_instance = backend_class()
return backend_instance
if Type_of_Simulator == "built_in":
platform = checkbackend(backend_name,Type_of_Simulator)
#By default using "Qasm Simulator"
backend = Aer.get_backend(platform)
QV_=None
print(f"{platform} device is capable of running {backend.num_qubits}")
print(f"backend version is {backend.backend_version}")
elif Type_of_Simulator == "FAKE":
basis_selector = 0
backend = checkbackend(backend_name,Type_of_Simulator)
QV_ = get_QV(backend)
platform = backend.properties().backend_name +"-"+ backend.properties().backend_version #Replace this string with the backend Provider's name as this is used for Plotting.
max_qubits=backend.configuration().n_qubits
print(f"{platform} device is capable of running {backend.configuration().n_qubits}")
print(f"{platform} has QV={QV_}")
if max_qubits > 30:
print(f"Device is capable with max_qubits = {max_qubits}")
max_qubit = 30
print(f"Using fake backend {platform} with max_qubits {max_qubits}")
elif Type_of_Simulator == "FAKEV2":
basis_selector = 0
if "V2" not in backend_name:
backend_name = backend_name+"V2"
backend = checkbackend(backend_name,Type_of_Simulator)
QV_ = get_QV(backend)
platform = backend.name +"-" +backend.backend_version
max_qubits=backend.num_qubits
print(f"{platform} device is capable of running {backend.num_qubits}")
print(f"{platform} has QV={QV_}")
if max_qubits > 30:
print(f"Device is capable with max_qubits = {max_qubits}")
max_qubit = 30
print(f"Using fake backend {platform} with max_qubits {max_qubits}")
else:
print("Enter valid Simulator.....")
# saved circuits for display
QC_ = None
Hf_ = None
CO_ = None
################### Circuit Definition #######################################
# Construct a Qiskit circuit for VQE Energy evaluation with UCCSD ansatz
# param: n_spin_orbs - The number of spin orbitals.
# return: return a Qiskit circuit for this VQE ansatz
def VQEEnergy(n_spin_orbs, na, nb, circuit_id=0, method=1):
# number of alpha spin orbitals
norb_a = int(n_spin_orbs / 2)
# construct the Hamiltonian
qubit_op = ReadHamiltonian(n_spin_orbs)
# allocate qubits
num_qubits = n_spin_orbs
qr = QuantumRegister(num_qubits)
qc = QuantumCircuit(qr, name=f"vqe-ansatz({method})-{num_qubits}-{circuit_id}")
# initialize the HF state
Hf = HartreeFock(num_qubits, na, nb)
qc.append(Hf, qr)
# form the list of single and double excitations
excitationList = []
for occ_a in range(na):
for vir_a in range(na, norb_a):
excitationList.append((occ_a, vir_a))
for occ_b in range(norb_a, norb_a+nb):
for vir_b in range(norb_a+nb, n_spin_orbs):
excitationList.append((occ_b, vir_b))
for occ_a in range(na):
for vir_a in range(na, norb_a):
for occ_b in range(norb_a, norb_a+nb):
for vir_b in range(norb_a+nb, n_spin_orbs):
excitationList.append((occ_a, vir_a, occ_b, vir_b))
# get cluster operators in Paulis
pauli_list = readPauliExcitation(n_spin_orbs, circuit_id)
# loop over the Pauli operators
for index, PauliOp in enumerate(pauli_list):
# get circuit for exp(-iP)
cluster_qc = ClusterOperatorCircuit(PauliOp, excitationList[index])
# add to ansatz
qc.append(cluster_qc, [i for i in range(cluster_qc.num_qubits)])
# method 1, only compute the last term in the Hamiltonian
if method == 1:
# last term in Hamiltonian
qc_with_mea, is_diag = ExpectationCircuit(qc, qubit_op[1], num_qubits)
# return the circuit
return qc_with_mea
# now we need to add the measurement parts to the circuit
# circuit list
qc_list = []
diag = []
off_diag = []
global normalization
normalization = 0.0
# add the first non-identity term
identity_qc = qc.copy()
identity_qc.measure_all()
qc_list.append(identity_qc) # add to circuit list
diag.append(qubit_op[1])
normalization += abs(qubit_op[1].coeffs[0]) # add to normalization factor
diag_coeff = abs(qubit_op[1].coeffs[0]) # add to coefficients of diagonal terms
# loop over rest of terms
for index, p in enumerate(qubit_op[2:]):
# get the circuit with expectation measurements
qc_with_mea, is_diag = ExpectationCircuit(qc, p, num_qubits)
# accumulate normalization
normalization += abs(p.coeffs[0])
# add to circuit list if non-diagonal
if not is_diag:
qc_list.append(qc_with_mea)
else:
diag_coeff += abs(p.coeffs[0])
# diagonal term
if is_diag:
diag.append(p)
# off-diagonal term
else:
off_diag.append(p)
# modify the name of diagonal circuit
qc_list[0].name = qubit_op[1].primitive.to_list()[0][0] + " " + str(np.real(diag_coeff))
normalization /= len(qc_list)
return qc_list
# Function that constructs the circuit for a given cluster operator
def ClusterOperatorCircuit(pauli_op, excitationIndex):
# compute exp(-iP)
exp_ip = pauli_op.exp_i()
# Trotter approximation
qc_op = PauliTrotterEvolution(trotter_mode=Suzuki(order=1, reps=1)).convert(exp_ip)
# convert to circuit
qc = qc_op.to_circuit(); qc.name = f'Cluster Op {excitationIndex}'
global CO_
if CO_ == None or qc.num_qubits <= 4:
if qc.num_qubits < 7: CO_ = qc
# return this circuit
return qc
# Function that adds expectation measurements to the raw circuits
def ExpectationCircuit(qc, pauli, nqubit, method=2):
# copy the unrotated circuit
raw_qc = qc.copy()
# whether this term is diagonal
is_diag = True
# primitive Pauli string
PauliString = pauli.primitive.to_list()[0][0]
# coefficient
coeff = pauli.coeffs[0]
# basis rotation
for i, p in enumerate(PauliString):
target_qubit = nqubit - i - 1
if (p == "X"):
is_diag = False
raw_qc.h(target_qubit)
elif (p == "Y"):
raw_qc.sdg(target_qubit)
raw_qc.h(target_qubit)
is_diag = False
# perform measurements
raw_qc.measure_all()
# name of this circuit
raw_qc.name = PauliString + " " + str(np.real(coeff))
# save circuit
global QC_
if QC_ == None or nqubit <= 4:
if nqubit < 7: QC_ = raw_qc
return raw_qc, is_diag
# Function that implements the Hartree-Fock state
def HartreeFock(norb, na, nb):
# initialize the quantum circuit
qc = QuantumCircuit(norb, name="Hf")
# alpha electrons
for ia in range(na):
qc.x(ia)
# beta electrons
for ib in range(nb):
qc.x(ib+int(norb/2))
# Save smaller circuit
global Hf_
if Hf_ == None or norb <= 4:
if norb < 7: Hf_ = qc
# return the circuit
return qc
################ Helper Functions
# Function that converts a list of single and double excitation operators to Pauli operators
def readPauliExcitation(norb, circuit_id=0):
# load pre-computed data
filename = os.path.join(f'ansatzes/{norb}_qubit_{circuit_id}.txt')
with open(filename) as f:
data = f.read()
ansatz_dict = json.loads(data)
# initialize Pauli list
pauli_list = []
# current coefficients
cur_coeff = 1e5
# current Pauli list
cur_list = []
# loop over excitations
for ext in ansatz_dict:
if cur_coeff > 1e4:
cur_coeff = ansatz_dict[ext]
cur_list = [(ext, ansatz_dict[ext])]
elif abs(abs(ansatz_dict[ext]) - abs(cur_coeff)) > 1e-4:
pauli_list.append(PauliSumOp.from_list(cur_list))
cur_coeff = ansatz_dict[ext]
cur_list = [(ext, ansatz_dict[ext])]
else:
cur_list.append((ext, ansatz_dict[ext]))
# add the last term
pauli_list.append(PauliSumOp.from_list(cur_list))
# return Pauli list
return pauli_list
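# Grouping illustration (hypothetical dict, no file I/O): given
# ansatz_dict = {'XYII': 0.1, 'YXII': -0.1, 'XXXY': 0.05}
# consecutive terms whose |coefficient| agrees to within 1e-4 are merged, so
# the loop above yields two operators:
# [PauliSumOp.from_list([('XYII', 0.1), ('YXII', -0.1)]),
#  PauliSumOp.from_list([('XXXY', 0.05)])]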
# Get the Hamiltonian by reading in pre-computed file
def ReadHamiltonian(nqubit):
# load pre-computed data
filename = os.path.join(f'Hamiltonians/{nqubit}_qubit.txt')
with open(filename) as f:
data = f.read()
ham_dict = json.loads(data)
# pauli list
pauli_list = []
for p in ham_dict:
pauli_list.append( (p, ham_dict[p]) )
# build Hamiltonian
ham = PauliSumOp.from_list(pauli_list)
# return Hamiltonian
return ham
# Create an empty noise model
noise_parameters = NoiseModel()
if Type_of_Simulator == "built_in":
# Add depolarizing error to all single-qubit gates with error rate 5% and to all two-qubit gates with error rate 0.5%
depol_one_qb_error = 0.05
depol_two_qb_error = 0.005
noise_parameters.add_all_qubit_quantum_error(depolarizing_error(depol_one_qb_error, 1), ['rx', 'ry', 'rz'])
noise_parameters.add_all_qubit_quantum_error(depolarizing_error(depol_two_qb_error, 2), ['cx'])
# Placeholder for amplitude damping noise with rate 0.0 on one- and two-qubit gates (note: the lines below actually apply depolarizing_error, not an amplitude damping channel)
amp_damp_one_qb_error = 0.0
amp_damp_two_qb_error = 0.0
noise_parameters.add_all_qubit_quantum_error(depolarizing_error(amp_damp_one_qb_error, 1), ['rx', 'ry', 'rz'])
noise_parameters.add_all_qubit_quantum_error(depolarizing_error(amp_damp_two_qb_error, 2), ['cx'])
# Add reset noise to all single qubit resets
reset_to_zero_error = 0.005
reset_to_one_error = 0.005
noise_parameters.add_all_qubit_quantum_error(reset_error(reset_to_zero_error, reset_to_one_error),["reset"])
# Add readout error
p0given1_error = 0.000
p1given0_error = 0.000
error_meas = ReadoutError([[1 - p1given0_error, p1given0_error], [p0given1_error, 1 - p0given1_error]])
noise_parameters.add_all_qubit_readout_error(error_meas)
#print(noise_parameters)
elif Type_of_Simulator == "FAKE"or"FAKEV2":
noise_parameters = NoiseModel.from_backend(backend)
#print(noise_parameters)
### Analysis methods to be expanded and eventually compiled into a separate analysis.py file
import math, functools
def hellinger_fidelity_with_expected(p, q):
""" p: result distribution, may be passed as a counts distribution
q: the expected distribution to be compared against
References:
`Hellinger Distance @ wikipedia <https://en.wikipedia.org/wiki/Hellinger_distance>`_
Qiskit Hellinger Fidelity Function
"""
p_sum = sum(p.values())
q_sum = sum(q.values())
if q_sum == 0:
print("ERROR: polarization_fidelity(), expected distribution is invalid, all counts equal to 0")
return 0
p_normed = {}
for key, val in p.items():
p_normed[key] = val/p_sum
# if p_sum != 0:
# p_normed[key] = val/p_sum
# else:
# p_normed[key] = 0
q_normed = {}
for key, val in q.items():
q_normed[key] = val/q_sum
total = 0
for key, val in p_normed.items():
if key in q_normed.keys():
total += (np.sqrt(val) - np.sqrt(q_normed[key]))**2
del q_normed[key]
else:
total += val
total += sum(q_normed.values())
# in some situations (error mitigation) this can go negative, use abs value
if total < 0:
print(f"WARNING: using absolute value in fidelity calculation")
total = abs(total)
dist = np.sqrt(total)/np.sqrt(2)
fidelity = (1-dist**2)**2
return fidelity
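# Hypothetical smoke tests: identical distributions give fidelity 1,
# disjoint ones give 0.
assert hellinger_fidelity_with_expected({'0': 10}, {'0': 1.0}) == 1.0
assert hellinger_fidelity_with_expected({'0': 10}, {'1': 1.0}) == 0.0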
def polarization_fidelity(counts, correct_dist, thermal_dist=None):
"""
Combines Hellinger fidelity and polarization rescaling into fidelity calculation
used in every benchmark
counts: the measurement outcomes after `num_shots` algorithm runs
correct_dist: the distribution we expect to get for the algorithm running perfectly
thermal_dist: optional distribution to pass in distribution from a uniform
superposition over all states. If `None`: generated as
`uniform_dist` with the same qubits as in `counts`
returns both polarization fidelity and the hellinger fidelity
Polarization from: `https://arxiv.org/abs/2008.11294v1`
"""
num_measured_qubits = len(list(correct_dist.keys())[0])
#print(num_measured_qubits)
counts = {k.zfill(num_measured_qubits): v for k, v in counts.items()}
# calculate hellinger fidelity between measured expectation values and correct distribution
hf_fidelity = hellinger_fidelity_with_expected(counts,correct_dist)
# to limit cpu and memory utilization, skip noise correction if more than 16 measured qubits
if num_measured_qubits > 16:
return { 'fidelity':hf_fidelity, 'hf_fidelity':hf_fidelity }
# if not provided, generate thermal dist based on number of qubits
if thermal_dist == None:
thermal_dist = uniform_dist(num_measured_qubits)
# set our fidelity rescaling value as the hellinger fidelity for a depolarized state
floor_fidelity = hellinger_fidelity_with_expected(thermal_dist, correct_dist)
# rescale the fidelity so that a uniform superposition (random guessing) maps
# to 0, providing a better measure of success of the algorithm (polarization)
new_floor_fidelity = 0
fidelity = rescale_fidelity(hf_fidelity, floor_fidelity, new_floor_fidelity)
return { 'fidelity':fidelity, 'hf_fidelity':hf_fidelity }
## Uniform distribution function commonly used
def rescale_fidelity(fidelity, floor_fidelity, new_floor_fidelity):
"""
Linearly rescales our fidelities to allow comparisons of fidelities across benchmarks
fidelity: raw fidelity to rescale
floor_fidelity: threshold fidelity which is equivalent to random guessing
new_floor_fidelity: what we rescale the floor_fidelity to
Ex, with floor_fidelity = 0.25, new_floor_fidelity = 0.0:
1 -> 1;
0.25 -> 0;
0.5 -> 0.3333;
"""
rescaled_fidelity = (1-new_floor_fidelity)/(1-floor_fidelity) * (fidelity - 1) + 1
# ensure fidelity is within bounds (0, 1)
if rescaled_fidelity < 0:
rescaled_fidelity = 0.0
if rescaled_fidelity > 1:
rescaled_fidelity = 1.0
return rescaled_fidelity
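# Executable version of the docstring example (floor 0.25 rescaled to 0):
assert abs(rescale_fidelity(0.5, 0.25, 0.0) - 1/3) < 1e-9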
def uniform_dist(num_state_qubits):
dist = {}
for i in range(2**num_state_qubits):
key = bin(i)[2:].zfill(num_state_qubits)
dist[key] = 1/(2**num_state_qubits)
return dist
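# Hypothetical end-to-end check of the rescaling: a perfect 2-qubit result
# keeps fidelity 1, while uniform-random counts rescale to polarization ~0.
correct = {'00': 1.0}
print(polarization_fidelity({'00': 1000}, correct))  # fidelity ~1.0
print(polarization_fidelity({k: 250 for k in uniform_dist(2)}, correct))  # fidelity ~0.0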
from matplotlib.patches import Rectangle
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap, Normalize
from matplotlib.patches import Circle
############### Color Map functions
# Create a selection of colormaps from which to choose; default to custom_spectral
cmap_spectral = plt.get_cmap('Spectral')
cmap_greys = plt.get_cmap('Greys')
cmap_blues = plt.get_cmap('Blues')
cmap_custom_spectral = None
# the default colormap is the spectral map
cmap = cmap_spectral
cmap_orig = cmap_spectral
# current cmap normalization function (default None)
cmap_norm = None
default_fade_low_fidelity_level = 0.16
default_fade_rate = 0.7
# Specify a normalization function here (default None)
def set_custom_cmap_norm(vmin, vmax):
global cmap_norm
if vmin == vmax or (vmin == 0.0 and vmax == 1.0):
print("... setting cmap norm to None")
cmap_norm = None
else:
print(f"... setting cmap norm to [{vmin}, {vmax}]")
cmap_norm = Normalize(vmin=vmin, vmax=vmax)
# Remake the custom spectral colormap with user settings
def set_custom_cmap_style(
fade_low_fidelity_level=default_fade_low_fidelity_level,
fade_rate=default_fade_rate):
#print("... set custom map style")
global cmap, cmap_custom_spectral, cmap_orig
cmap_custom_spectral = create_custom_spectral_cmap(
fade_low_fidelity_level=fade_low_fidelity_level, fade_rate=fade_rate)
cmap = cmap_custom_spectral
cmap_orig = cmap_custom_spectral
# Create the custom spectral colormap from the base spectral
def create_custom_spectral_cmap(
fade_low_fidelity_level=default_fade_low_fidelity_level,
fade_rate=default_fade_rate):
# determine the breakpoint from the fade level
num_colors = 100
breakpoint = round(fade_low_fidelity_level * num_colors)
# get color list for spectral map
spectral_colors = [cmap_spectral(v/num_colors) for v in range(num_colors)]
#print(fade_rate)
# create a list of colors to replace those below the breakpoint
# and fill with "faded" color entries (in reverse)
low_colors = [0] * breakpoint
#for i in reversed(range(breakpoint)):
for i in range(breakpoint):
# x is index of low colors, normalized 0 -> 1
x = i / breakpoint
# get color at this index
bc = spectral_colors[i]
r0 = bc[0]
g0 = bc[1]
b0 = bc[2]
z0 = bc[3]
r_delta = 0.92 - r0
#print(f"{x} {bc} {r_delta}")
# compute saturation and greyness ratio
sat_ratio = 1 - x
#grey_ratio = 1 - x
''' attempt at a reflective gradient
if i >= breakpoint/2:
xf = 2*(x - 0.5)
yf = pow(xf, 1/fade_rate)/2
grey_ratio = 1 - (yf + 0.5)
else:
xf = 2*(0.5 - x)
yf = pow(xf, 1/fade_rate)/2
grey_ratio = 1 - (0.5 - yf)
'''
grey_ratio = 1 - math.pow(x, 1/fade_rate)
#print(f" {xf} {yf} ")
#print(f" {sat_ratio} {grey_ratio}")
r = r0 + r_delta * sat_ratio
g_delta = r - g0
b_delta = r - b0
g = g0 + g_delta * grey_ratio
b = b0 + b_delta * grey_ratio
#print(f"{r} {g} {b}\n")
low_colors[i] = (r,g,b,z0)
#print(low_colors)
# combine the faded low colors with the regular spectral cmap to make a custom version
cmap_custom_spectral = ListedColormap(low_colors + spectral_colors[breakpoint:])
#spectral_colors = [cmap_custom_spectral(v/10) for v in range(10)]
#for i in range(10): print(spectral_colors[i])
#print("")
return cmap_custom_spectral
# Make the custom spectral color map the default on module init
set_custom_cmap_style()
# Arrange the stored annotations optimally and add to plot
def anno_volumetric_data(ax, depth_base=2, label='Depth',
labelpos=(0.2, 0.7), labelrot=0, type=1, fill=True):
# sort all arrays by the x point of the text (anno_offs)
global x_anno_offs, y_anno_offs, anno_labels, x_annos, y_annos
all_annos = sorted(zip(x_anno_offs, y_anno_offs, anno_labels, x_annos, y_annos))
x_anno_offs = [a for a,b,c,d,e in all_annos]
y_anno_offs = [b for a,b,c,d,e in all_annos]
anno_labels = [c for a,b,c,d,e in all_annos]
x_annos = [d for a,b,c,d,e in all_annos]
y_annos = [e for a,b,c,d,e in all_annos]
#print(f"{x_anno_offs}")
#print(f"{y_anno_offs}")
#print(f"{anno_labels}")
for i in range(len(anno_labels)):
x_anno = x_annos[i]
y_anno = y_annos[i]
x_anno_off = x_anno_offs[i]
y_anno_off = y_anno_offs[i]
label = anno_labels[i]
if i > 0:
x_delta = abs(x_anno_off - x_anno_offs[i - 1])
y_delta = abs(y_anno_off - y_anno_offs[i - 1])
if y_delta < 0.7 and x_delta < 2:
y_anno_off = y_anno_offs[i] = y_anno_offs[i - 1] - 0.6
#x_anno_off = x_anno_offs[i] = x_anno_offs[i - 1] + 0.1
ax.annotate(label,
xy=(x_anno+0.0, y_anno+0.1),
arrowprops=dict(facecolor='black', shrink=0.0,
width=0.5, headwidth=4, headlength=5, edgecolor=(0.8,0.8,0.8)),
xytext=(x_anno_off + labelpos[0], y_anno_off + labelpos[1]),
rotation=labelrot,
horizontalalignment='left', verticalalignment='baseline',
color=(0.2,0.2,0.2),
clip_on=True)
if saveplots == True:
plt.savefig("VolumetricPlotSample.jpg")
# Plot one group of data for volumetric presentation
def plot_volumetric_data(ax, w_data, d_data, f_data, depth_base=2, label='Depth',
labelpos=(0.2, 0.7), labelrot=0, type=1, fill=True, w_max=18, do_label=False, do_border=True,
x_size=1.0, y_size=1.0, zorder=1, offset_flag=False,
max_depth=0, suppress_low_fidelity=False):
# since data may come back out of order, save point at max y for annotation
i_anno = 0
x_anno = 0
y_anno = 0
# plot data rectangles
low_fidelity_count = True
last_y = -1
k = 0
# determine y-axis dimension for one pixel to use for offset of bars that start at 0
(_, dy) = get_pixel_dims(ax)
# do this loop in reverse to handle the case where earlier cells are overlapped by later cells
for i in reversed(range(len(d_data))):
x = depth_index(d_data[i], depth_base)
y = float(w_data[i])
f = f_data[i]
# each time we start a new row, reset the offset counter
# DEVNOTE: this is highly specialized for the QA area plots, where there are 8 bars
# that represent time starting from 0 secs. We offset by one pixel each and center the group
if y != last_y:
last_y = y;
k = 3 # hardcoded for 8 cells, offset by 3
#print(f"{i = } {x = } {y = }")
if max_depth > 0 and d_data[i] > max_depth:
#print(f"... excessive depth (2), skipped; w={y} d={d_data[i]}")
break;
# reject cells with low fidelity
if suppress_low_fidelity and f < suppress_low_fidelity_level:
if low_fidelity_count: break
else: low_fidelity_count = True
# the only time this is False is when doing merged gradation plots
if do_border == True:
# this case is for an array of x_sizes, i.e. each box has different width
if isinstance(x_size, list):
# draw each of the cells, with no offset
if not offset_flag:
ax.add_patch(box_at(x, y, f, type=type, fill=fill, x_size=x_size[i], y_size=y_size, zorder=zorder))
# use an offset for y value, AND account for x and width to draw starting at 0
else:
ax.add_patch(box_at((x/2 + x_size[i]/4), y + k*dy, f, type=type, fill=fill, x_size=x+ x_size[i]/2, y_size=y_size, zorder=zorder))
# this case is for only a single cell
else:
ax.add_patch(box_at(x, y, f, type=type, fill=fill, x_size=x_size, y_size=y_size))
# save the annotation point with the largest y value
if y >= y_anno:
x_anno = x
y_anno = y
i_anno = i
# move the next bar down (if using offset)
k -= 1
# if no data rectangles plotted, no need for a label
if x_anno == 0 or y_anno == 0:
return
x_annos.append(x_anno)
y_annos.append(y_anno)
anno_dist = math.sqrt( (y_anno - 1)**2 + (x_anno - 1)**2 )
# adjust radius of annotation circle based on maximum width of apps
anno_max = 10
if w_max > 10:
anno_max = 14
if w_max > 14:
anno_max = 18
scale = anno_max / anno_dist
# offset of text from end of arrow
if scale > 1:
x_anno_off = scale * x_anno - x_anno - 0.5
y_anno_off = scale * y_anno - y_anno
else:
x_anno_off = 0.7
y_anno_off = 0.5
x_anno_off += x_anno
y_anno_off += y_anno
# print(f"... {xx} {yy} {anno_dist}")
x_anno_offs.append(x_anno_off)
y_anno_offs.append(y_anno_off)
anno_labels.append(label)
if do_label:
ax.annotate(label, xy=(x_anno+labelpos[0], y_anno+labelpos[1]), rotation=labelrot,
horizontalalignment='left', verticalalignment='bottom', color=(0.2,0.2,0.2))
x_annos = []
y_annos = []
x_anno_offs = []
y_anno_offs = []
anno_labels = []
# init arrays to hold annotation points for label spreading
def vplot_anno_init ():
global x_annos, y_annos, x_anno_offs, y_anno_offs, anno_labels
x_annos = []
y_annos = []
x_anno_offs = []
y_anno_offs = []
anno_labels = []
# Number of ticks on volumetric depth axis
max_depth_log = 22
# average transpile factor between base QV depth and our depth based on results from QV notebook
QV_transpile_factor = 12.7
# format a number using K,M,B,T for large numbers, optionally rounding to 'digits' decimal places if num > 1
# (sign handling may be incorrect)
def format_number(num, digits=0):
if isinstance(num, str): num = float(num)
num = float('{:.3g}'.format(abs(num)))
sign = ''
metric = {'T': 1000000000000, 'B': 1000000000, 'M': 1000000, 'K': 1000, '': 1}
for index in metric:
num_check = num / metric[index]
if num_check >= 1:
num = round(num_check, digits)
sign = index
break
numstr = f"{str(num)}"
if '.' in numstr:
numstr = numstr.rstrip('0').rstrip('.')
return f"{numstr}{sign}"
# Return the color associated with the specific value, using color map norm
def get_color(value):
# if there is a normalize function installed, scale the data
if cmap_norm:
value = float(cmap_norm(value))
if cmap == cmap_spectral:
value = 0.05 + value*0.9
elif cmap == cmap_blues:
value = 0.00 + value*1.0
else:
value = 0.0 + value*0.95
return cmap(value)
# Return the x and y equivalent to a single pixel for the given plot axis
def get_pixel_dims(ax):
# transform 0 -> 1 to pixel dimensions
pixdims = ax.transData.transform([(0,1),(1,0)])-ax.transData.transform((0,0))
xpix = pixdims[1][0]
ypix = pixdims[0][1]
#determine x- and y-axis dimension for one pixel
dx = (1 / xpix)
dy = (1 / ypix)
return (dx, dy)
############### Helper functions
# return the base index for a circuit depth value
# take the log in the depth base, and add 1
def depth_index(d, depth_base):
if depth_base <= 1:
return d
if d == 0:
return 0
return math.log(d, depth_base) + 1
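# e.g. with depth_base=2: depth_index(1, 2) == 1.0, depth_index(8, 2) == 4.0,
# and depth_index(0, 2) == 0 by the special case above.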
# draw a box at x,y with various attributes
def box_at(x, y, value, type=1, fill=True, x_size=1.0, y_size=1.0, alpha=1.0, zorder=1):
value = min(value, 1.0)
value = max(value, 0.0)
fc = get_color(value)
ec = (0.5,0.5,0.5)
return Rectangle((x - (x_size/2), y - (y_size/2)), x_size, y_size,
alpha=alpha,
edgecolor = ec,
facecolor = fc,
fill=fill,
lw=0.5*y_size,
zorder=zorder)
# draw a circle at x,y with various attributes
def circle_at(x, y, value, type=1, fill=True):
size = 1.0
value = min(value, 1.0)
value = max(value, 0.0)
fc = get_color(value)
ec = (0.5,0.5,0.5)
return Circle((x, y), size/2,
alpha = 0.7, # DEVNOTE: changed to 0.7 from 0.5, to handle only one cell
edgecolor = ec,
facecolor = fc,
fill=fill,
lw=0.5)
def box4_at(x, y, value, type=1, fill=True, alpha=1.0):
size = 1.0
value = min(value, 1.0)
value = max(value, 0.0)
fc = get_color(value)
ec = (0.3,0.3,0.3)
ec = fc
return Rectangle((x - size/8, y - size/2), size/4, size,
alpha=alpha,
edgecolor = ec,
facecolor = fc,
fill=fill,
lw=0.1)
# Draw a Quantum Volume rectangle with specified width and depth, and grey-scale value
def qv_box_at(x, y, qv_width, qv_depth, value, depth_base):
#print(f"{qv_width} {qv_depth} {depth_index(qv_depth, depth_base)}")
return Rectangle((x - 0.5, y - 0.5), depth_index(qv_depth, depth_base), qv_width,
edgecolor = (value,value,value),
facecolor = (value,value,value),
fill=True,
lw=1)
def bkg_box_at(x, y, value=0.9):
size = 0.6
return Rectangle((x - size/2, y - size/2), size, size,
edgecolor = (.75,.75,.75),
facecolor = (value,value,value),
fill=True,
lw=0.5)
def bkg_empty_box_at(x, y):
size = 0.6
return Rectangle((x - size/2, y - size/2), size, size,
edgecolor = (.75,.75,.75),
facecolor = (1.0,1.0,1.0),
fill=True,
lw=0.5)
# Plot the background for the volumetric analysis
def plot_volumetric_background(max_qubits=11, QV=32, depth_base=2, suptitle=None, avail_qubits=0, colorbar_label="Avg Result Fidelity"):
if suptitle == None:
suptitle = f"Volumetric Positioning\nCircuit Dimensions and Fidelity Overlaid on Quantum Volume = {QV}"
QV0 = QV
qv_estimate = False
est_str = ""
if QV == 0: # QV = 0 indicates "do not draw QV background or label"
QV = 2048
elif QV < 0: # QV < 0 indicates "add est. to label"
QV = -QV
qv_estimate = True
est_str = " (est.)"
if avail_qubits > 0 and max_qubits > avail_qubits:
max_qubits = avail_qubits
max_width = 13
if max_qubits > 11: max_width = 18
if max_qubits > 14: max_width = 20
if max_qubits > 16: max_width = 24
if max_qubits > 24: max_width = 33
#print(f"... {avail_qubits} {max_qubits} {max_width}")
plot_width = 6.8
plot_height = 0.5 + plot_width * (max_width / max_depth_log)
#print(f"... {plot_width} {plot_height}")
# define matplotlib figure and axis; use constrained layout to fit colorbar to right
fig, ax = plt.subplots(figsize=(plot_width, plot_height), constrained_layout=True)
plt.suptitle(suptitle)
plt.xlim(0, max_depth_log)
plt.ylim(0, max_width)
# circuit depth axis (x axis)
xbasis = [x for x in range(1,max_depth_log)]
xround = [depth_base**(x-1) for x in xbasis]
xlabels = [format_number(x) for x in xround]
ax.set_xlabel('Circuit Depth')
ax.set_xticks(xbasis)
plt.xticks(xbasis, xlabels, color='black', rotation=45, ha='right', va='top', rotation_mode="anchor")
# other label options
#plt.xticks(xbasis, xlabels, color='black', rotation=-60, ha='left')
#plt.xticks(xbasis, xlabels, color='black', rotation=-45, ha='left', va='center', rotation_mode="anchor")
# circuit width axis (y axis)
ybasis = [y for y in range(1,max_width)]
yround = [1,2,3,4,5,6,7,8,10,12,15] # not used now
ylabels = [str(y) for y in yround] # not used now
#ax.set_ylabel('Circuit Width (Number of Qubits)')
ax.set_ylabel('Circuit Width')
ax.set_yticks(ybasis)
#create simple line plot (not used right now)
#ax.plot([0, 10],[0, 10])
log2QV = math.log2(QV)
QV_width = log2QV
QV_depth = log2QV * QV_transpile_factor
# show a quantum volume rectangle of QV = 64 e.g. (6 x 6)
if QV0 != 0:
ax.add_patch(qv_box_at(1, 1, QV_width, QV_depth, 0.87, depth_base))
else:
ax.add_patch(qv_box_at(1, 1, QV_width, QV_depth, 0.91, depth_base))
# the untranspiled version is commented out - we do not show this by default
# also show a quantum volume rectangle un-transpiled
# ax.add_patch(qv_box_at(1, 1, QV_width, QV_width, 0.80, depth_base))
# show 2D array of volumetric cells based on this QV_transpiled
# DEVNOTE: we use +1 only to make the visuals work; s/b without
# Also, the second arg of the min( below seems incorrect, needs correction
maxprod = (QV_width + 1) * (QV_depth + 1)
for w in range(1, min(max_width, round(QV) + 1)):
# don't show VB squares if width greater than known available qubits
if avail_qubits != 0 and w > avail_qubits:
continue
i_success = 0
for d in xround:
# polarization factor for low circuit widths
maxtest = maxprod / ( 1 - 1 / (2**w) )
# if circuit would fail here, don't draw box
if d > maxtest: continue
if w * d > maxtest: continue
# guess for how to capture how hardware decays with width, not entirely correct
# # reduce maxtext by a factor of number of qubits > QV_width
# # just an approximation to account for qubit distances
# if w > QV_width:
# over = w - QV_width
# maxtest = maxtest / (1 + (over/QV_width))
# draw a box at this width and depth
id = depth_index(d, depth_base)
# show vb rectangles; if not showing QV, make all hollow (or less dark)
if QV0 == 0:
#ax.add_patch(bkg_empty_box_at(id, w))
ax.add_patch(bkg_box_at(id, w, 0.95))
else:
ax.add_patch(bkg_box_at(id, w, 0.9))
# save index of last successful depth
i_success += 1
# plot empty rectangle after others
d = xround[i_success]
id = depth_index(d, depth_base)
ax.add_patch(bkg_empty_box_at(id, w))
# Add annotation showing quantum volume
if QV0 != 0:
t = ax.text(max_depth_log - 2.0, 1.5, f"QV{est_str}={QV}", size=12,
horizontalalignment='right', verticalalignment='center', color=(0.2,0.2,0.2),
bbox=dict(boxstyle="square,pad=0.3", fc=(.9,.9,.9), ec="grey", lw=1))
# add colorbar to right of plot
plt.colorbar(cm.ScalarMappable(cmap=cmap), cax=None, ax=ax,
shrink=0.6, label=colorbar_label, panchor=(0.0, 0.7))
return ax
# Function to calculate circuit depth
def calculate_circuit_depth(qc):
# Calculate the depth of the circuit
depth = qc.depth()
return depth
def calculate_transpiled_depth(qc,basis_selector):
# use either the backend or one of the basis gate sets
if basis_selector == 0:
qc = transpile(qc, backend)
else:
basis_gates = basis_gates_array[basis_selector]
qc = transpile(qc, basis_gates=basis_gates, seed_transpiler=0)
transpiled_depth = qc.depth()
return transpiled_depth,qc
def plot_fidelity_data(fidelity_data, Hf_fidelity_data, title):
avg_fidelity_means = []
avg_Hf_fidelity_means = []
avg_num_qubits_values = list(fidelity_data.keys())
# Calculate the average fidelity and Hellinger fidelity for each unique number of qubits
for num_qubits in avg_num_qubits_values:
avg_fidelity = np.average(fidelity_data[num_qubits])
avg_fidelity_means.append(avg_fidelity)
avg_Hf_fidelity = np.mean(Hf_fidelity_data[num_qubits])
avg_Hf_fidelity_means.append(avg_Hf_fidelity)
return avg_fidelity_means,avg_Hf_fidelity_means
list_of_gates = []
def list_of_standardgates():
import qiskit.circuit.library as lib
from qiskit.circuit import Gate
import inspect
# List all the attributes of the library module
gate_list = dir(lib)
# Filter out non-gate classes (like functions, variables, etc.)
gates = [gate for gate in gate_list if isinstance(getattr(lib, gate), type) and issubclass(getattr(lib, gate), Gate)]
# Get method names from QuantumCircuit
circuit_methods = inspect.getmembers(QuantumCircuit, inspect.isfunction)
method_names = [name for name, _ in circuit_methods]
# Map gate class names to method names
gate_to_method = {}
for gate in gates:
gate_class = getattr(lib, gate)
class_name = gate_class.__name__.replace('Gate', '').lower() # Normalize class name
for method in method_names:
if method == class_name or method == class_name.replace('cr', 'c-r'):
gate_to_method[gate] = method
break
# Add common operations that are not strictly gates
additional_operations = {
'Measure': 'measure',
'Barrier': 'barrier',
}
gate_to_method.update(additional_operations)
for k,v in gate_to_method.items():
list_of_gates.append(v)
def update_counts(gates,custom_gates):
operations = {}
for key, value in gates.items():
operations[key] = value
for key, value in custom_gates.items():
if key in operations:
operations[key] += value
else:
operations[key] = value
return operations
def get_gate_counts(gates,custom_gate_defs):
result = gates.copy()
# Iterate over the gate counts in the quantum circuit
for gate, count in gates.items():
if gate in custom_gate_defs:
custom_gate_ops = custom_gate_defs[gate]
# Multiply custom gate operations by the count of the custom gate in the circuit
for _ in range(count):
result = update_counts(result, custom_gate_ops)
# Remove the custom gate entry as we have expanded it
del result[gate]
return result
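# Hypothetical example of the expansion: a custom block counted twice, whose
# definition contains 1 h and 2 cx, is flattened into the totals.
print(get_gate_counts({'my_block': 2, 'cx': 1}, {'my_block': {'h': 1, 'cx': 2}}))
# -> {'cx': 5, 'h': 2}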
dict_of_qc = dict()
custom_gates_defs = dict()
# Function to count operations recursively
def count_operations(qc):
dict_of_qc.clear()
circuit_traverser(qc)
operations = dict()
operations = dict_of_qc[qc.name]
del dict_of_qc[qc.name]
# print("operations :",operations)
# print("dict_of_qc :",dict_of_qc)
for keys in operations.keys():
if keys not in list_of_gates:
for k,v in dict_of_qc.items():
if k in operations.keys():
custom_gates_defs[k] = v
operations=get_gate_counts(operations,custom_gates_defs)
custom_gates_defs.clear()
return operations
def circuit_traverser(qc):
dict_of_qc[qc.name]=dict(qc.count_ops())
for i in qc.data:
if str(i.operation.name) not in list_of_gates:
qc_1 = i.operation.definition
circuit_traverser(qc_1)
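# Usage sketch (hypothetical circuit): populate the standard-gate list first,
# then count operations with custom sub-instructions expanded recursively.
list_of_standardgates()
demo_qc = QuantumCircuit(2, name="demo")
demo_qc.h(0); demo_qc.cx(0, 1); demo_qc.measure_all()
print(count_operations(demo_qc))  # e.g. {'h': 1, 'cx': 1, 'barrier': 1, 'measure': 2}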
def get_memory():
import resource
usage = resource.getrusage(resource.RUSAGE_SELF)
max_mem = usage.ru_maxrss/1024 # in MB (ru_maxrss is reported in KB on Linux; on macOS it is in bytes)
return max_mem
def analyzer(qc,references,num_qubits):
# total circuit name (pauli string + coefficient)
total_name = qc.name
# pauli string
pauli_string = total_name.split()[0]
# get the correct measurement
if (len(total_name.split()) == 2):
correct_dist = references[pauli_string]
else:
circuit_id = int(total_name.split()[2])
correct_dist = references[f"Qubits - {num_qubits} - {circuit_id}"]
return correct_dist,total_name
# Max qubits must be 12 since the referenced files only go to 12 qubits
MAX_QUBITS = 12
method = 1
def run (min_qubits=min_qubits, max_qubits=max_qubits, skip_qubits=2,
max_circuits=max_circuits, num_shots=num_shots):
creation_times = []
elapsed_times = []
quantum_times = []
circuit_depths = []
transpiled_depths = []
fidelity_data = {}
Hf_fidelity_data = {}
numckts = []
mem_usage = []
algorithmic_1Q_gate_counts = []
algorithmic_2Q_gate_counts = []
transpiled_1Q_gate_counts = []
transpiled_2Q_gate_counts = []
print(f"{benchmark_name} Benchmark Program - {platform}")
#defining all the standard gates supported by qiskit in a list
if gate_counts_plots == True:
list_of_standardgates()
max_qubits = max(max_qubits, min_qubits) # max must be >= min
# validate parameters (smallest circuit is 4 qubits and largest is 12 qubits)
max_qubits = min(max_qubits, MAX_QUBITS)
min_qubits = min(max(4, min_qubits), max_qubits)
if min_qubits % 2 == 1: min_qubits += 1 # min_qubits must be even
skip_qubits = max(1, skip_qubits)
if method == 2: max_circuits = 1
if max_qubits < 4:
print(f"Max number of qubits {max_qubits} is too low to run method {method} of VQE algorithm")
return
global max_ckts
max_ckts = max_circuits
global min_qbits,max_qbits,skp_qubits
min_qbits = min_qubits
max_qbits = max_qubits
skp_qubits = skip_qubits
print(f"min, max qubits = {min_qubits} {max_qubits}")
# Execute Benchmark Program N times for multiple circuit sizes
for input_size in range(min_qubits, max_qubits + 1, skip_qubits):
# reset random seed
np.random.seed(0)
# determine the number of circuits to execute for this group
num_circuits = min(3, max_circuits)
num_qubits = input_size
fidelity_data[num_qubits] = []
Hf_fidelity_data[num_qubits] = []
# decides number of electrons
na = int(num_qubits/4)
nb = int(num_qubits/4)
# random seed
np.random.seed(0)
numckts.append(num_circuits)
# create the circuit for given qubit size and simulation parameters, store time metric
ts = time.time()
# circuit list
qc_list = []
# Method 1 (default)
if method == 1:
# loop over circuits
for circuit_id in range(num_circuits):
# construct circuit
qc_single = VQEEnergy(num_qubits, na, nb, circuit_id, method)
qc_single.name = qc_single.name + " " + str(circuit_id)
# add to list
qc_list.append(qc_single)
# method 2
elif method == 2:
# construct all circuits
qc_list = VQEEnergy(num_qubits, na, nb, 0, method)
print(qc_list)
print(f"************\nExecuting [{len(qc_list)}] circuits with num_qubits = {num_qubits}")
for qc in qc_list:
print("*********************************************")
#print(f"qc of {qc} qubits for qc_list value: {qc_list}")
# get circuit id
if method == 1:
circuit_id = qc.name.split()[2]
else:
circuit_id = qc.name.split()[0]
#creation time
creation_time = time.time() - ts
creation_times.append(creation_time)
#print(qc)
print(f"creation time = {creation_time*1000} ms")
# Calculate gate count for the algorithmic circuit (excluding barriers and measurements)
if gate_counts_plots == True:
operations = count_operations(qc)
n1q = 0; n2q = 0
if operations != None:
for key, value in operations.items():
if key == "measure": continue
if key == "barrier": continue
if key.startswith("c") or key.startswith("mc"):
n2q += value
else:
n1q += value
print("operations: ",operations)
algorithmic_1Q_gate_counts.append(n1q)
algorithmic_2Q_gate_counts.append(n2q)
# collapse the sub-circuit levels used in this benchmark (for qiskit)
qc=qc.decompose()
#print(qc)
# Calculate circuit depth
depth = calculate_circuit_depth(qc)
circuit_depths.append(depth)
# Calculate transpiled circuit depth
transpiled_depth,qc = calculate_transpiled_depth(qc,basis_selector)
transpiled_depths.append(transpiled_depth)
#print(qc)
print(f"Algorithmic Depth = {depth} and Normalized Depth = {transpiled_depth}")
if gate_counts_plots == True:
# Calculate gate count for the transpiled circuit (excluding barriers and measurements)
tr_ops = qc.count_ops()
#print("tr_ops = ",tr_ops)
tr_n1q = 0; tr_n2q = 0
if tr_ops != None:
for key, value in tr_ops.items():
if key == "measure": continue
if key == "barrier": continue
if key.startswith("c"): tr_n2q += value
else: tr_n1q += value
transpiled_1Q_gate_counts.append(tr_n1q)
transpiled_2Q_gate_counts.append(tr_n2q)
print(f"Algorithmic 1Q gates = {n1q} ,Algorithmic 2Q gates = {n2q}")
print(f"Normalized 1Q gates = {tr_n1q} ,Normalized 2Q gates = {tr_n2q}")
#execution
if Type_of_Simulator == "built_in":
#To check if Noise is required
if Noise_Inclusion == True:
noise_model = noise_parameters
else:
noise_model = None
ts = time.time()
job = execute(qc, backend, shots=num_shots, noise_model=noise_model)
elif Type_of_Simulator == "FAKE" or Type_of_Simulator == "FAKEV2" :
ts = time.time()
job = backend.run(qc,shots=num_shots, noise_model=noise_parameters)
#retrieving the result
result = job.result()
#print(result)
#calculating elapsed time
elapsed_time = time.time() - ts
elapsed_times.append(elapsed_time)
# Calculate quantum processing time
quantum_time = result.time_taken
quantum_times.append(quantum_time)
print(f"Elapsed time = {elapsed_time*1000} ms and Quantum Time = {quantum_time*1000} ms")
#counts in result object
counts = result.get_counts()
# load pre-computed data
if len(qc.name.split()) == 2:
filename = os.path.join(f'_common/precalculated_data_{num_qubits}_qubit.json')
with open(filename) as f:
references = json.load(f)
else:
filename = os.path.join(f'_common/precalculated_data_{num_qubits}_qubit_method2.json')
with open(filename) as f:
references = json.load(f)
#Correct distribution to compare with counts
correct_dist,total_name = analyzer(qc,references,num_qubits)
# fidelity calculation: comparison of counts and correct_dist
fidelity_dict = polarization_fidelity(counts, correct_dist)
print(fidelity_dict)
# modify fidelity based on the coefficient (scale both dict entries;
# a bare `fidelity_dict *= ...` would raise a TypeError on a dict)
if (len(total_name.split()) == 2):
coeff_scale = abs(float(total_name.split()[1])) / normalization
fidelity_dict['fidelity'] *= coeff_scale
fidelity_dict['hf_fidelity'] *= coeff_scale
fidelity_data[num_qubits].append(fidelity_dict['fidelity'])
Hf_fidelity_data[num_qubits].append(fidelity_dict['hf_fidelity'])
#maximum memory utilization (if required)
if Memory_utilization_plot == True:
max_mem = get_memory()
print(f"Maximum Memory Utilized: {max_mem} MB")
mem_usage.append(max_mem)
print("*********************************************")
##########
# print a sample circuit
print("Sample Circuit:"); print(QC_ if QC_ != None else " ... too large!")
print("\nHartree Fock Generator 'Hf' ="); print(Hf_ if Hf_ != None else " ... too large!")
print("\nCluster Operator Example 'Cluster Op' ="); print(CO_ if CO_ != None else " ... too large!")
return (creation_times, elapsed_times, quantum_times, circuit_depths, transpiled_depths,
fidelity_data, Hf_fidelity_data, numckts , algorithmic_1Q_gate_counts, algorithmic_2Q_gate_counts,
transpiled_1Q_gate_counts, transpiled_2Q_gate_counts,mem_usage)
# Execute the benchmark program, accumulate metrics, and calculate circuit depths
(creation_times, elapsed_times, quantum_times, circuit_depths,transpiled_depths, fidelity_data, Hf_fidelity_data, numckts,
algorithmic_1Q_gate_counts, algorithmic_2Q_gate_counts, transpiled_1Q_gate_counts, transpiled_2Q_gate_counts,mem_usage) = run()
# Define the range of qubits for the x-axis
num_qubits_range = range(min_qbits, max_qbits+1,skp_qubits)
print("num_qubits_range =",num_qubits_range)
# Calculate average creation time, elapsed time, quantum processing time, and circuit depth for each number of qubits
avg_creation_times = []
avg_elapsed_times = []
avg_quantum_times = []
avg_circuit_depths = []
avg_transpiled_depths = []
avg_1Q_algorithmic_gate_counts = []
avg_2Q_algorithmic_gate_counts = []
avg_1Q_Transpiled_gate_counts = []
avg_2Q_Transpiled_gate_counts = []
max_memory = []
start = 0
for num in numckts:
avg_creation_times.append(np.mean(creation_times[start:start+num]))
avg_elapsed_times.append(np.mean(elapsed_times[start:start+num]))
avg_quantum_times.append(np.mean(quantum_times[start:start+num]))
avg_circuit_depths.append(np.mean(circuit_depths[start:start+num]))
avg_transpiled_depths.append(np.mean(transpiled_depths[start:start+num]))
if gate_counts_plots == True:
avg_1Q_algorithmic_gate_counts.append(int(np.mean(algorithmic_1Q_gate_counts[start:start+num])))
avg_2Q_algorithmic_gate_counts.append(int(np.mean(algorithmic_2Q_gate_counts[start:start+num])))
avg_1Q_Transpiled_gate_counts.append(int(np.mean(transpiled_1Q_gate_counts[start:start+num])))
avg_2Q_Transpiled_gate_counts.append(int(np.mean(transpiled_2Q_gate_counts[start:start+num])))
if Memory_utilization_plot == True:max_memory.append(np.max(mem_usage[start:start+num]))
start += num
# Calculate the fidelity data
avg_f, avg_Hf = plot_fidelity_data(fidelity_data, Hf_fidelity_data, "Fidelity Comparison")
# Plot histograms for average creation time, average elapsed time, average quantum processing time, and average circuit depth versus the number of qubits
# Add labels to the bars
def autolabel(rects,ax,str='{:.3f}',va='top',text_color="black"):
for rect in rects:
height = rect.get_height()
ax.annotate(str.format(height), # format according to 'str' (default: three decimal places)
xy=(rect.get_x() + rect.get_width() / 2, height / 2),
xytext=(0, 0),
textcoords="offset points",
ha='center', va=va,color=text_color,rotation=90)
bar_width = 0.3
# Determine the number of subplots and their arrangement
if Memory_utilization_plot and gate_counts_plots:
fig, (ax1, ax2, ax3, ax4, ax5, ax6, ax7) = plt.subplots(7, 1, figsize=(18, 30))
# Plotting for both memory utilization and gate counts
# ax1, ax2, ax3, ax4, ax5, ax6, ax7 are available
elif Memory_utilization_plot:
fig, (ax1, ax2, ax3, ax6, ax7) = plt.subplots(5, 1, figsize=(18, 30))
# Plotting for memory utilization only
# ax1, ax2, ax3, ax6, ax7 are available
elif gate_counts_plots:
fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1, figsize=(18, 30))
# Plotting for gate counts only
# ax1, ax2, ax3, ax4, ax5, ax6 are available
else:
fig, (ax1, ax2, ax3, ax6) = plt.subplots(4, 1, figsize=(18, 30))
# Default plotting
# ax1, ax2, ax3, ax6 are available
fig.suptitle(f"General Benchmarks : {platform} - {benchmark_name}", fontsize=16)
for i in range(len(avg_creation_times)):  # convert seconds to milliseconds
avg_creation_times[i] *= 1000
ax1.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
x = ax1.bar(num_qubits_range, avg_creation_times, color='deepskyblue')
autolabel(ax1.patches, ax1)
ax1.set_xlabel('Number of Qubits')
ax1.set_ylabel('Average Creation Time (ms)')
ax1.set_title('Average Creation Time vs Number of Qubits',fontsize=14)
ax2.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
for i in range(len(avg_elapsed_times)):  # convert seconds to milliseconds
    avg_elapsed_times[i] *= 1000
for i in range(len(avg_quantum_times)):  # convert seconds to milliseconds
    avg_quantum_times[i] *= 1000
Elapsed= ax2.bar(np.array(num_qubits_range) - bar_width / 2, avg_elapsed_times, width=bar_width, color='cyan', label='Elapsed Time')
Quantum= ax2.bar(np.array(num_qubits_range) + bar_width / 2, avg_quantum_times,width=bar_width, color='deepskyblue',label ='Quantum Time')
autolabel(Elapsed,ax2,str='{:.1f}')
autolabel(Quantum,ax2,str='{:.1f}')
ax2.set_xlabel('Number of Qubits')
ax2.set_ylabel('Average Time (ms)')
ax2.set_title('Average Time vs Number of Qubits')
ax2.legend()
ax3.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
Normalized = ax3.bar(np.array(num_qubits_range) - bar_width / 2, avg_transpiled_depths, color='cyan', label='Normalized Depth', width=bar_width) # Adjust width here
Algorithmic = ax3.bar(np.array(num_qubits_range) + bar_width / 2,avg_circuit_depths, color='deepskyblue', label='Algorithmic Depth', width=bar_width) # Adjust width here
autolabel(Normalized,ax3,str='{:.2f}')
autolabel(Algorithmic,ax3,str='{:.2f}')
ax3.set_xlabel('Number of Qubits')
ax3.set_ylabel('Average Circuit Depth')
ax3.set_title('Average Circuit Depth vs Number of Qubits')
ax3.legend()
if gate_counts_plots == True:
ax4.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
Normalized_1Q_counts = ax4.bar(np.array(num_qubits_range) - bar_width / 2, avg_1Q_Transpiled_gate_counts, color='cyan', label='Normalized Gate Counts', width=bar_width) # Adjust width here
Algorithmic_1Q_counts = ax4.bar(np.array(num_qubits_range) + bar_width / 2, avg_1Q_algorithmic_gate_counts, color='deepskyblue', label='Algorithmic Gate Counts', width=bar_width) # Adjust width here
autolabel(Normalized_1Q_counts,ax4,str='{}')
autolabel(Algorithmic_1Q_counts,ax4,str='{}')
ax4.set_xlabel('Number of Qubits')
ax4.set_ylabel('Average 1-Qubit Gate Counts')
ax4.set_title('Average 1-Qubit Gate Counts vs Number of Qubits')
ax4.legend()
ax5.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
Normalized_2Q_counts = ax5.bar(np.array(num_qubits_range) - bar_width / 2, avg_2Q_Transpiled_gate_counts, color='cyan', label='Normalized Gate Counts', width=bar_width) # Adjust width here
Algorithmic_2Q_counts = ax5.bar(np.array(num_qubits_range) + bar_width / 2, avg_2Q_algorithmic_gate_counts, color='deepskyblue', label='Algorithmic Gate Counts', width=bar_width) # Adjust width here
autolabel(Normalized_2Q_counts,ax5,str='{}')
autolabel(Algorithmic_2Q_counts,ax5,str='{}')
ax5.set_xlabel('Number of Qubits')
ax5.set_ylabel('Average 2-Qubit Gate Counts')
ax5.set_title('Average 2-Qubit Gate Counts vs Number of Qubits')
ax5.legend()
ax6.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
Hellinger = ax6.bar(np.array(num_qubits_range) - bar_width / 2, avg_Hf, width=bar_width, label='Hellinger Fidelity',color='cyan') # Adjust width here
Normalized = ax6.bar(np.array(num_qubits_range) + bar_width / 2, avg_f, width=bar_width, label='Normalized Fidelity', color='deepskyblue') # Adjust width here
autolabel(Hellinger,ax6,str='{:.2f}')
autolabel(Normalized,ax6,str='{:.2f}')
ax6.set_xlabel('Number of Qubits')
ax6.set_ylabel('Average Value')
ax6.set_title("Fidelity Comparison")
ax6.legend()
if Memory_utilization_plot == True:
ax7.set_xticks(range(min(num_qubits_range), max(num_qubits_range)+1, skp_qubits))
x = ax7.bar(num_qubits_range, max_memory, color='turquoise', width=bar_width, label="Memory Utilizations")
autolabel(ax7.patches, ax7)
ax7.set_xlabel('Number of Qubits')
ax7.set_ylabel('Maximum Memory Utilized (MB)')
ax7.set_title('Memory Utilized vs Number of Qubits',fontsize=14)
plt.tight_layout(rect=[0, 0, 1, 0.96])
if saveplots == True:
plt.savefig("ParameterPlotsSample.jpg")
plt.show()
# Quantum Volume Plot
Suptitle = f"Volumetric Positioning - {platform}"
appname=benchmark_name
QV = 2048 if QV_ is None else QV_  # default to QV = 2048 when no quantum volume was supplied
depth_base = 2
ax = plot_volumetric_background(max_qubits=max_qbits, QV=QV,depth_base=depth_base, suptitle=Suptitle, colorbar_label="Avg Result Fidelity")
w_data = num_qubits_range
# determine the maximum circuit width for the plot
w_max = max(float(w) for w in w_data)
d_tr_data = avg_transpiled_depths
f_data = avg_f
plot_volumetric_data(ax, w_data, d_tr_data, f_data, depth_base, fill=True,label=appname, labelpos=(0.4, 0.6), labelrot=15, type=1, w_max=w_max)
anno_volumetric_data(ax, depth_base,label=appname, labelpos=(0.4, 0.6), labelrot=15, type=1, fill=False)
|
https://github.com/QuCO-CSAM/Solving-Combinatorial-Optimisation-Problems-Using-Quantum-Algorithms
|
QuCO-CSAM
|
import numpy as np
from itertools import permutations
import gzip
from qiskit import *
import time
from qiskit.aqua.algorithms import VQE
from qiskit.aqua.algorithms import QAOA
from qiskit.aqua.components.optimizers import SPSA
# from qiskit.aqua.components.variational_forms import RY
from qiskit.aqua import QuantumInstance
from qiskit.aqua.components.optimizers import COBYLA
from qiskit import IBMQ
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit
from qiskit.optimization.applications.ising.tsp import TspData
import qiskit.optimization.applications.ising.tsp as tsp
from qiskit.circuit.library import TwoLocal
from qiskit.circuit.library import RealAmplitudes
# # # Load account from disk
IBMQ.load_account()
IBMQ.providers()
def readInData():
"""
    Output G : list of N by N distance matrices read from the Matrices.txt file.
"""
G = []
p = [3,4,5,6,7,8,9,10,11]
q = [i**(2) for i in p ]
m = 0
    with open("Matrices.txt", "r") as v:
        w = v.read().split()
for i in range (len(w)):
w[i] = int(float(w[i]))
for i in range (len(q)):
G.append(np.reshape(w[m:m+q[i]] , (p[i] , p[i])))
m = m + q[i]
return G
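# e.g. G[0] is the 3x3 distance matrix, G[1] the 4x4, ..., G[8] the 11x11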
distanceMatrix = readInData() #Array of different sized matrices
def determineIfFeasible(result):
"""
Determines if eigenstate is feasible or infeasible.
    Output: arr = 'Infeasible' if the eigenstate is infeasible, or arr = binary array of a feasible solution
"""
data = sorted(result['eigenstate'].items(), key=lambda item: item[1])[::-1]
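    # entries are now sorted by measurement probability, most likely state first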
for i in range(len(data)):
a = tsp.tsp_feasible(data[i][0])
arr = 'Infeasible'
if a == True:
b = str(data[i][0])
arr = [b , data[i][1]]
break
return arr
def optimal(a,b,c,f,u):
"""
    Read in the initial optimal point data used to seed the quantum algorithm
"""
openfile = open("optimal.txt" , "r")
readFile = openfile.read().split()
t = []
for i in readFile:
if i != ',':
q = len(i)
t.append(float(i[0:q-1]))
v, r, o, d, z = np.array(t[0:a]), np.array(t[a:a+b]), np.array(t[a+b : a+b+c]), np.array(t[a+b+c:a+b+c+f]), np.array(t[a+b+c+f:a+b+c+f+u])
return [v,r,o,d,z]
R = optimal(54,96,100,216,294) #Array of corresponding initial points
def quantumApproximateOptimizationAlgorithm(numIter, numShots, distanceMatrix,pValue, deviceName, initialPoint):
"""
Implementation of the QAOA
    Output: classical TSP solution (total length of tour), time taken to execute algorithm
"""
    # Map the problem to an Ising Hamiltonian
    x = TspData('tmp',len(distanceMatrix),np.zeros((3,3)),distanceMatrix)
    qubitOp, offset = tsp.get_operator(x)
    seed = 10598
    spsa = SPSA(maxiter = numIter)
    qaoa = QAOA(qubitOp, spsa, pValue, include_custom = False, initial_point = initialPoint)
my_provider = IBMQ.get_provider(ibm_hub) #Replace ibm_hub with appropriate qiskit hub name
    device = my_provider.get_backend(deviceName) # deviceName is the name of an IBM quantum device, as a string
quantum_instance = QuantumInstance(device, seed_simulator=seed, seed_transpiler=seed,shots = numShots,
skip_qobj_validation = False)
#Convert quantum result into its classical form and determine if feasible or infeasible
result = qaoa.run(quantum_instance)
answer = determineIfFeasible(result)
if answer == 'Infeasible':
solution = -1
else:
        binary = [int(p) for p in answer[0]]
        route = tsp.get_tsp_solution(binary)
solution = tsp.tsp_value(route,distanceMatrix)
return solution, result['optimizer_time']
## Example for 3 by 3 instance implemented using QAOA:
numIter = 1
numShots = 8192
distanceMatrix = distanceMatrix[0]
pValue = 3
deviceName = 'ibmq_manhattan'
initialPoint = R[0]
finalResult = quantumApproximateOptimizationAlgorithm(numIter, numShots, distanceMatrix, pValue, deviceName, initialPoint)
|
https://github.com/QuCO-CSAM/Solving-Combinatorial-Optimisation-Problems-Using-Quantum-Algorithms
|
QuCO-CSAM
|
# Important libraries and modules
import numpy as np
from itertools import permutations
import gzip
from qiskit import *
import time
from qiskit.aqua.algorithms import VQE
from qiskit.aqua.algorithms import QAOA
from qiskit.aqua.components.optimizers import SPSA
from qiskit.aqua import QuantumInstance
from qiskit.aqua.components.optimizers import COBYLA
from qiskit import IBMQ
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit
from qiskit.optimization.applications.ising.tsp import TspData
import qiskit.optimization.applications.ising.tsp as tsp
from qiskit.circuit.library import TwoLocal
from qiskit.circuit.library import RealAmplitudes
# # Load account from disk
IBMQ.load_account()
IBMQ.providers()
def readInData():
"""
    Output G : list of N by N distance matrices read from the Matrices.txt file.
"""
G = []
p = [3, 4,5,6,7,8,9,10,11]
q = [i**(2) for i in p ]
m = 0
    with open("Matrices.txt", "r") as v:
        w = v.read().split()
for i in range (len(w)):
w[i] = int(float(w[i]))
for i in range (len(q)):
G.append(np.reshape(w[m:m+q[i]] , (p[i] , p[i])))
m = m + q[i]
return G
distanceMatrix = readInData() #Array of different sized matrices
def determineIfFeasible(result):
"""
Determines if eigenstate is feasible or infeasible.
    Output: arr = 'Infeasible' if the eigenstate is infeasible, or arr = binary array of a feasible solution
"""
data = sorted(result['eigenstate'].items(), key=lambda item: item[1])[::-1]
for i in range(len(data)):
a = tsp.tsp_feasible(data[i][0])
arr = 'Infeasible'
if a == True:
b = str(data[i][0])
arr = [b , data[i][1]]
break
return arr
def optimal(a,b,c,f,u):
"""
    Read in the initial optimal point data used to seed the quantum algorithm
"""
openfile = open("optimal.txt" , "r")
readFile = openfile.read().split()
t = []
for i in readFile:
if i != ',':
q = len(i)
t.append(float(i[0:q-1]))
v, r, o, d, z = np.array(t[0:a]), np.array(t[a:a+b]), np.array(t[a+b : a+b+c]), np.array(t[a+b+c:a+b+c+f]), np.array(t[a+b+c+f:a+b+c+f+u])
return [v,r,o,d,z]
R = optimal(54,96,100,216,294) #Array of corresponding initial points
def variationalQuantumEigensolver(numIter,numShots,distanceMatrix, varForm, initialPoint, deviceName):
"""
Implementation of the VQE
    Output: classical TSP solution (total length of tour) and time taken to execute algorithm
"""
    # Map the problem to an Ising Hamiltonian
    x = TspData('tmp',len(distanceMatrix),np.zeros((3,3)),distanceMatrix)
qubitOp ,offset = tsp.get_operator(x)
seed = 10598
# Generate a circuit
spsa = SPSA(maxiter = numIter)
if varForm == 'vf1':
ry = RealAmplitudes(qubitOp.num_qubits, entanglement='linear')
elif varForm == 'vf2':
ry = TwoLocal(qubitOp.num_qubits, 'ry', 'cz', reps=5, entanglement='linear')
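    # both ansaetze are hardware-efficient: RealAmplitudes applies Ry rotations with
    # CX entanglers, while this TwoLocal variant uses Ry rotations with CZ entanglers (5 reps)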
runVqe = VQE(qubitOp, ry, spsa,include_custom=True, initial_point = initialPoint)
my_provider = IBMQ.get_provider(ibm_hub) #Replace ibm_hub with appropriate qiskit hub name
device = my_provider.get_backend(deviceName) # deviceName is the Device of IBM quantum device in a string
quantum_instance = QuantumInstance(device, seed_simulator=seed, seed_transpiler=seed,shots = numShots,
skip_qobj_validation = False)
result = runVqe.run(quantum_instance)
#Convert quantum result into its classical form and determine if feasible or infeasible
answer = determineIfFeasible(result)
if answer == 'Infeasible':
solution = -1
else:
        binary = [int(p) for p in answer[0]]
        route = tsp.get_tsp_solution(binary)
solution = tsp.tsp_value(route, distanceMatrix)
return solution, result['optimizer_time']
## Example for 3 by 3 instance implemented using VQE:
numIter = 1
numShots = 1024
distanceMatrix = distanceMatrix[0]
varForm = 'vf1' # 'vf1' selects the RealAmplitudes form; 'vf2' selects the TwoLocal form
deviceName = 'ibmq_cambridge'
initialPoint = R[0]
finalResult = variationalQuantumEigensolver(numIter,numShots, distanceMatrix, varForm, initialPoint, deviceName)
|
https://github.com/MuhammadMiqdadKhan/Solution-of-IBM-s-Global-Quantum-Challenge-2020
|
MuhammadMiqdadKhan
|
# Cell 1
import numpy as np
from qiskit import Aer, QuantumCircuit, execute
from qiskit.visualization import plot_histogram
from IPython.display import display, Math, Latex
from may4_challenge import plot_state_qsphere
from may4_challenge.ex1 import minicomposer
from may4_challenge.ex1 import check1, check2, check3, check4, check5, check6, check7, check8
from may4_challenge.ex1 import return_state, vec_in_braket, statevec
# Cell 2
# press shift + return to run this code cell
# then, click on the gate that you want to apply to your qubit
# next, you have to choose the qubit that you want to apply it to (choose '0' here)
# click on clear to restart
minicomposer(1, dirac=True, qsphere=True)
# Cell 3
def create_circuit():
qc = QuantumCircuit(1)
qc.x(0)
return qc
# check solution
qc = create_circuit()
state = statevec(qc)
check1(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
# Cell 4
def create_circuit2():
qc = QuantumCircuit(1)
qc.h(0)
return qc
qc = create_circuit2()
state = statevec(qc)
check2(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
# Cell 5
def create_circuit3():
qc = QuantumCircuit(1)
qc.x(0)
qc.h(0)
return qc
qc = create_circuit3()
state = statevec(qc)
check3(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
# Cell 6
def create_circuit4():
qc = QuantumCircuit(1)
qc.h(0)
qc.sdg(0)
return qc
qc = create_circuit4()
state = statevec(qc)
check4(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
# Cell 7
# press shift + return to run this code cell
# then, click on the gate that you want to apply followed by the qubit(s) that you want it to apply to
# for controlled gates, the first qubit you choose is the control qubit and the second one the target qubit
# click on clear to restart
minicomposer(2, dirac = True, qsphere = True)
# Cell 8
def create_circuit():
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
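    # H then CNOT prepares the Bell state (|00> + |11>)/sqrt(2)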
return qc
qc = create_circuit()
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
check5(state)
qc.draw(output='mpl') # we draw the circuit
# Cell 9
def create_circuit6():
qc = QuantumCircuit(2,2) # this time, we not only want two qubits, but also
# two classical bits for the measurement later
qc.h(0)
qc.x(1)
qc.cx(0, 1)
qc.z(0)
return qc
qc = create_circuit6()
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
check6(state)
qc.measure(0, 0) # we perform a measurement on qubit q_0 and store the information on the classical bit c_0
qc.measure(1, 1) # we perform a measurement on qubit q_1 and store the information on the classical bit c_1
qc.draw(output='mpl') # we draw the circuit
# Cell 10
def run_circuit(qc):
backend = Aer.get_backend('qasm_simulator') # we choose the simulator as our backend
result = execute(qc, backend, shots = 1000).result() # we run the simulation
counts = result.get_counts() # we get the counts
return counts
counts = run_circuit(qc)
print(counts)
plot_histogram(counts) # let us plot a histogram to see the possible outcomes and corresponding probabilities
# Cell 11
def create_circuit7():
qc = QuantumCircuit(2)
qc.rx(np.pi/3,0)
qc.x(1)
qc.swap(0, 1)
return qc
qc = create_circuit7()
# apply the remaining gates directly; a nested, never-called function would leave the state unchanged
qc.x(0)
qc.h(1)
qc.sdg(1)
qc.swap(0, 1)
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
check7(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
# Cell 12
def create_circuit8():
    qc = QuantumCircuit(3, 3)  # three qubits plus three classical bits for the measurements below
    qc.h(2)
    qc.cx(2, 1)
    qc.cx(1, 0)
    return qc
qc = create_circuit8()
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
qc.measure(0, 0) # we perform a measurement on qubit q_0 and store the information on the classical bit c_0
qc.measure(1, 1) # we perform a measurement on qubit q_1 and store the information on the classical bit c_1
qc.measure(2, 2)
qc.draw(output='mpl') # we draw the circuit
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
#
def run_circuit(qc):
backend = Aer.get_backend('qasm_simulator') # we choose the simulator as our backend
result = execute(qc, backend, shots = 2000).result() # we run the simulation
counts = result.get_counts() # we get the counts
return counts
counts = run_circuit(qc)
#
print(counts)
check8(counts)
plot_histogram(counts)
|
https://github.com/MuhammadMiqdadKhan/Solution-of-IBM-s-Global-Quantum-Challenge-2020
|
MuhammadMiqdadKhan
|
#initialization
%matplotlib inline
# Importing standard Qiskit libraries and configuring account
from qiskit import IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.providers.ibmq import least_busy
from qiskit.tools.jupyter import *
from qiskit.tools.monitor import job_monitor
from qiskit.visualization import *
from qiskit.ignis.mitigation.measurement import complete_meas_cal, CompleteMeasFitter
provider = IBMQ.load_account() # load your IBM Quantum Experience account
# If you are a member of the IBM Q Network, fill your hub, group, and project information to
# get access to your premium devices.
# provider = IBMQ.get_provider(hub='', group='', project='')
from may4_challenge.ex2 import get_counts, show_final_answer
num_qubits = 5
meas_calibs, state_labels = complete_meas_cal(range(num_qubits), circlabel='mcal')
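# complete_meas_cal returns 2**5 = 32 calibration circuits, one preparing each
# computational basis state, together with the matching state labels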
# find the least busy device that has at least 5 qubits
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= num_qubits and
not x.configuration().simulator and x.status().operational==True))
backend
# run experiments on a real device
shots = 8192
experiments = transpile(meas_calibs, backend=backend, optimization_level=3)
job = backend.run(assemble(experiments, shots=shots))
print(job.job_id())
%qiskit_job_watcher
# get measurement filter
cal_results = job.result()
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
meas_filter = meas_fitter.filter
#print(meas_fitter.cal_matrix)
meas_fitter.plot_calibration()
# get noisy counts
noisy_counts = get_counts(backend)
plot_histogram(noisy_counts[0])
# apply measurement error mitigation and plot the mitigated counts
mitigated_counts_0 = meas_filter.apply(noisy_counts[0])
plot_histogram([mitigated_counts_0, noisy_counts[0]])
# uncomment whatever answer you think is correct
#answer1 = 'a'
#answer1 = 'b'
answer1 = 'c'
#answer1 = 'd'
# plot noisy counts
plot_histogram(noisy_counts[1])
# apply measurement error mitigation
mitigated_counts_1 = meas_filter.apply(noisy_counts[1])
plot_histogram([mitigated_counts_1, noisy_counts[1]])
# uncomment whatever answer you think is correct
#answer2 = 'a'
#answer2 = 'b'
#answer2 = 'c'
answer2 = 'd'
# plot noisy counts
plot_histogram(noisy_counts[2])
# apply measurement error mitigation
mitigated_counts_2 = meas_filter.apply(noisy_counts[2])
plot_histogram([mitigated_counts_2, noisy_counts[2]])
# uncomment whatever answer you think is correct
#answer3 = 'a'
answer3 = 'b'
#answer3 = 'c'
#answer3 = 'd'
# plot noisy counts
plot_histogram(noisy_counts[3])
# apply measurement error mitigation
mitigated_counts_3 = meas_filter.apply(noisy_counts[3])
plot_histogram([mitigated_counts_3, noisy_counts[3]])
# uncomment whatever answer you think is correct
#answer4 = 'a'
answer4 = 'b'
#answer4 = 'c'
#answer4 = 'd'
# answer string
show_final_answer(answer1, answer2, answer3, answer4)
|
https://github.com/MuhammadMiqdadKhan/Solution-of-IBM-s-Global-Quantum-Challenge-2020
|
MuhammadMiqdadKhan
|
%matplotlib inline
# Importing standard Qiskit libraries
import random
from qiskit import execute, Aer, IBMQ
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from may4_challenge.ex3 import alice_prepare_qubit, check_bits, check_key, check_decrypted, show_message
# Configuring account
provider = IBMQ.load_account()
backend = provider.get_backend('ibmq_qasm_simulator')  # use the cloud qasm simulator backend
# Initial setup
random.seed(84) # do not change this seed, otherwise you will get a different key
# This is your 'random' bit string that determines your bases
numqubits = 100
bob_bases = '{0:0100b}'.format(random.getrandbits(numqubits))
def bb84():
print('Bob\'s bases:', bob_bases)
# Now Alice will send her bits one by one...
all_qubit_circuits = []
for qubit_index in range(numqubits):
# This is Alice creating the qubit
thisqubit_circuit = alice_prepare_qubit(qubit_index)
# This is Bob finishing the protocol below
bob_measure_qubit(bob_bases, qubit_index, thisqubit_circuit)
# We collect all these circuits and put them in an array
all_qubit_circuits.append(thisqubit_circuit)
# Now execute all the circuits for each qubit
results = execute(all_qubit_circuits, backend=backend, shots=1).result()
# And combine the results
bits = ''
for qubit_index in range(numqubits):
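        # shots=1, so each per-qubit result has exactly one counts key: the measured bit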
bits += [measurement for measurement in results.get_counts(qubit_index)][0]
return bits
# Here is your task
def bob_measure_qubit(bob_bases, qubit_index, qubit_circuit):
    # basis bit '1': measure in the X basis by applying H before the Z-basis measurement
    if bob_bases[qubit_index] == '1':
        qubit_circuit.h(0)
    # measure Alice's qubit into classical bit 0
    qubit_circuit.measure(0, 0)
bits = bb84()
print('Bob\'s bits: ', bits)
check_bits(bits)
alice_bases = '1000000000010001111111001101100101000111110100110111111000110000011000001001100011100111010010000110' # Alice's bases bits
bob_bases = '1100111010011111111111110100000111010100100010011001001110100001010010111011010001011001111111011111'
key = ""
for qubit_index in range(len(bob_bases)):
if alice_bases[qubit_index] == bob_bases[qubit_index]:
key = key + bits[qubit_index]
else:
continue
print(key)
check_key(key)
m = '0011011010100011101000001100010000001000011000101110110111100111111110001111100011100101011010111010111010001'\
'1101010010111111100101000011010011011011011101111010111000101111111001010101001100101111011' # encrypted message
key='10000010001110010011101001010000110000110011100000100000100011100100111010010100001100001100111000001000001000111001001110100101000011000011001110000010000010001110010011101001010000110000110011100000'
c=int(m, 2) ^ int(key, 2)
decrypted = "{0:b}".format(c)
print(decrypted)
check_decrypted(decrypted)
MORSE_CODE_DICT = { 'a':'.-', 'b':'-...',
'c':'-.-.', 'd':'-..', 'e':'.',
'f':'..-.', 'g':'--.', 'h':'....',
'i':'..', 'j':'.---', 'k':'-.-',
'l':'.-..', 'm':'--', 'n':'-.',
'o':'---', 'p':'.--.', 'q':'--.-',
'r':'.-.', 's':'...', 't':'-',
'u':'..-', 'v':'...-', 'w':'.--',
'x':'-..-', 'y':'-.--', 'z':'--..',
'1':'.----', '2':'..---', '3':'...--',
'4':'....-', '5':'.....', '6':'-....',
'7':'--...', '8':'---..', '9':'----.',
'0':'-----', ', ':'--..--', '.':'.-.-.-',
'?':'..--..', '/':'-..-.', '-':'-....-',
'(':'-.--.', ')':'-.--.-'}
# insert your code here to decode Alice's Morse code
# I solved this part of the exercise manually; the code may be uploaded later.
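# Illustrative sketch only (the challenge's exact bits-to-Morse rendering is not
# reproduced here): given dots/dashes with letters separated by spaces and words
# by '/', invert MORSE_CODE_DICT and look each token up.
INVERSE_MORSE = {code: char for char, code in MORSE_CODE_DICT.items()}
def decode_morse(morse_string):
    return ''.join(' ' if token == '/' else INVERSE_MORSE.get(token, '?')
                   for token in morse_string.split())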
solution="http://reddit.com/R/MAY4QUANTUM"
show_message(solution)
|
https://github.com/MuhammadMiqdadKhan/Solution-of-IBM-s-Global-Quantum-Challenge-2020
|
MuhammadMiqdadKhan
|
import numpy as np
import scipy
from math import pi
from qiskit import QuantumCircuit, transpile
from qiskit.visualization import *
from may4_challenge.ex4 import get_unitary, check_circuit, submit_circuit
U = get_unitary()
#U = get_unitary()
#pi = 3.141
#print(U)
#print("U has shape", U.shape)
#H = scipy.linalg.hadamard(16)/4
#U = np.dot(H, U)
#U = np.dot(U, H)
#qc = QuantumCircuit(4)
#print(pi)
#qc.u3(pi/2,0,pi,0)
#qc.u3(pi/2,0,pi,1)
#qc.u3(pi/2,0,pi,2)
#qc.u3(pi/2,0,pi,3)
#qc.isometry(U,[0,1,2,3],[])
#qc.u3(pi/2,0,pi,0)
#qc.u3(pi/2,0,pi,1)
#qc.u3(pi/2,0,pi,2)
#qc.u3(pi/2,0,pi,3)
#qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=0, optimization_level=2)
#print('gates = ', qc.count_ops())
#print('depth = ', qc.depth()
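# Approach (inferred from the code below, stated here as an assumption): conjugating
# U by a layer of Hadamards on all four qubits makes H.U.H (nearly) diagonal, so the
# isometry decomposition needs far fewer CNOTs; u3(pi/2, 0, pi) is exactly a Hadamard,
# which the two single-qubit layers below restore.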
from scipy.linalg import hadamard
H = hadamard(16, dtype=complex)/4
U = np.dot(H, U)
U = np.dot(U, H)
qc = QuantumCircuit(4)
qc.u3(pi/2,0,pi,range(4))
qc.isometry(U,[0,1,2,3],[])
qc.u3(pi/2,0,pi,range(4))
qc = transpile(qc, basis_gates = ['u3', 'cx'], seed_transpiler=5, optimization_level=2)
##### check your quantum circuit by running the next line
check_circuit(qc)
from may4_challenge.ex4 import submit_circuit
submit_circuit(qc)
|
https://github.com/ctuning/ck-qiskit
|
ctuning
|
import numpy as np
import IPython
import ipywidgets as widgets
import colorsys
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit,QuantumRegister,ClassicalRegister
from qiskit import execute, Aer, BasicAer
from qiskit.visualization import plot_bloch_multivector
from qiskit.tools.jupyter import *
from qiskit.visualization import *
import os
import glob
import moviepy.editor as mpy
import seaborn as sns
sns.set()
'''========State Vector======='''
def getStateVector(qc):
'''get state vector in row matrix form'''
backend = BasicAer.get_backend('statevector_simulator')
job = execute(qc,backend).result()
vec = job.get_statevector(qc)
return vec
def vec_in_braket(vec: np.ndarray) -> str:
'''get bra-ket notation of vector'''
nqubits = int(np.log2(len(vec)))
state = ''
for i in range(len(vec)):
        rounded = np.round(vec[i], 3)
if rounded != 0:
basis = format(i, 'b').zfill(nqubits)
            state += str(rounded).replace('-0j', '+0j')
state += '|' + basis + '\\rangle + '
state = state.replace("j", "i")
return state[0:-2].strip()
def vec_in_text_braket(vec):
return '$$\\text{{State:\n $|\\Psi\\rangle = $}}{}$$'\
.format(vec_in_braket(vec))
def writeStateVector(vec):
return widgets.HTMLMath(vec_in_text_braket(vec))
'''==========Bloch Sphere ========='''
def getBlochSphere(qc):
'''plot multi qubit bloch sphere'''
vec = getStateVector(qc)
return plot_bloch_multivector(vec)
def getBlochSequence(path,figs):
'''plot block sphere sequence and save it
to a folder for gif movie creation'''
try:
os.mkdir(path)
except:
        print('Directory already exists')
for i,fig in enumerate(figs):
fig.savefig(path+"/rot_"+str(i)+".png")
return
def getBlochGif(figs,path,fname,fps,remove = True):
'''create gif movie from provided images'''
file_list = glob.glob(path + "/*.png")
list.sort(file_list, key=lambda x: int(x.split('_')[1].split('.png')[0]))
clip = mpy.ImageSequenceClip(file_list, fps=fps)
clip.write_gif('{}.gif'.format(fname), fps=fps)
'''remove all image files after gif creation'''
if remove:
for file in file_list:
os.remove(file)
return
'''=========Matrix================='''
def getMatrix(qc):
'''get numpy matrix representing a circuit'''
backend = BasicAer.get_backend('unitary_simulator')
job = execute(qc, backend)
ndArray = job.result().get_unitary(qc, decimals=3)
Matrix = np.matrix(ndArray)
return Matrix
def plotMatrix(M):
'''visualize a matrix using seaborn heatmap'''
MD = [["0" for i in range(M.shape[0])] for j in range(M.shape[1])]
for i in range(M.shape[0]):
for j in range(M.shape[1]):
r = M[i,j].real
im = M[i,j].imag
MD[i][j] = str(r)[0:4]+ " , " +str(im)[0:4]
plt.figure(figsize = [2*M.shape[1],M.shape[0]])
sns.heatmap(np.abs(M),\
annot = np.array(MD),\
fmt = '',linewidths=.5,\
cmap='Blues')
return
'''=========Measurement========'''
def getCount(qc):
backend= Aer.get_backend('qasm_simulator')
result = execute(qc,backend).result()
counts = result.get_counts(qc)
return counts
def plotCount(counts, figsize):
    return plot_histogram(counts, figsize=figsize)
'''========Phase============'''
def getPhaseCircle(vec):
    '''get phase color, angle and radius of the phase circle'''
Phase = []
for i in range(len(vec)):
angles = (np.angle(vec[i]) + (np.pi * 4)) % (np.pi * 2)
rgb = colorsys.hls_to_rgb(angles / (np.pi * 2), 0.5, 0.5)
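        # the hue encodes the phase angle; lightness and saturation stay fixed at 0.5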
mag = np.abs(vec[i])
Phase.append({"rgb":rgb,"mag": mag,"ang":angles})
return Phase
def getPhaseDict(QCs):
'''get a dictionary of state vector phase circles for
each quantum circuit and populate phaseDict list'''
phaseDict = []
for qc in QCs:
vec = getStateVector(qc)
Phase = getPhaseCircle(vec)
phaseDict.append(Phase)
return phaseDict
def plotiPhaseCircle(phaseDict,depth,path,show=False,save=False):
'''plot any quantum circuit phase circle diagram
from provided phase Dictionary'''
r = 0.30
dx = 1.0
nqubit = len(phaseDict[0])
fig = plt.figure(figsize = [depth,nqubit])
for i in range(depth):
x0 = i
for j in range(nqubit):
y0 = j+1
try:
mag = phaseDict[i][j]['mag']
ang = phaseDict[i][j]['ang']
rgb = phaseDict[i][j]['rgb']
ax=plt.gca()
circle1= plt.Circle((dx+x0,y0), radius = r, color = 'white')
ax.add_patch(circle1)
circle2= plt.Circle((dx+x0,y0), radius= r*mag, color = rgb)
ax.add_patch(circle2)
line = plt.plot((dx+x0,dx+x0+(r*mag*np.cos(ang))),\
(y0,y0+(r*mag*np.sin(ang))),color = "black")
except:
ax=plt.gca()
circle1= plt.Circle((dx+x0,y0), radius = r, color = 'white')
ax.add_patch(circle1)
plt.ylim(nqubit+1,0)
plt.yticks([y+1 for y in range(nqubit)])
plt.xticks([x for x in range(depth+2)])
plt.xlabel("Circuit Depth")
plt.ylabel("Basis States")
    if save:
        plt.savefig(path + ".png")
    if show:
        plt.show()
    plt.close(fig)
return
def plotiPhaseCircle_rotated(phaseDict,depth,path,show=False,save=False):
'''plot any quantum circuit phase circle diagram
from provided phase Dictionary'''
r = 0.30
dy = 1.0
nqubit = len(phaseDict[0])
fig = plt.figure(figsize = [nqubit,depth])
for i in range(depth):
y0 = i
for j in range(nqubit):
x0 = j+1
try:
mag = phaseDict[i][j]['mag']
ang = phaseDict[i][j]['ang']
rgb = phaseDict[i][j]['rgb']
ax=plt.gca()
circle1= plt.Circle((x0,dy+y0), radius = r, color = 'white')
ax.add_patch(circle1)
circle2= plt.Circle((x0,dy+y0), radius= r*mag, color = rgb)
ax.add_patch(circle2)
line = plt.plot((x0,x0+(r*mag*np.cos(ang))),\
(dy+y0,dy+y0+(r*mag*np.sin(ang))),color = "black")
except:
ax=plt.gca()
circle1= plt.Circle((x0,dy+y0), radius = r, color = 'white')
ax.add_patch(circle1)
plt.ylim(0,depth+1)
plt.yticks([x+1 for x in range(depth)])
plt.xticks([y for y in range(nqubit+2)])
plt.ylabel("Circuit Depth")
plt.xlabel("Basis States")
    if save:
        plt.savefig(path + ".png")
    if show:
        plt.show()
    plt.close(fig)
return
def getPhaseSequence(QCs,path,rotated=False):
'''plot a sequence of phase circle diagram for a given
sequence of quantum circuits'''
try:
os.mkdir(path)
except:
        print("Directory already exists")
depth = len(QCs)
phaseDict =[]
for i,qc in enumerate(QCs):
vec = getStateVector(qc)
Phase = getPhaseCircle(vec)
phaseDict.append(Phase)
ipath = path + "phase_" + str(i)
if rotated:
plotiPhaseCircle_rotated(phaseDict,depth,ipath,save=True,show=False)
else:
plotiPhaseCircle(phaseDict,depth,ipath,save=True,show=False)
return
def getPhaseGif(path,fname,fps,remove = True):
'''create a gif movie file from phase circle figures'''
file_list = glob.glob(path+ "/*.png")
list.sort(file_list, key=lambda x: int(x.split('_')[1].split('.png')[0]))
clip = mpy.ImageSequenceClip(file_list, fps=fps)
clip.write_gif('{}.gif'.format(fname), fps=fps)
'''remove all image files after gif creation'''
if remove:
for file in file_list:
os.remove(file)
return
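'''=========Usage example========='''
# Illustrative sketch (not part of the original helpers): build a Bell pair and
# inspect it with the utilities above.
if __name__ == "__main__":
    demo_qc = QuantumCircuit(2, 2)
    demo_qc.h(0)
    demo_qc.cx(0, 1)
    print(vec_in_braket(getStateVector(demo_qc)))  # 0.707|00> + 0.707|11> (up to notation)
    demo_qc.measure([0, 1], [0, 1])
    print(getCount(demo_qc))  # roughly half '00' and half '11'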
|
https://github.com/ctuning/ck-qiskit
|
ctuning
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Example use of the initialize gate to prepare arbitrary pure states.
"""
import math
from qiskit import QuantumCircuit, execute, BasicAer
###############################################################
# Make a quantum circuit for state initialization.
###############################################################
circuit = QuantumCircuit(4, 4, name="initializer_circ")
desired_vector = [
1 / math.sqrt(4) * complex(0, 1),
1 / math.sqrt(8) * complex(1, 0),
0,
0,
0,
0,
0,
0,
1 / math.sqrt(8) * complex(1, 0),
1 / math.sqrt(8) * complex(0, 1),
0,
0,
0,
0,
1 / math.sqrt(4) * complex(1, 0),
1 / math.sqrt(8) * complex(1, 0),
]
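# the squared magnitudes sum to 1: 2*(1/4) + 4*(1/8) = 1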
circuit.initialize(desired_vector, [0, 1, 2, 3])
circuit.measure([0, 1, 2, 3], [0, 1, 2, 3])
print(circuit)
###############################################################
# Execute on a simulator backend.
###############################################################
shots = 10000
# Desired vector
print("Desired probabilities: ")
print([format(abs(x * x), ".3f") for x in desired_vector])
# Initialize on local simulator
sim_backend = BasicAer.get_backend("qasm_simulator")
job = execute(circuit, sim_backend, shots=shots)
result = job.result()
counts = result.get_counts(circuit)
qubit_strings = [format(i, "04b") for i in range(2**4)]
print("Probabilities from simulator: ")
print([format(counts.get(s, 0) / shots, ".3f") for s in qubit_strings])
|