repo
stringclasses 900
values | file
stringclasses 754
values | content
stringlengths 4
215k
|
|---|---|---|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import datetime
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Global plot styling: LaTeX text rendering, fixed figure size and resolution.
plt.rcParams.update({"text.usetex": True})
plt.rcParams["figure.figsize"] = (6,4)
mpl.rcParams["figure.dpi"] = 200
from qiskit_ibm_runtime import Estimator, Session, QiskitRuntimeService, Options
from qiskit.quantum_info import SparsePauliOp
from qiskit import QuantumCircuit
# Uses the saved IBM Quantum account credentials by default.
service = QiskitRuntimeService()
# NOTE(review): "backend_simulator" looks like a placeholder backend name —
# confirm the intended simulator (e.g. "ibmq_qasm_simulator").
backend_simulator = "backend_simulator"
backend = "ibmq_montreal"
qubits = 4
# One Trotter layer on 4 qubits: RX rotations on every qubit followed by
# two-qubit interactions implemented as CX-RZ-CX blocks.
trotter_layer = QuantumCircuit(qubits)
trotter_layer.rx(0.1, range(qubits))
trotter_layer.cx(0, 1)
trotter_layer.cx(2, 3)
trotter_layer.rz(-0.2, [1, 3])
trotter_layer.cx(0, 1)
trotter_layer.cx(2, 3)
trotter_layer.cx(1, 2)
trotter_layer.rz(-0.2, 2)
trotter_layer.cx(1, 2)

# Build circuits with 1 .. num_steps-1 repetitions of the Trotter layer.
num_steps = 6
trotter_circuit_list = []
for i in range(1, num_steps):
    trotter_circuit = QuantumCircuit(qubits)
    for _ in range(i):
        trotter_circuit = trotter_circuit.compose(trotter_layer)
    trotter_circuit_list.append(trotter_circuit)
    # fixed: the f-string was terminated with a stray backtick (SyntaxError)
    print(f'Trotter circuit with {i} Trotter steps')
    display(trotter_circuit.draw(fold=-1))

# Observable <ZZZZ>, one copy per circuit.
obs = SparsePauliOp("Z"*qubits)
obs_list = [obs]*len(trotter_circuit_list)
# Reference run on the (noise-free) simulator: no transpiler optimization,
# no error mitigation, 1000 shots per circuit.
options = Options()
options.execution.shots = 1000
options.optimization_level = 0  # No optimization
options.resilience_level = 0  # No mitigation

with Session(service=service, backend=backend_simulator) as session:
    estimator_sim = Estimator(session=session, options=options)
    job_sim = estimator_sim.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_sim.job_id)
    print(job_sim.result())

# Ideal expectation values and their shot-noise standard errors
# (std err = sqrt(variance / shots), from the per-circuit metadata).
expvals_ideal = job_sim.result().values
expvals_ideal_variance = [metadata['variance']/metadata['shots'] for metadata in job_sim.result().metadata]
std_error_ideal = np.sqrt(expvals_ideal_variance)
# Unmitigated run on the real device, with the same settings as the ideal
# run so the two result sets are directly comparable.
options = Options()
options.execution.shots = 1000
options.optimization_level = 0  # No optimization
options.resilience_level = 0  # No error mitigation

with Session(service=service, backend=backend) as session:
    estimator = Estimator(session=session, options=options)
    job = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job.job_id)
    print(job.result())

# Unmitigated expectation values and shot-noise standard errors.
expvals_unmit = job.result().values
expvals_unmit_variance = [metadata['variance']/metadata['shots'] for metadata in job.result().metadata]
std_error_unmit = np.sqrt(expvals_unmit_variance)
# Run with dynamical decoupling: optimization_level=3 inserts DD sequences
# during transpilation; measurement error mitigation stays off.
options = Options()
options.execution.shots = 1000
options.optimization_level = 3  # Dynamical decoupling
options.resilience_level = 0  # No error mitigation

with Session(service=service, backend=backend) as session:
    estimator = Estimator(session=session, options=options)
    job_dd = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_dd.job_id)
    print(job_dd.result())

expvals_unmit_dd = job_dd.result().values
expvals_unmit_dd_variance = [metadata['variance']/metadata['shots'] for metadata in job_dd.result().metadata]
std_error_dd = np.sqrt(expvals_unmit_dd_variance)

# Compare ideal vs. unmitigated vs. dynamical decoupling.
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_dd, std_error_dd, fmt = 'o', linestyle = '-', capsize=4, c='blue', label='Dynamical decoupling')
# Raw string avoids the invalid "\l" escape; the f-prefix was unused.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
# T-REx (twirled readout error extinction): resilience_level=1 mitigates
# measurement errors; transpiler optimization stays off.
options = Options()
options.resilience_level = 1  # T-REx
options.optimization_level = 0  # No optimization
options.execution.shots = 1000

with Session(service=service, backend=backend) as session:
    estimator = Estimator(session=session, options=options)
    job_trex = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_trex.job_id)
    print(job_trex.result())

expvals_unmit_trex = job_trex.result().values
expvals_unmit_trex_variance = [metadata['variance']/metadata['shots'] for metadata in job_trex.result().metadata]
std_error_trex = np.sqrt(expvals_unmit_trex_variance)

# Compare ideal vs. unmitigated vs. T-REx.
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_trex, std_error_trex, fmt = 'o', linestyle = '-', capsize=4, c='violet', label='T-REx')
# Raw string avoids the invalid "\l" escape; the f-prefix was unused.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
# ZNE (zero-noise extrapolation): resilience_level=2.
options = Options()
options.execution.shots = 1000
options.optimization_level = 0  # No optimization
options.resilience_level = 2  # ZNE

with Session(service=service, backend=backend) as session:
    estimator = Estimator(session=session, options=options)
    job_zne = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_zne.job_id)
    print(job_zne.result())

expvals_unmit_zne = job_zne.result().values
# Standard error: coming soon!

# Compare ideal vs. unmitigated vs. ZNE (zero error bars as placeholders).
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_zne, [0]*(num_steps-1), fmt = 'o', linestyle = '-', capsize=4, c='cyan', label='ZNE')
plt.xlabel('No. Trotter Steps')
# Raw string avoids the invalid "\l" escape; the f-prefix was unused.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
def interim_results_callback(job_id, result):
    """Print interim results as they stream back from a Runtime job.

    Args:
        job_id: ID of the job that produced the update.
        result: The interim result payload sent by the service.
    """
    now = datetime.datetime.now()
    print(now, "*** Callback ***", result, "\n")
# PEC (probabilistic error cancellation): resilience_level=3. Fewer shots
# are requested here — presumably because PEC's sampling overhead multiplies
# the effective cost (see the sampling_overhead metadata plotted below).
options = Options()
options.optimization_level = 0  # No optimization
options.execution.shots = 100
options.resilience_level = 3  # PEC
options.environment.callback = interim_results_callback  # stream interim results

with Session(service=service, backend=backend) as session:
    estimator_pec = Estimator(session=session, options=options)
    job_pec = estimator_pec.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_pec.job_id)

expvals_pec = job_pec.result().values
# PEC reports its standard error directly in the result metadata.
std_error_pec = [metadata['standard_error'] for metadata in job_pec.result().metadata]

# Compare ideal vs. unmitigated vs. PEC.
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_pec, std_error_pec, fmt = 'd', linestyle = '-', capsize=4, c='orange', label='PEC')
# Raw string avoids the invalid "\l" escape; the f-prefix was unused.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
print(job_pec.result())
# Visualize the PEC bookkeeping metadata: one shared x-axis (Trotter steps)
# with three extra y-axes stacked along the right-hand side.
pec_metadata = job_pec.result().metadata

fig, ax = plt.subplots()
fig.subplots_adjust(right=0.75)
twin1 = ax.twinx()
twin2 = ax.twinx()
twin3 = ax.twinx()
twin2.spines.right.set_position(("axes", 1.2))
twin3.spines.right.set_position(("axes", 1.4))

steps = range(1, num_steps)
p1, = ax.plot(steps, [m["total_mitigated_layers"] for m in pec_metadata], "b-", label="Total mitigated layers")
p2, = twin1.plot(steps, [m["sampling_overhead"] for m in pec_metadata], "r-", label="Sampling overhead")
p3, = twin2.plot(steps, [m["samples"] for m in pec_metadata], "g-", label="Samples")
p4, = twin3.plot(steps, [m["shots"] for m in pec_metadata], "c-", label="Shots")

ax.set_ylim(0, 20)
twin1.set_ylim(0, 2.8)
twin2.set_ylim(0, 300)
twin3.set_ylim(0, 35000)

ax.set_xlabel("No. Trotter Steps")
ax.set_ylabel("Total mitigated layers")
twin1.set_ylabel("Sampling overhead")
twin2.set_ylabel("Samples")
twin3.set_ylabel("Shots")

# Color every axis label and tick set to match its own line.
tick_style = dict(size=4, width=1.5)
for axis, handle in ((ax, p1), (twin1, p2), (twin2, p3), (twin3, p4)):
    axis.yaxis.label.set_color(handle.get_color())
    axis.tick_params(axis='y', colors=handle.get_color(), **tick_style)

plt.xticks([1, 2, 3, 4, 5])
ax.legend(handles=[p1, p2, p3, p4])
plt.title('PEC metadata')
plt.show()
from matplotlib.pyplot import figure  # NOTE(review): unused in this cell — kept to leave the file's imports unchanged

# Summary plot: every mitigation strategy against the ideal values.
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_trex, std_error_trex, fmt = 'o', linestyle = '-', capsize=4, c='violet', label='T-REx')
plt.errorbar(range(1, num_steps), expvals_unmit_zne, [0]*(num_steps-1), fmt = 'o', linestyle = '-', capsize=4, c='cyan', label='ZNE')
plt.errorbar(range(1, num_steps), expvals_pec, std_error_pec, fmt = 'd', linestyle = '-', capsize=4, c='orange', label='PEC')
plt.title('Trotter circuits expectation value')
# Raw string avoids the invalid "\l" escape; the f-prefix was unused.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
# ZNE with explicit advanced options: noise amplification factors, the
# folding amplifier, and quadratic extrapolation to the zero-noise limit.
options = Options()
options.execution.shots = 1000
options.optimization_level = 0  # no optimization
options.resilience_level = 2  # ZNE
options.resilience.noise_factors = [1, 2, 3, 4]
options.resilience.noise_amplifier = "LocalFoldingAmplifier"
options.resilience.extrapolator = "QuadraticExtrapolator"

with Session(service=service, backend='ibmq_montreal') as session:
    estimator = Estimator(session=session, options=options)
    job_zne_options = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    print('job id:', job_zne_options.job_id)
    print(job_zne_options.result())
from qiskit.tools import jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import random
from qiskit.quantum_info import Statevector
secret = random.randint(0,7) # the owner is randomly picked
secret_string = format(secret, '03b') # format the owner in 3-bit string
oracle = Statevector.from_label(secret_string) # let the oracle know the owner
from qiskit.algorithms import AmplificationProblem
# The amplification problem marks `secret_string` as the good state to amplify.
problem = AmplificationProblem(oracle, is_good_state=secret_string)
from qiskit.algorithms import Grover

# Build Grover circuits with 1 and 2 amplification iterations.
grover_circuits = []
for iteration in range(1, 3):
    grover = Grover(iterations=iteration)
    circuit = grover.construct_circuit(problem)
    circuit.measure_all()
    grover_circuits.append(circuit)

# Grover's circuit with 1 iteration
grover_circuits[0].draw()
# Grover's circuit with 2 iterations
grover_circuits[1].draw()
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator" # use the simulator

from qiskit_ibm_runtime import Sampler, Session
# Sample both Grover circuits within a single session.
with Session(service=service, backend=backend):
    sampler = Sampler()
    job = sampler.run(circuits=grover_circuits, shots=1000)
    result = job.result()
    print(result)
from qiskit.tools.visualization import plot_histogram

# Take the 2-iteration result and pick its most probable bit string.
result_dict = result.quasi_dists[1].binary_probabilities()
answer, _ = max(result_dict.items(), key=lambda item: item[1])
print(f"As you can see, the quantum computer returned '{answer}' as the answer with highest probability.\n"
      "And the results with 2 iterations have higher probability than the results with 1 iteration."
)
# Show both iteration counts side by side.
plot_histogram(result.quasi_dists, legend=['1 iteration', '2 iterations'])
# Compare the measured answer against the secret bit string.
print(f"Quantum answer: {answer}")
print(f"Correct answer: {secret_string}")
if answer == secret_string:
    print('Success!')
else:
    print('Failure!')
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit.circuit.random import random_circuit
# Small random 2-qubit circuit; decompose once so the gates show explicitly.
circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(circuit.draw("mpl"))
from qiskit.quantum_info import SparsePauliOp
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")
from qiskit.primitives import Estimator
# Reference (local, statevector-based) Estimator primitive.
estimator = Estimator()
job = estimator.run(circuit, observable)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Expectation value: {result.values[0]}")
# Second example: a different circuit and observable, same estimator.
circuit = random_circuit(2, 2, seed=1).decompose(reps=1)
observable = SparsePauliOp("IY")
job = estimator.run(circuit, observable)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Observable: {observable.paulis}")
print(f">>> Expectation value: {result.values[0]}")
# Batched run: the i-th observable is estimated on the i-th circuit.
circuits = (
random_circuit(2, 2, seed=0).decompose(reps=1),
random_circuit(2, 2, seed=1).decompose(reps=1),
)
observables = (
SparsePauliOp("XZ"),
SparsePauliOp("IY"),
)
job = estimator.run(circuits, observables)
result = job.result()
[display(cir.draw("mpl")) for cir in circuits]
print(f">>> Observables: {[obs.paulis for obs in observables]}")
print(f">>> Expectation values: {result.values.tolist()}")
from qiskit.circuit.library import RealAmplitudes
# Parameterized ansatz: the parameter values are bound at run time.
circuit = RealAmplitudes(num_qubits=2, reps=2).decompose(reps=1)
observable = SparsePauliOp("ZI")
parameter_values = [0, 1, 2, 3, 4, 5]
job = estimator.run(circuit, observable, parameter_values)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Observable: {observable.paulis}")
print(f">>> Parameter values: {parameter_values}")
print(f">>> Expectation value: {result.values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService
# ibm_quantum channel with the saved account credentials.
service = QiskitRuntimeService(channel="ibm_quantum")
backend = service.backend("ibmq_qasm_simulator")
from qiskit.circuit.random import random_circuit
from qiskit.quantum_info import SparsePauliOp
circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(circuit.draw("mpl"))
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")
from qiskit_ibm_runtime import Estimator
# Runtime Estimator bound directly to a backend (implicit session).
estimator = Estimator(session=backend)
job = estimator.run(circuit, observable)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Expectation value: {result.values[0]}")
print(f" > Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
# Options can be set at construction time...
options = Options(optimization_level=3, environment={"log_level": "INFO"})
from qiskit_ibm_runtime import Options
# ...or field by field after construction.
options = Options()
options.resilience_level = 1
options.execution.shots = 2048
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable).result()
print(f">>> Metadata: {result.metadata[0]}")
# Per-run keyword arguments (here: shots) override the session-level options.
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable, shots=1024).result()
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
# optimization_level=3 adds dynamical decoupling
# resilience_level=1 adds readout error mitigation
options = Options(optimization_level=3, resilience_level=1)
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value: {result.values[0]}")
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Session, Estimator

# An explicit Session keeps consecutive jobs on the same backend;
# max_time caps the session's lifetime.
with Session(backend=backend, max_time="1h"):
    estimator = Estimator()
    result = estimator.run(circuit, observable).result()
    print(f">>> Expectation value from the first run: {result.values[0]}")
    result = estimator.run(circuit, observable).result()
    print(f">>> Expectation value from the second run: {result.values[0]}")
from qiskit.circuit.random import random_circuit

# A measured circuit for the Sampler (the Estimator circuit has no measurements).
sampler_circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
sampler_circuit.measure_all()
# fixed: previously displayed `circuit` (the estimator circuit) instead of
# the sampler circuit that was just built.
display(sampler_circuit.draw("mpl"))

from qiskit_ibm_runtime import Session, Sampler, Estimator
# Run both primitives in one shared session.
with Session(backend=backend):
    sampler = Sampler()
    estimator = Estimator()
    result = sampler.run(sampler_circuit).result()
    print(f">>> Quasi Distribution from the sampler job: {result.quasi_dists[0]}")
    result = estimator.run(circuit, observable).result()
    print(f">>> Expectation value from the estimator job: {result.values[0]}")
from qiskit_ibm_runtime import Session, Sampler, Estimator

# Non-blocking submission: collect the job handles first, then fetch
# both results afterwards.
with Session(backend=backend):
    sampler = Sampler()
    estimator = Estimator()
    sampler_job = sampler.run(sampler_circuit)
    estimator_job = estimator.run(circuit, observable)
    print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
    print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService, Session, Sampler, Estimator, Options

# End-to-end recipe: account -> options -> backend -> session -> primitives -> jobs -> results.
# 1. Initialize account
service = QiskitRuntimeService(channel="ibm_quantum")

# 2. Specify options, such as enabling error mitigation
options = Options(resilience_level=1)

# 3. Select a backend.
backend = service.backend("ibmq_qasm_simulator")

# 4. Create a session
with Session(backend=backend):
    # 5. Create primitive instances
    sampler = Sampler(options=options)
    estimator = Estimator(options=options)

    # 6. Submit jobs
    sampler_job = sampler.run(sampler_circuit)
    estimator_job = estimator.run(circuit, observable)

    # 7. Get results
    print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
    print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit.circuit.random import random_circuit
# measure=True appends measurements, which the Sampler requires.
circuit = random_circuit(2, 2, seed=0, measure=True).decompose(reps=1)
display(circuit.draw("mpl"))
from qiskit.primitives import Sampler
# Reference (local, statevector-based) Sampler primitive.
sampler = Sampler()
job = sampler.run(circuit)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Quasi-distribution: {result.quasi_dists[0]}")
# Second example: a different random circuit, same sampler.
circuit = random_circuit(2, 2, seed=1, measure=True).decompose(reps=1)
job = sampler.run(circuit)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Quasi-distribution: {result.quasi_dists[0]}")
# Batched run: both circuits are sampled in a single call.
circuits = (
random_circuit(2, 2, seed=0, measure=True).decompose(reps=1),
random_circuit(2, 2, seed=1, measure=True).decompose(reps=1),
)
job = sampler.run(circuits)
result = job.result()
[display(cir.draw("mpl")) for cir in circuits]
print(f">>> Quasi-distribution: {result.quasi_dists}")
from qiskit.circuit.library import RealAmplitudes
# Parameterized ansatz: parameter values are bound at run time.
circuit = RealAmplitudes(num_qubits=2, reps=2).decompose(reps=1)
circuit.measure_all()
parameter_values = [0, 1, 2, 3, 4, 5]
job = sampler.run(circuit, parameter_values)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Parameter values: {parameter_values}")
print(f">>> Quasi-distribution: {result.quasi_dists[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService
# ibm_quantum channel with the saved account credentials.
service = QiskitRuntimeService(channel="ibm_quantum")
backend = service.backend("ibmq_qasm_simulator")
from qiskit.circuit.random import random_circuit
circuit = random_circuit(2, 2, seed=0, measure=True).decompose(reps=1)
display(circuit.draw("mpl"))
from qiskit_ibm_runtime import Sampler
# Runtime Sampler bound directly to a backend (implicit session).
sampler = Sampler(session=backend)
job = sampler.run(circuit)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Quasi-distribution: {result.quasi_dists[0]}")
print(f" > Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
# Options can be set at construction time...
options = Options(optimization_level=3, environment={"log_level": "INFO"})
from qiskit_ibm_runtime import Options
# ...or field by field after construction.
options = Options()
options.resilience_level = 1
options.execution.shots = 2048
sampler = Sampler(session=backend, options=options)
result = sampler.run(circuit).result()
print(f">>> Metadata: {result.metadata[0]}")
# Per-run keyword arguments (here: shots) override the session-level options.
sampler = Sampler(session=backend, options=options)
result = sampler.run(circuit, shots=1024).result()
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
# optimization_level=3 adds dynamical decoupling
# resilience_level=1 adds readout error mitigation
options = Options(optimization_level=3, resilience_level=1)
sampler = Sampler(session=backend, options=options)
result = sampler.run(circuit).result()
print(f">>> Quasi-distribution: {result.quasi_dists[0]}")
print(f">>> Metadata: {result.metadata[0]}")
# fixed: this cell uses Sampler, but the import only brought in Estimator.
# Estimator is kept so the import stays backward-compatible.
from qiskit_ibm_runtime import Session, Sampler, Estimator

# An explicit Session keeps consecutive jobs on the same backend;
# max_time caps the session's lifetime.
with Session(backend=backend, max_time="1h"):
    sampler = Sampler()
    result = sampler.run(circuit).result()
    print(f">>> Quasi-distribution from the first run: {result.quasi_dists[0]}")
    result = sampler.run(circuit).result()
    print(f">>> Quasi-distribution from the second run: {result.quasi_dists[0]}")
from qiskit.circuit.random import random_circuit
from qiskit.quantum_info import SparsePauliOp

# A measurement-free circuit plus an observable for the Estimator examples.
estimator_circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(estimator_circuit.draw("mpl"))
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")

from qiskit_ibm_runtime import Session, Sampler, Estimator
# Run both primitives in one shared session.
with Session(backend=backend):
    sampler = Sampler()
    estimator = Estimator()
    result = sampler.run(circuit).result()
    print(f">>> Quasi Distribution from the sampler job: {result.quasi_dists[0]}")
    result = estimator.run(estimator_circuit, observable).result()
    print(f">>> Expectation value from the estimator job: {result.values[0]}")
from qiskit_ibm_runtime import Session, Sampler, Estimator

# Non-blocking submission: keep the job handles, fetch results afterwards.
with Session(backend=backend):
    sampler = Sampler()
    estimator = Estimator()
    sampler_job = sampler.run(circuit)
    estimator_job = estimator.run(estimator_circuit, observable)
    print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
    print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService, Session, Sampler, Estimator, Options

# End-to-end recipe: account -> options -> backend -> session -> primitives -> jobs -> results.
# 1. Initialize account
service = QiskitRuntimeService(channel="ibm_quantum")

# 2. Specify options, such as enabling error mitigation
options = Options(resilience_level=1)

# 3. Select a backend.
backend = service.backend("ibmq_qasm_simulator")

# 4. Create a session
with Session(backend=backend):
    # 5. Create primitive instances
    sampler = Sampler(options=options)
    estimator = Estimator(options=options)

    # 6. Submit jobs
    sampler_job = sampler.run(circuit)
    estimator_job = estimator.run(estimator_circuit, observable)

    # 7. Get results
    print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
    print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import numpy as np
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.circuit.library import QFT
def create_qpe_circuit(theta, num_qubits):
    '''Creates a QPE circuit given theta and num_qubits.'''
    # Step 1: Create a circuit with two quantum registers and one classical register.
    first = QuantumRegister(size=num_qubits, name='first')  # the first register for phase estimation
    second = QuantumRegister(size=1, name='second')  # the second register for storing eigenvector |psi>
    classical = ClassicalRegister(size=num_qubits, name='readout')  # classical register for readout
    qpe_circuit = QuantumCircuit(first, second, classical)

    # Step 2: Initialize the qubits.
    # All qubits are initialized in |0> by default, no extra code is needed to initialize the first register.
    qpe_circuit.x(second)  # Initialize the second register with state |psi>, which is |1> in this example.

    # Step 3: Create superposition in the first register.
    qpe_circuit.barrier()  # Add barriers to separate each step of the algorithm for better visualization.
    qpe_circuit.h(first)

    # Step 4: Apply a controlled-U^(2^j) black box.
    qpe_circuit.barrier()
    for j in range(num_qubits):
        qpe_circuit.cp(theta*2*np.pi*(2**j), j, num_qubits)  # Theta doesn't contain the 2 pi factor.

    # Step 5: Apply an inverse QFT to the first register.
    qpe_circuit.barrier()
    qpe_circuit.compose(QFT(num_qubits, inverse=True), inplace=True)

    # Step 6: Measure the first register.
    qpe_circuit.barrier()
    qpe_circuit.measure(first, classical)

    return qpe_circuit
num_qubits = 4
qpe_circuit_fixed_phase = create_qpe_circuit(1/2, num_qubits) # Create a QPE circuit with fixed theta=1/2.
qpe_circuit_fixed_phase.draw('mpl')
from qiskit.circuit import Parameter
theta = Parameter('theta') # Create a parameter `theta` whose values can be assigned later.
qpe_circuit_parameterized = create_qpe_circuit(theta, num_qubits)
qpe_circuit_parameterized.draw('mpl')
# Sweep 21 phase values over [0, 2].
number_of_phases = 21
phases = np.linspace(0, 2, number_of_phases)
individual_phases = [[ph] for ph in phases] # Phases need to be expressed as a list of lists.
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator" # use the simulator

from qiskit_ibm_runtime import Sampler, Session
# Run the parameterized QPE circuit once per swept phase value, in one session.
with Session(service=service, backend=backend):
    results = Sampler().run(
        [qpe_circuit_parameterized]*len(individual_phases),
        parameter_values=individual_phases
    ).result()

from qiskit.tools.visualization import plot_histogram
# Inspect the measured distribution for one of the swept phases.
idx = 6
plot_histogram(results.quasi_dists[idx].binary_probabilities(), legend=[f'$\\theta$={phases[idx]:.3f}'])
def most_likely_bitstring(results_dict):
    '''Finds the most likely outcome bit string from a result dictionary.'''
    return max(results_dict, key=results_dict.get)


def find_neighbors(bitstring):
    '''Finds the neighbors of a bit string.

    Example:
        For bit string '1010', this function returns ('1001', '1011')
    '''
    # Left neighbor: value - 1, wrapping '00...0' around to '11...1'.
    if bitstring == len(bitstring)*'0':
        neighbor_left = len(bitstring)*'1'
    else:
        neighbor_left = format((int(bitstring,2)-1), '0%sb'%len(bitstring))
    # Right neighbor: value + 1, wrapping '11...1' around to '00...0'.
    if bitstring == len(bitstring)*'1':
        neighbor_right = len(bitstring)*'0'
    else:
        neighbor_right = format((int(bitstring,2)+1), '0%sb'%len(bitstring))
    return (neighbor_left, neighbor_right)


def estimate_phase(results_dict):
    '''Estimates the phase from a result dictionary of a QPE circuit.

    Returns the probability-weighted average of the phases encoded by the
    most likely bit string and its most likely existing neighbor.
    '''
    # Find the most likely outcome bit string N1 and its neighbors.
    num_1_key = most_likely_bitstring(results_dict)
    neighbor_left, neighbor_right = find_neighbors(num_1_key)

    # Get probabilities of N1 and its neighbors (None if absent).
    num_1_prob = results_dict.get(num_1_key)
    neighbor_left_prob = results_dict.get(neighbor_left)
    neighbor_right_prob = results_dict.get(neighbor_right)

    # Find the second most likely outcome N2 and its probability P2 among the neighbors.
    if neighbor_left_prob is None:
        if neighbor_right_prob is None:
            # Both neighbors don't exist, N2 is N1.
            num_2_key = num_1_key
            num_2_prob = num_1_prob
        else:
            # If only neighbor_left doesn't exist, N2 is neighbor_right.
            num_2_key = neighbor_right
            num_2_prob = neighbor_right_prob
    elif neighbor_right_prob is None:
        # If only neighbor_right doesn't exist, N2 is neighbor_left.
        num_2_key = neighbor_left
        num_2_prob = neighbor_left_prob
    elif neighbor_left_prob > neighbor_right_prob:
        # Both neighbors exist and neighbor_left has higher probability, so N2 is neighbor_left.
        num_2_key = neighbor_left
        num_2_prob = neighbor_left_prob
    else:
        # Both neighbors exist and neighbor_right has higher probability, so N2 is neighbor_right.
        num_2_key = neighbor_right
        num_2_prob = neighbor_right_prob

    # Calculate the estimated phases for N1 and N2 (value / 2**n).
    num_qubits = len(num_1_key)
    num_1_phase = (int(num_1_key, 2) / 2**num_qubits)
    num_2_phase = (int(num_2_key, 2) / 2**num_qubits)

    # Calculate the weighted average phase from N1 and N2.
    phase_estimated = (num_1_phase * num_1_prob + num_2_phase * num_2_prob) / (num_1_prob + num_2_prob)

    return phase_estimated
# Estimate a phase from the measured distribution of every swept value.
# (Unused loop index removed; iterate the distributions directly.)
qpe_solutions = []
for result_dict in results.quasi_dists:
    qpe_solutions.append(estimate_phase(result_dict.binary_probabilities()))

# QPE reports phases modulo 1, so fold the ideal line into matching periods.
ideal_solutions = np.append(
    phases[:(number_of_phases-1)//2],  # first period
    np.subtract(phases[(number_of_phases-1)//2:-1], 1)  # second period
)
ideal_solutions = np.append(ideal_solutions, np.subtract(phases[-1], 2))  # starting point of the third period

import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 6))
plt.plot(phases, ideal_solutions, '--', label='Ideal solutions')
plt.plot(phases, qpe_solutions, 'o', label='QPE solutions')
plt.title('Quantum Phase Estimation Algorithm')
plt.xlabel('Input Phase')
plt.ylabel('Output Phase')
plt.legend()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# load necessary Runtime libraries
from qiskit_ibm_runtime import QiskitRuntimeService, Sampler, Session
backend = "ibmq_qasm_simulator" # use the simulator
from qiskit.circuit import Parameter
from qiskit.opflow import I, X, Z
# Probe term: mu * X on the first qubit.
mu = Parameter('$\\mu$')
ham_pauli = mu * X
# Second subsystem with frequency omega, coupled via X-X with strength c.
cc = Parameter('$c$')
ww = Parameter('$\\omega$')
ham_res = -(1/2)*ww*(I^Z) + cc*(X^X) + (ham_pauli^I)
tt = Parameter('$t$')
# Time-evolution operator exp(-i t H) for the combined Hamiltonian.
U_ham = (tt*ham_res).exp_i()
from qiskit import transpile
from qiskit.circuit import ClassicalRegister
from qiskit.opflow import PauliTrotterEvolution, Suzuki
import numpy as np
# Second-order Suzuki-Trotter decomposition of the evolution operator.
num_trot_steps = 5
total_time = 10
cr = ClassicalRegister(1, 'c')
spec_op = PauliTrotterEvolution(trotter_mode=Suzuki(order=2, reps=num_trot_steps)).convert(U_ham)
spec_circ = spec_op.to_circuit()
spec_circ_t = transpile(spec_circ, basis_gates=['sx', 'rz', 'cx'])
# Measure only qubit 0 into the single classical bit.
spec_circ_t.add_register(cr)
spec_circ_t.measure(0, cr[0])
spec_circ_t.draw('mpl')
# fixed Parameters
fixed_params = {
    cc: 0.3,
    mu: 0.7,
    tt: total_time
}
# Parameter value for single circuit: keep the circuit's own parameter order.
param_keys = list(spec_circ_t.parameters)

# run through all the ww values to create a List of Lists of Parameter values
num_pts = 101
wvals = np.linspace(-2, 2, num_pts)
param_vals = []
for wval in wvals:
    all_params = {**fixed_params, **{ww: wval}}
    param_vals.append([all_params[key] for key in param_keys])
# Sample the measured qubit for every omega value in a single session.
with Session(backend=backend):
    sampler = Sampler()
    job = sampler.run(
        circuits=[spec_circ_t]*num_pts,
        parameter_values=param_vals,
        shots=100_000  # fixed: shot counts are integers; 1e5 is a float
    )
    result = job.result()

# Convert P(measured 1) into a <Z> expectation value: <Z> = 1 - 2*P(1).
Zexps = []
for dist in result.quasi_dists:
    if 1 in dist:
        Zexps.append(1 - 2*dist[1])
    else:
        Zexps.append(1)
from qiskit.opflow import PauliExpectation, Zero

# Exact reference: evaluate <Z> analytically with opflow for the same sweep.
param_bind = {
    cc: 0.3,
    mu: 0.7,
    tt: total_time
}
init_state = Zero^2
obsv = I^Z
Zexp_exact = (U_ham @ init_state).adjoint() @ obsv @ (U_ham @ init_state)
diag_meas_op = PauliExpectation().convert(Zexp_exact)

Zexact_values = []
for w_set in wvals:
    param_bind[ww] = w_set
    Zexact_values.append(np.real(diag_meas_op.bind_parameters(param_bind).eval()))
import matplotlib.pyplot as plt
plt.style.use('dark_background')
fig, ax = plt.subplots(dpi=100)
# Dashed vertical guides at omega = +/- mu (presumably the expected
# resonance positions — confirm against the model).
ax.plot([-param_bind[mu], -param_bind[mu]], [0, 1], ls='--', color='purple')
ax.plot([param_bind[mu], param_bind[mu]], [0, 1], ls='--', color='purple')
ax.plot(wvals, Zexact_values, label='Exact')
ax.plot(wvals, Zexps, label=f"{backend}")
ax.set_xlabel(r'$\omega$ (arb)')
ax.set_ylabel(r'$\langle Z \rangle$ Expectation')
ax.legend()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# Create circuit to test transpiler on
from qiskit import QuantumCircuit
from qiskit.circuit.library import GroverOperator, Diagonal
# Oracle flips the phase of |111> only (the last of the 8 basis states).
oracle = Diagonal([1]*7 + [-1])
qc = QuantumCircuit(3)
qc.h([0,1,2])
qc = qc.compose(GroverOperator(oracle))
# Use Statevector object to calculate the ideal output
from qiskit.quantum_info import Statevector
ideal_distribution = Statevector.from_instruction(qc).probabilities_dict()
from qiskit.visualization import plot_histogram
plot_histogram(ideal_distribution)
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
backend = service.backend('ibm_algiers')
# Need to add measurements to the circuit
qc.measure_all()
from qiskit import transpile
circuits = []
for optimization_level in [0, 3]:
t_qc = transpile(qc,
backend,
optimization_level=optimization_level,
seed_transpiler=0)
print(f'CNOTs (optimization_level={optimization_level}): ',
t_qc.count_ops()['cx'])
circuits.append(t_qc)
from qiskit.transpiler import PassManager, InstructionDurations
from qiskit.transpiler.passes import ASAPSchedule, DynamicalDecoupling
from qiskit.circuit.library import XGate
# Get gate durations so the transpiler knows how long each operation takes
durations = InstructionDurations.from_backend(backend)
# This is the sequence we'll apply to idling qubits
dd_sequence = [XGate(), XGate()]
# Run scheduling and dynamic decoupling passes on circuit
pm = PassManager([ASAPSchedule(durations),
DynamicalDecoupling(durations, dd_sequence)]
)
circ_dd = pm.run(circuits[1])
# Add this new circuit to our list
circuits.append(circ_dd)
from qiskit_ibm_runtime import Sampler, Session
with Session(service=service, backend=backend):
sampler = Sampler()
job = sampler.run(
circuits=circuits, # sample all three circuits
skip_transpilation=True,
shots=8000)
result = job.result()
from qiskit.visualization import plot_histogram
binary_prob = [quasi_dist.binary_probabilities() for quasi_dist in result.quasi_dists]
plot_histogram(binary_prob+[ideal_distribution],
bar_labels=False,
legend=['optimization_level=0',
'optimization_level=3',
'optimization_level=3 + dd',
'ideal distribution'])
from qiskit.quantum_info import hellinger_fidelity
for counts in result.quasi_dists:
print(
f"{hellinger_fidelity(counts.binary_probabilities(), ideal_distribution):.3f}"
)
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit_nature.second_q.drivers import PySCFDriver
driver = PySCFDriver(
atom="H 0 0 0; H 0 0 0.72" # Two Hydrogen atoms, 0.72 Angstrom apart
)
molecule = driver.run()
from qiskit_nature.second_q.mappers import QubitConverter, ParityMapper
qubit_converter = QubitConverter(ParityMapper())
hamiltonian = qubit_converter.convert(molecule.second_q_ops()[0])
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver
sol = NumPyMinimumEigensolver().compute_minimum_eigenvalue(hamiltonian)
real_solution = molecule.interpret(sol)
real_solution.groundenergy
from qiskit_ibm_runtime import QiskitRuntimeService, Estimator, Session, Options
service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator"
from qiskit.algorithms.minimum_eigensolvers import VQE
# Use RealAmplitudes circuit to create trial states
from qiskit.circuit.library import RealAmplitudes
ansatz = RealAmplitudes(num_qubits=2, reps=2)
# Search for better states using SPSA algorithm
from qiskit.algorithms.optimizers import SPSA
optimizer = SPSA(150)
# Set a starting point for reproduceability
import numpy as np
np.random.seed(6)
initial_point = np.random.uniform(-np.pi, np.pi, 12)
# Create an object to store intermediate results
from dataclasses import dataclass
@dataclass
class VQELog:
    """Accumulates intermediate VQE results via the optimizer callback.

    Attributes:
        values: energy estimate recorded at each iteration.
        parameters: ansatz parameter vector recorded at each iteration.
    """
    values: list
    parameters: list

    def update(self, count, parameters, mean, _metadata):
        """Callback hook: record the latest iterate and report progress."""
        self.parameters.append(parameters)
        self.values.append(mean)
        # Overwrite the same terminal line on every call for a live counter.
        print(f"Running circuit {count} of ~350", end="\r", flush=True)
log = VQELog([],[])
# Main calculation
with Session(service=service, backend=backend) as session:
options = Options()
options.optimization_level = 3
vqe = VQE(Estimator(session=session, options=options),
ansatz, optimizer, callback=log.update, initial_point=initial_point)
result = vqe.compute_minimum_eigenvalue(hamiltonian)
print("Experiment complete.".ljust(30))
print(f"Raw result: {result.optimal_value}")
if 'simulator' not in backend:
# Run once with ZNE error mitigation
options.resilience_level = 2
vqe = VQE(Estimator(session=session, options=options),
ansatz, SPSA(1), initial_point=result.optimal_point)
result = vqe.compute_minimum_eigenvalue(hamiltonian)
print(f"Mitigated result: {result.optimal_value}")
import matplotlib.pyplot as plt
plt.rcParams["font.size"] = 14
# Plot energy and reference value
plt.figure(figsize=(12, 6))
plt.plot(log.values, label="Estimator VQE")
plt.axhline(y=real_solution.groundenergy, color="tab:red", ls="--", label="Target")
plt.legend(loc="best")
plt.xlabel("Iteration")
plt.ylabel("Energy [H]")
plt.title("VQE energy")
plt.show()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit.circuit import Parameter
from qiskit import QuantumCircuit
theta = Parameter('$\\theta$')
chsh_circuits_no_meas = QuantumCircuit(2)
chsh_circuits_no_meas.h(0)
chsh_circuits_no_meas.cx(0, 1)
chsh_circuits_no_meas.ry(theta, 0)
chsh_circuits_no_meas.draw('mpl')
import numpy as np
number_of_phases = 21
phases = np.linspace(0, 2*np.pi, number_of_phases)
# Phases need to be expressed as list of lists in order to work
individual_phases = [[ph] for ph in phases]
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator" # use the simulator
from qiskit_ibm_runtime import Estimator, Session
from qiskit.quantum_info import SparsePauliOp
ZZ = SparsePauliOp.from_list([("ZZ", 1)])
ZX = SparsePauliOp.from_list([("ZX", 1)])
XZ = SparsePauliOp.from_list([("XZ", 1)])
XX = SparsePauliOp.from_list([("XX", 1)])
ops = [ZZ, ZX, XZ, XX]
chsh_est_sim = []
# Simulator
with Session(service=service, backend=backend):
estimator = Estimator()
for op in ops:
job = estimator.run(
circuits=[chsh_circuits_no_meas]*len(individual_phases),
observables=[op]*len(individual_phases),
parameter_values=individual_phases)
est_result = job.result()
chsh_est_sim.append(est_result)
# <CHSH1> = <AB> - <Ab> + <aB> + <ab>
chsh1_est_sim = chsh_est_sim[0].values - chsh_est_sim[1].values + chsh_est_sim[2].values + chsh_est_sim[3].values
# <CHSH2> = <AB> + <Ab> - <aB> + <ab>
chsh2_est_sim = chsh_est_sim[0].values + chsh_est_sim[1].values - chsh_est_sim[2].values + chsh_est_sim[3].values
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
fig, ax = plt.subplots(figsize=(10, 6))
# results from a simulator
ax.plot(phases/np.pi, chsh1_est_sim, 'o-', label='CHSH1 Simulation')
ax.plot(phases/np.pi, chsh2_est_sim, 'o-', label='CHSH2 Simulation')
# classical bound +-2
ax.axhline(y=2, color='r', linestyle='--')
ax.axhline(y=-2, color='r', linestyle='--')
# quantum bound, +-2√2
ax.axhline(y=np.sqrt(2)*2, color='b', linestyle='-.')
ax.axhline(y=-np.sqrt(2)*2, color='b', linestyle='-.')
# set x tick labels to the unit of pi
# Raw string avoids the invalid '\p' escape warning; rendered text is unchanged.
ax.xaxis.set_major_formatter(tck.FormatStrFormatter(r'%g $\pi$'))
ax.xaxis.set_major_locator(tck.MultipleLocator(base=0.5))
# set title, labels, and legend
plt.title('Violation of CHSH Inequality')
plt.xlabel('Theta')
plt.ylabel('CHSH witness')
plt.legend()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import datetime
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rcParams.update({"text.usetex": True})
plt.rcParams["figure.figsize"] = (6,4)
mpl.rcParams["figure.dpi"] = 200
from qiskit_ibm_runtime import Estimator, Session, QiskitRuntimeService, Options
from qiskit.quantum_info import SparsePauliOp
from qiskit import QuantumCircuit
service = QiskitRuntimeService()
backend_simulator = "backend_simulator"
backend = "ibmq_montreal"
qubits = 4
trotter_layer = QuantumCircuit(qubits)
trotter_layer.rx(0.1, range(qubits))
trotter_layer.cx(0, 1)
trotter_layer.cx(2, 3)
trotter_layer.rz(-0.2, [1, 3])
trotter_layer.cx(0, 1)
trotter_layer.cx(2, 3)
trotter_layer.cx(1, 2)
trotter_layer.rz(-0.2, 2)
trotter_layer.cx(1, 2)
num_steps = 6
# Build circuits with 1 .. num_steps-1 Trotter steps by stacking the layer.
trotter_circuit_list = []
for i in range(1, num_steps):
    trotter_circuit = QuantumCircuit(qubits)
    for _ in range(i):
        trotter_circuit = trotter_circuit.compose(trotter_layer)
    trotter_circuit_list.append(trotter_circuit)
    # Original line ended the f-string with a backtick instead of a quote,
    # which is a syntax error.
    print(f'Trotter circuit with {i} Trotter steps')
    display(trotter_circuit.draw(fold=-1))
obs = SparsePauliOp("Z"*qubits)
obs_list = [obs]*len(trotter_circuit_list)
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # No optimization
options.resilience_level = 0 # No mitigation
# Ideal (noiseless simulator) baseline for the Trotter circuits.
with Session(service=service, backend=backend_simulator) as session:
    estimator_sim = Estimator(session=session, options=options)
    job_sim = estimator_sim.run(circuits=trotter_circuit_list, observables=obs_list)
    # job_id is a method in qiskit-ibm-runtime; the bare attribute would print
    # the bound-method repr. Matches job.job_id() usage elsewhere in this file.
    print('job id:', job_sim.job_id())
    print(job_sim.result())
expvals_ideal = job_sim.result().values
# Standard error of the mean: sqrt(variance / shots) per circuit.
expvals_ideal_variance = [metadata['variance']/metadata['shots'] for metadata in job_sim.result().metadata]
std_error_ideal = np.sqrt(expvals_ideal_variance)
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # No optimization
options.resilience_level = 0 # No error mitigation
with Session(service=service, backend=backend) as session:
estimator = Estimator(session=session, options=options)
job = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job.job_id)
print(job.result())
expvals_unmit = job.result().values
expvals_unmit_variance = [metadata['variance']/metadata['shots'] for metadata in job.result().metadata]
std_error_unmit = np.sqrt(expvals_unmit_variance)
options = Options()
options.execution.shots = 1000
options.optimization_level = 3 # Dynamical decoupling
options.resilience_level = 0 # No error mitigation
with Session(service=service, backend=backend) as session:
estimator = Estimator(session=session, options=options)
job_dd = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job_dd.job_id)
print(job_dd.result())
expvals_unmit_dd = job_dd.result().values
expvals_unmit_dd_variance = [metadata['variance']/metadata['shots'] for metadata in job_dd.result().metadata]
std_error_dd = np.sqrt(expvals_unmit_dd_variance)
# Compare ideal, unmitigated, and dynamical-decoupling expectation values.
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_dd, std_error_dd, fmt = 'o', linestyle = '-', capsize=4, c='blue', label='Dynamical decoupling')
# Raw string: the original f-string had no placeholders and an invalid '\l'
# escape; the rendered label bytes are identical.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
# Readout-error mitigation (T-REx) run on hardware.
options = Options()
options.resilience_level = 1 # T-REx
options.optimization_level = 0 # No optimization
options.execution.shots = 1000
with Session(service=service, backend=backend) as session:
    estimator = Estimator(session=session, options=options)
    job_trex = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
    # job_id is a method in qiskit-ibm-runtime; call it, as done elsewhere in this file.
    print('job id:', job_trex.job_id())
    print(job_trex.result())
expvals_unmit_trex = job_trex.result().values
# Standard error of the mean: sqrt(variance / shots) per circuit.
expvals_unmit_trex_variance = [metadata['variance']/metadata['shots'] for metadata in job_trex.result().metadata]
std_error_trex = np.sqrt(expvals_unmit_trex_variance)
# Compare ideal, unmitigated, and T-REx-mitigated expectation values.
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_trex, std_error_trex, fmt = 'o', linestyle = '-', capsize=4, c='violet', label='T-REx')
# Raw string replaces a placeholder-free f-string with an invalid '\l' escape;
# rendered label is unchanged.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # No optimization
options.resilience_level = 2 # ZNE
with Session(service=service, backend=backend) as session:
estimator = Estimator(session=session, options=options)
job_zne = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job_zne.job_id)
print(job_zne.result())
expvals_unmit_zne = job_zne.result().values
# Standard error: coming soon!
# Compare ideal, unmitigated, and ZNE-mitigated expectation values.
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
# ZNE standard errors are not available yet, so plot zero-length error bars.
plt.errorbar(range(1, num_steps), expvals_unmit_zne, [0]*(num_steps-1), fmt = 'o', linestyle = '-', capsize=4, c='cyan', label='ZNE')
plt.xlabel('No. Trotter Steps')
# Raw string replaces a placeholder-free f-string with an invalid '\l' escape;
# rendered label is unchanged.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
def interim_results_callback(job_id, result):
    """Print each interim result as it streams in, prefixed with a timestamp."""
    timestamp = datetime.datetime.now()
    print(timestamp, "*** Callback ***", result, "\n")
options = Options()
options.optimization_level = 0 # No optimization
options.execution.shots = 100
options.resilience_level = 3 # PEC
options.environment.callback = interim_results_callback
with Session(service=service, backend=backend) as session:
estimator_pec = Estimator(session=session, options=options)
job_pec = estimator_pec.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job_pec.job_id)
expvals_pec = job_pec.result().values
std_error_pec = [metadata['standard_error'] for metadata in job_pec.result().metadata]
# Compare ideal, unmitigated, and PEC-mitigated expectation values.
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_pec, std_error_pec, fmt = 'd', linestyle = '-', capsize=4, c='orange', label='PEC')
# Raw string replaces a placeholder-free f-string with an invalid '\l' escape;
# rendered label is unchanged.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
print(job_pec.result())
pec_metadata = job_pec.result().metadata
fig, ax = plt.subplots()
fig.subplots_adjust(right=0.75)
twin1 = ax.twinx()
twin2 = ax.twinx()
twin3 = ax.twinx()
twin2.spines.right.set_position(("axes", 1.2))
twin3.spines.right.set_position(("axes", 1.4))
p1, = ax.plot(range(1, num_steps), [m["total_mitigated_layers"] for m in pec_metadata] , "b-", label="Total mitigated layers")
p2, = twin1.plot(range(1, num_steps), [m["sampling_overhead"] for m in pec_metadata], "r-", label="Sampling overhead")
p3, = twin2.plot(range(1, num_steps), [m["samples"] for m in pec_metadata], "g-", label="Samples")
p4, = twin3.plot(range(1, num_steps), [m["shots"] for m in pec_metadata], "c-", label="Shots")
ax.set_ylim(0, 20)
twin1.set_ylim(0, 2.8)
twin2.set_ylim(0, 300)
twin3.set_ylim(0, 35000)
ax.set_xlabel("No. Trotter Steps")
ax.set_ylabel("Total mitigated layers")
twin1.set_ylabel("Sampling overhead")
twin2.set_ylabel("Samples")
twin3.set_ylabel("Shots")
ax.yaxis.label.set_color(p1.get_color())
twin1.yaxis.label.set_color(p2.get_color())
twin2.yaxis.label.set_color(p3.get_color())
twin3.yaxis.label.set_color(p4.get_color())
tkw = dict(size=4, width=1.5)
ax.tick_params(axis='y', colors=p1.get_color(), **tkw)
twin1.tick_params(axis='y', colors=p2.get_color(), **tkw)
twin2.tick_params(axis='y', colors=p3.get_color(), **tkw)
twin3.tick_params(axis='y', colors=p4.get_color(), **tkw)
plt.xticks([1, 2, 3, 4, 5])
ax.legend(handles=[p1, p2, p3, p4])
plt.title('PEC metadata')
plt.show()
from matplotlib.pyplot import figure  # NOTE(review): imported but unused here; kept to avoid breaking later cells
# Summary plot: all mitigation strategies against the ideal baseline.
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_trex, std_error_trex, fmt = 'o', linestyle = '-', capsize=4, c='violet', label='T-REx')
plt.errorbar(range(1, num_steps), expvals_unmit_zne, [0]*(num_steps-1), fmt = 'o', linestyle = '-', capsize=4, c='cyan', label='ZNE')
plt.errorbar(range(1, num_steps), expvals_pec, std_error_pec, fmt = 'd', linestyle = '-', capsize=4, c='orange', label='PEC')
plt.title('Trotter circuits expectation value')
# Raw string replaces a placeholder-free f-string with an invalid '\l' escape;
# rendered label is unchanged.
plt.ylabel(r"$\langle ZZZZ \rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # no optimization
options.resilience_level = 2 # ZNE
options.resilience.noise_factors = [1, 2, 3, 4]
options.resilience.noise_amplifier = "LocalFoldingAmplifier"
options.resilience.extrapolator = "QuadraticExtrapolator"
with Session(service=service, backend='ibmq_montreal') as session:
estimator = Estimator(session=session, options=options)
job_zne_options = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job_zne_options.job_id)
print(job_zne_options.result())
from qiskit.tools import jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import random
from qiskit.quantum_info import Statevector
secret = random.randint(0,7) # the owner is randomly picked
secret_string = format(secret, '03b') # format the owner in 3-bit string
oracle = Statevector.from_label(secret_string) # let the oracle know the owner
from qiskit.algorithms import AmplificationProblem
problem = AmplificationProblem(oracle, is_good_state=secret_string)
from qiskit.algorithms import Grover
grover_circuits = []
for iteration in range(1,3):
grover = Grover(iterations=iteration)
circuit = grover.construct_circuit(problem)
circuit.measure_all()
grover_circuits.append(circuit)
# Grover's circuit with 1 iteration
grover_circuits[0].draw()
# Grover's circuit with 2 iterations
grover_circuits[1].draw()
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator" # use the simulator
from qiskit_ibm_runtime import Sampler, Session
with Session(service=service, backend=backend):
sampler = Sampler()
job = sampler.run(circuits=grover_circuits, shots=1000)
result = job.result()
print(result)
from qiskit.tools.visualization import plot_histogram
# Extract bit string with highest probability from results as the answer
result_dict = result.quasi_dists[1].binary_probabilities()
answer = max(result_dict, key=result_dict.get)
print(f"As you can see, the quantum computer returned '{answer}' as the answer with highest probability.\n"
"And the results with 2 iterations have higher probability than the results with 1 iteration."
)
# Plot the results
plot_histogram(result.quasi_dists, legend=['1 iteration', '2 iterations'])
# Print the results and the correct answer.
print(f"Quantum answer: {answer}")
print(f"Correct answer: {secret_string}")
print('Success!' if answer == secret_string else 'Failure!')
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit.circuit.random import random_circuit
circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(circuit.draw("mpl"))
from qiskit.quantum_info import SparsePauliOp
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")
from qiskit.primitives import Estimator
estimator = Estimator()
job = estimator.run(circuit, observable)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Expectation value: {result.values[0]}")
circuit = random_circuit(2, 2, seed=1).decompose(reps=1)
observable = SparsePauliOp("IY")
job = estimator.run(circuit, observable)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Observable: {observable.paulis}")
print(f">>> Expectation value: {result.values[0]}")
circuits = (
random_circuit(2, 2, seed=0).decompose(reps=1),
random_circuit(2, 2, seed=1).decompose(reps=1),
)
observables = (
SparsePauliOp("XZ"),
SparsePauliOp("IY"),
)
job = estimator.run(circuits, observables)
result = job.result()
[display(cir.draw("mpl")) for cir in circuits]
print(f">>> Observables: {[obs.paulis for obs in observables]}")
print(f">>> Expectation values: {result.values.tolist()}")
from qiskit.circuit.library import RealAmplitudes
circuit = RealAmplitudes(num_qubits=2, reps=2).decompose(reps=1)
observable = SparsePauliOp("ZI")
parameter_values = [0, 1, 2, 3, 4, 5]
job = estimator.run(circuit, observable, parameter_values)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Observable: {observable.paulis}")
print(f">>> Parameter values: {parameter_values}")
print(f">>> Expectation value: {result.values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService(channel="ibm_quantum")
backend = service.backend("ibmq_qasm_simulator")
from qiskit.circuit.random import random_circuit
from qiskit.quantum_info import SparsePauliOp
circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(circuit.draw("mpl"))
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")
from qiskit_ibm_runtime import Estimator
estimator = Estimator(session=backend)
job = estimator.run(circuit, observable)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Expectation value: {result.values[0]}")
print(f" > Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
options = Options(optimization_level=3, environment={"log_level": "INFO"})
from qiskit_ibm_runtime import Options
options = Options()
options.resilience_level = 1
options.execution.shots = 2048
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable).result()
print(f">>> Metadata: {result.metadata[0]}")
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable, shots=1024).result()
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
# optimization_level=3 adds dynamical decoupling
# resilience_level=1 adds readout error mitigation
options = Options(optimization_level=3, resilience_level=1)
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value: {result.values[0]}")
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Session, Estimator
with Session(backend=backend, max_time="1h"):
estimator = Estimator()
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value from the first run: {result.values[0]}")
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value from the second run: {result.values[0]}")
from qiskit.circuit.random import random_circuit
sampler_circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
sampler_circuit.measure_all()
display(circuit.draw("mpl"))
from qiskit_ibm_runtime import Session, Sampler, Estimator
with Session(backend=backend):
sampler = Sampler()
estimator = Estimator()
result = sampler.run(sampler_circuit).result()
print(f">>> Quasi Distribution from the sampler job: {result.quasi_dists[0]}")
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value from the estimator job: {result.values[0]}")
from qiskit_ibm_runtime import Session, Sampler, Estimator
with Session(backend=backend):
sampler = Sampler()
estimator = Estimator()
sampler_job = sampler.run(sampler_circuit)
estimator_job = estimator.run(circuit, observable)
print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService, Session, Sampler, Estimator, Options
# 1. Initialize account
service = QiskitRuntimeService(channel="ibm_quantum")
# 2. Specify options, such as enabling error mitigation
options = Options(resilience_level=1)
# 3. Select a backend.
backend = service.backend("ibmq_qasm_simulator")
# 4. Create a session
with Session(backend=backend):
# 5. Create primitive instances
sampler = Sampler(options=options)
estimator = Estimator(options=options)
# 6. Submit jobs
sampler_job = sampler.run(sampler_circuit)
estimator_job = estimator.run(circuit, observable)
# 7. Get results
print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit.circuit.random import random_circuit
circuit = random_circuit(2, 2, seed=0, measure=True).decompose(reps=1)
display(circuit.draw("mpl"))
from qiskit.primitives import Sampler
sampler = Sampler()
job = sampler.run(circuit)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Quasi-distribution: {result.quasi_dists[0]}")
circuit = random_circuit(2, 2, seed=1, measure=True).decompose(reps=1)
job = sampler.run(circuit)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Quasi-distribution: {result.quasi_dists[0]}")
circuits = (
random_circuit(2, 2, seed=0, measure=True).decompose(reps=1),
random_circuit(2, 2, seed=1, measure=True).decompose(reps=1),
)
job = sampler.run(circuits)
result = job.result()
[display(cir.draw("mpl")) for cir in circuits]
print(f">>> Quasi-distribution: {result.quasi_dists}")
from qiskit.circuit.library import RealAmplitudes
circuit = RealAmplitudes(num_qubits=2, reps=2).decompose(reps=1)
circuit.measure_all()
parameter_values = [0, 1, 2, 3, 4, 5]
job = sampler.run(circuit, parameter_values)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Parameter values: {parameter_values}")
print(f">>> Quasi-distribution: {result.quasi_dists[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService(channel="ibm_quantum")
backend = service.backend("ibmq_qasm_simulator")
from qiskit.circuit.random import random_circuit
circuit = random_circuit(2, 2, seed=0, measure=True).decompose(reps=1)
display(circuit.draw("mpl"))
from qiskit_ibm_runtime import Sampler
sampler = Sampler(session=backend)
job = sampler.run(circuit)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Quasi-distribution: {result.quasi_dists[0]}")
print(f" > Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
options = Options(optimization_level=3, environment={"log_level": "INFO"})
from qiskit_ibm_runtime import Options
options = Options()
options.resilience_level = 1
options.execution.shots = 2048
sampler = Sampler(session=backend, options=options)
result = sampler.run(circuit).result()
print(f">>> Metadata: {result.metadata[0]}")
sampler = Sampler(session=backend, options=options)
result = sampler.run(circuit, shots=1024).result()
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
# optimization_level=3 adds dynamical decoupling
# resilience_level=1 adds readout error mitigation
options = Options(optimization_level=3, resilience_level=1)
sampler = Sampler(session=backend, options=options)
result = sampler.run(circuit).result()
print(f">>> Quasi-distribution: {result.quasi_dists[0]}")
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Session, Estimator
with Session(backend=backend, max_time="1h"):
sampler = Sampler()
result = sampler.run(circuit).result()
print(f">>> Quasi-distribution from the first run: {result.quasi_dists[0]}")
result = sampler.run(circuit).result()
print(f">>> Quasi-distribution from the second run: {result.quasi_dists[0]}")
from qiskit.circuit.random import random_circuit
from qiskit.quantum_info import SparsePauliOp
estimator_circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(estimator_circuit.draw("mpl"))
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")
from qiskit_ibm_runtime import Session, Sampler, Estimator
with Session(backend=backend):
sampler = Sampler()
estimator = Estimator()
result = sampler.run(circuit).result()
print(f">>> Quasi Distribution from the sampler job: {result.quasi_dists[0]}")
result = estimator.run(estimator_circuit, observable).result()
print(f">>> Expectation value from the estimator job: {result.values[0]}")
from qiskit_ibm_runtime import Session, Sampler, Estimator
with Session(backend=backend):
sampler = Sampler()
estimator = Estimator()
sampler_job = sampler.run(circuit)
estimator_job = estimator.run(estimator_circuit, observable)
print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService, Session, Sampler, Estimator, Options
# 1. Initialize account
service = QiskitRuntimeService(channel="ibm_quantum")
# 2. Specify options, such as enabling error mitigation
options = Options(resilience_level=1)
# 3. Select a backend.
backend = service.backend("ibmq_qasm_simulator")
# 4. Create a session
with Session(backend=backend):
# 5. Create primitive instances
sampler = Sampler(options=options)
estimator = Estimator(options=options)
# 6. Submit jobs
sampler_job = sampler.run(circuit)
estimator_job = estimator.run(estimator_circuit, observable)
# 7. Get results
print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import numpy as np
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.circuit.library import QFT
def create_qpe_circuit(theta, num_qubits):
    '''Creates a QPE circuit given theta and num_qubits.'''
    # Registers: `num_qubits` counting qubits, one eigenstate qubit, and a
    # classical register used to read out the phase estimate.
    counting_reg = QuantumRegister(size=num_qubits, name='first')
    eigenstate_reg = QuantumRegister(size=1, name='second')
    readout_reg = ClassicalRegister(size=num_qubits, name='readout')
    circuit = QuantumCircuit(counting_reg, eigenstate_reg, readout_reg)
    # Prepare the eigenstate |1> on the second register; the counting
    # register starts in |0...0> by default, so no extra code is needed there.
    circuit.x(eigenstate_reg)
    # Barriers only separate the algorithm steps for nicer visualization.
    circuit.barrier()
    # Put the counting register into an equal superposition.
    circuit.h(counting_reg)
    circuit.barrier()
    # Controlled-U^(2^j) black box: one controlled phase rotation per
    # counting qubit. `theta` excludes the 2*pi factor, so apply it here.
    for j in range(num_qubits):
        circuit.cp(theta * 2 * np.pi * (2 ** j), j, num_qubits)
    circuit.barrier()
    # The inverse QFT decodes the accumulated phase into the computational basis.
    circuit.compose(QFT(num_qubits, inverse=True), inplace=True)
    circuit.barrier()
    # Read out the counting register.
    circuit.measure(counting_reg, readout_reg)
    return circuit
num_qubits = 4
qpe_circuit_fixed_phase = create_qpe_circuit(1/2, num_qubits) # Create a QPE circuit with fixed theta=1/2.
qpe_circuit_fixed_phase.draw('mpl')
from qiskit.circuit import Parameter
theta = Parameter('theta') # Create a parameter `theta` whose values can be assigned later.
qpe_circuit_parameterized = create_qpe_circuit(theta, num_qubits)
qpe_circuit_parameterized.draw('mpl')
number_of_phases = 21
phases = np.linspace(0, 2, number_of_phases)
individual_phases = [[ph] for ph in phases] # Phases need to be expressed as a list of lists.
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator" # use the simulator
from qiskit_ibm_runtime import Sampler, Session
with Session(service=service, backend=backend):
results = Sampler().run(
[qpe_circuit_parameterized]*len(individual_phases),
parameter_values=individual_phases
).result()
from qiskit.tools.visualization import plot_histogram
idx = 6
plot_histogram(results.quasi_dists[idx].binary_probabilities(), legend=[f'$\\theta$={phases[idx]:.3f}'])
def most_likely_bitstring(results_dict):
    '''Finds the most likely outcome bit string from a result dictionary.'''
    # Pick the (key, probability) pair with the largest probability; ties
    # resolve to the first such key in insertion order, as with max(d, key=d.get).
    best_key, _best_prob = max(results_dict.items(), key=lambda item: item[1])
    return best_key
def find_neighbors(bitstring):
    '''Finds the neighbors of a bit string.
    Example:
        For bit string '1010', this function returns ('1001', '1011')
    '''
    width = len(bitstring)
    value = int(bitstring, 2)
    limit = 2 ** width
    # Decrement/increment modulo 2**width so the two ends wrap around:
    # '00...0' - 1 -> '11...1' and '11...1' + 1 -> '00...0'.
    neighbor_left = format((value - 1) % limit, '0{}b'.format(width))
    neighbor_right = format((value + 1) % limit, '0{}b'.format(width))
    return (neighbor_left, neighbor_right)
def estimate_phase(results_dict):
'''Estimates the phase from a result dictionary of a QPE circuit.'''
# Find the most likely outcome bit string N1 and its neighbors.
num_1_key = most_likely_bitstring(results_dict)
neighbor_left, neighbor_right = find_neighbors(num_1_key)
# Get probabilities of N1 and its neighbors.
num_1_prob = results_dict.get(num_1_key)
neighbor_left_prob = results_dict.get(neighbor_left)
neighbor_right_prob = results_dict.get(neighbor_right)
# Find the second most likely outcome N2 and its probability P2 among the neighbors.
if neighbor_left_prob is None:
# neighbor_left doesn't exist
if neighbor_right_prob is None:
# both neighbors don't exist, N2 is N1
num_2_key = num_1_key
num_2_prob = num_1_prob
else:
# If only neighbor_left doesn't exist, N2 is neighbor_right.
num_2_key = neighbor_right
num_2_prob = neighbor_right_prob
elif neighbor_right_prob is None:
# If only neighbor_right doesn't exist, N2 is neighbor_left.
num_2_key = neighbor_left
num_2_prob = neighbor_left_prob
elif neighbor_left_prob > neighbor_right_prob:
# Both neighbors exist and neighbor_left has higher probability, so N2 is neighbor_left.
num_2_key = neighbor_left
num_2_prob = neighbor_left_prob
else:
# Both neighbors exist and neighbor_right has higher probability, so N2 is neighor_right.
num_2_key = neighbor_right
num_2_prob = neighbor_right_prob
# Calculate the estimated phases for N1 and N2.
num_qubits = len(num_1_key)
num_1_phase = (int(num_1_key, 2) / 2**num_qubits)
num_2_phase = (int(num_2_key, 2) / 2**num_qubits)
# Calculate the weighted average phase from N1 and N2.
phase_estimated = (num_1_phase * num_1_prob + num_2_phase * num_2_prob) / (num_1_prob + num_2_prob)
return phase_estimated
qpe_solutions = []
for idx, result_dict in enumerate(results.quasi_dists):
qpe_solutions.append(estimate_phase(result_dict.binary_probabilities()))
ideal_solutions = np.append(
phases[:(number_of_phases-1)//2], # first period
np.subtract(phases[(number_of_phases-1)//2:-1], 1) # second period
)
ideal_solutions = np.append(ideal_solutions, np.subtract(phases[-1], 2)) # starting point of the third period
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 6))
plt.plot(phases, ideal_solutions, '--', label='Ideal solutions')
plt.plot(phases, qpe_solutions, 'o', label='QPE solutions')
plt.title('Quantum Phase Estimation Algorithm')
plt.xlabel('Input Phase')
plt.ylabel('Output Phase')
plt.legend()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# load necessary Runtime libraries
from qiskit_ibm_runtime import QiskitRuntimeService, Sampler, Session
backend = "ibmq_qasm_simulator" # use the simulator
from qiskit.circuit import Parameter
from qiskit.opflow import I, X, Z
mu = Parameter('$\\mu$')
ham_pauli = mu * X
cc = Parameter('$c$')
ww = Parameter('$\\omega$')
ham_res = -(1/2)*ww*(I^Z) + cc*(X^X) + (ham_pauli^I)
tt = Parameter('$t$')
U_ham = (tt*ham_res).exp_i()
from qiskit import transpile
from qiskit.circuit import ClassicalRegister
from qiskit.opflow import PauliTrotterEvolution, Suzuki
import numpy as np
num_trot_steps = 5
total_time = 10
cr = ClassicalRegister(1, 'c')
spec_op = PauliTrotterEvolution(trotter_mode=Suzuki(order=2, reps=num_trot_steps)).convert(U_ham)
spec_circ = spec_op.to_circuit()
spec_circ_t = transpile(spec_circ, basis_gates=['sx', 'rz', 'cx'])
spec_circ_t.add_register(cr)
spec_circ_t.measure(0, cr[0])
spec_circ_t.draw('mpl')
# fixed Parameters
fixed_params = {
cc: 0.3,
mu: 0.7,
tt: total_time
}
# Parameter value for single circuit
param_keys = list(spec_circ_t.parameters)
# run through all the ww values to create a List of Lists of Parameter value
num_pts = 101
wvals = np.linspace(-2, 2, num_pts)
param_vals = []
for wval in wvals:
all_params = {**fixed_params, **{ww: wval}}
param_vals.append([all_params[key] for key in param_keys])
with Session(backend=backend):
sampler = Sampler()
job = sampler.run(
circuits=[spec_circ_t]*num_pts,
parameter_values=param_vals,
shots=1e5
)
result = job.result()
Zexps = []
for dist in result.quasi_dists:
if 1 in dist:
Zexps.append(1 - 2*dist[1])
else:
Zexps.append(1)
from qiskit.opflow import PauliExpectation, Zero
param_bind = {
cc: 0.3,
mu: 0.7,
tt: total_time
}
init_state = Zero^2
obsv = I^Z
Zexp_exact = (U_ham @ init_state).adjoint() @ obsv @ (U_ham @ init_state)
diag_meas_op = PauliExpectation().convert(Zexp_exact)
Zexact_values = []
for w_set in wvals:
param_bind[ww] = w_set
Zexact_values.append(np.real(diag_meas_op.bind_parameters(param_bind).eval()))
import matplotlib.pyplot as plt
plt.style.use('dark_background')
fig, ax = plt.subplots(dpi=100)
ax.plot([-param_bind[mu], -param_bind[mu]], [0, 1], ls='--', color='purple')
ax.plot([param_bind[mu], param_bind[mu]], [0, 1], ls='--', color='purple')
ax.plot(wvals, Zexact_values, label='Exact')
ax.plot(wvals, Zexps, label=f"{backend}")
ax.set_xlabel(r'$\omega$ (arb)')
ax.set_ylabel(r'$\langle Z \rangle$ Expectation')
ax.legend()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# Create circuit to test transpiler on
from qiskit import QuantumCircuit
from qiskit.circuit.library import GroverOperator, Diagonal
oracle = Diagonal([1]*7 + [-1])
qc = QuantumCircuit(3)
qc.h([0,1,2])
qc = qc.compose(GroverOperator(oracle))
# Use Statevector object to calculate the ideal output
from qiskit.quantum_info import Statevector
ideal_distribution = Statevector.from_instruction(qc).probabilities_dict()
from qiskit.visualization import plot_histogram
plot_histogram(ideal_distribution)
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
backend = service.backend('ibm_algiers')
# Need to add measurements to the circuit
qc.measure_all()
from qiskit import transpile
circuits = []
for optimization_level in [0, 3]:
t_qc = transpile(qc,
backend,
optimization_level=optimization_level,
seed_transpiler=0)
print(f'CNOTs (optimization_level={optimization_level}): ',
t_qc.count_ops()['cx'])
circuits.append(t_qc)
from qiskit.transpiler import PassManager, InstructionDurations
from qiskit.transpiler.passes import ASAPSchedule, DynamicalDecoupling
from qiskit.circuit.library import XGate
# Get gate durations so the transpiler knows how long each operation takes
durations = InstructionDurations.from_backend(backend)
# This is the sequence we'll apply to idling qubits
dd_sequence = [XGate(), XGate()]
# Run scheduling and dynamic decoupling passes on circuit
pm = PassManager([ASAPSchedule(durations),
DynamicalDecoupling(durations, dd_sequence)]
)
circ_dd = pm.run(circuits[1])
# Add this new circuit to our list
circuits.append(circ_dd)
from qiskit_ibm_runtime import Sampler, Session
with Session(service=service, backend=backend):
sampler = Sampler()
job = sampler.run(
circuits=circuits, # sample all three circuits
skip_transpilation=True,
shots=8000)
result = job.result()
from qiskit.visualization import plot_histogram
binary_prob = [quasi_dist.binary_probabilities() for quasi_dist in result.quasi_dists]
plot_histogram(binary_prob+[ideal_distribution],
bar_labels=False,
legend=['optimization_level=0',
'optimization_level=3',
'optimization_level=3 + dd',
'ideal distribution'])
from qiskit.quantum_info import hellinger_fidelity
for counts in result.quasi_dists:
print(
f"{hellinger_fidelity(counts.binary_probabilities(), ideal_distribution):.3f}"
)
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit_nature.second_q.drivers import PySCFDriver
driver = PySCFDriver(
atom="H 0 0 0; H 0 0 0.72" # Two Hydrogen atoms, 0.72 Angstrom apart
)
molecule = driver.run()
from qiskit_nature.second_q.mappers import QubitConverter, ParityMapper
qubit_converter = QubitConverter(ParityMapper())
hamiltonian = qubit_converter.convert(molecule.second_q_ops()[0])
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver
sol = NumPyMinimumEigensolver().compute_minimum_eigenvalue(hamiltonian)
real_solution = molecule.interpret(sol)
real_solution.groundenergy
from qiskit_ibm_runtime import QiskitRuntimeService, Estimator, Session, Options
service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator"
from qiskit.algorithms.minimum_eigensolvers import VQE
# Use RealAmplitudes circuit to create trial states
from qiskit.circuit.library import RealAmplitudes
ansatz = RealAmplitudes(num_qubits=2, reps=2)
# Search for better states using SPSA algorithm
from qiskit.algorithms.optimizers import SPSA
optimizer = SPSA(150)
# Set a starting point for reproduceability
import numpy as np
np.random.seed(6)
initial_point = np.random.uniform(-np.pi, np.pi, 12)
# Create an object to store intermediate results
from dataclasses import dataclass
@dataclass
class VQELog:
    """Records the intermediate results handed to the VQE callback."""

    # Estimated energies, one entry per optimizer iteration.
    values: list
    # Ansatz parameter vectors, one entry per optimizer iteration.
    parameters: list

    def update(self, count, parameters, mean, _metadata):
        """Store one callback invocation and print an in-place progress line."""
        self.values += [mean]
        self.parameters += [parameters]
        print(f"Running circuit {count} of ~350", end="\r", flush=True)
log = VQELog([],[])
# Main calculation
with Session(service=service, backend=backend) as session:
options = Options()
options.optimization_level = 3
vqe = VQE(Estimator(session=session, options=options),
ansatz, optimizer, callback=log.update, initial_point=initial_point)
result = vqe.compute_minimum_eigenvalue(hamiltonian)
print("Experiment complete.".ljust(30))
print(f"Raw result: {result.optimal_value}")
if 'simulator' not in backend:
# Run once with ZNE error mitigation
options.resilience_level = 2
vqe = VQE(Estimator(session=session, options=options),
ansatz, SPSA(1), initial_point=result.optimal_point)
result = vqe.compute_minimum_eigenvalue(hamiltonian)
print(f"Mitigated result: {result.optimal_value}")
import matplotlib.pyplot as plt
plt.rcParams["font.size"] = 14
# Plot energy and reference value
plt.figure(figsize=(12, 6))
plt.plot(log.values, label="Estimator VQE")
plt.axhline(y=real_solution.groundenergy, color="tab:red", ls="--", label="Target")
plt.legend(loc="best")
plt.xlabel("Iteration")
plt.ylabel("Energy [H]")
plt.title("VQE energy")
plt.show()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit.circuit import Parameter
from qiskit import QuantumCircuit
theta = Parameter('$\\theta$')
chsh_circuits_no_meas = QuantumCircuit(2)
chsh_circuits_no_meas.h(0)
chsh_circuits_no_meas.cx(0, 1)
chsh_circuits_no_meas.ry(theta, 0)
chsh_circuits_no_meas.draw('mpl')
import numpy as np
number_of_phases = 21
phases = np.linspace(0, 2*np.pi, number_of_phases)
# Phases need to be expressed as list of lists in order to work
individual_phases = [[ph] for ph in phases]
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator" # use the simulator
from qiskit_ibm_runtime import Estimator, Session
from qiskit.quantum_info import SparsePauliOp
ZZ = SparsePauliOp.from_list([("ZZ", 1)])
ZX = SparsePauliOp.from_list([("ZX", 1)])
XZ = SparsePauliOp.from_list([("XZ", 1)])
XX = SparsePauliOp.from_list([("XX", 1)])
ops = [ZZ, ZX, XZ, XX]
chsh_est_sim = []
# Simulator
with Session(service=service, backend=backend):
estimator = Estimator()
for op in ops:
job = estimator.run(
circuits=[chsh_circuits_no_meas]*len(individual_phases),
observables=[op]*len(individual_phases),
parameter_values=individual_phases)
est_result = job.result()
chsh_est_sim.append(est_result)
# <CHSH1> = <AB> - <Ab> + <aB> + <ab>
chsh1_est_sim = chsh_est_sim[0].values - chsh_est_sim[1].values + chsh_est_sim[2].values + chsh_est_sim[3].values
# <CHSH2> = <AB> + <Ab> - <aB> + <ab>
chsh2_est_sim = chsh_est_sim[0].values + chsh_est_sim[1].values - chsh_est_sim[2].values + chsh_est_sim[3].values
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
fig, ax = plt.subplots(figsize=(10, 6))
# results from a simulator
ax.plot(phases/np.pi, chsh1_est_sim, 'o-', label='CHSH1 Simulation')
ax.plot(phases/np.pi, chsh2_est_sim, 'o-', label='CHSH2 Simulation')
# classical bound +-2
ax.axhline(y=2, color='r', linestyle='--')
ax.axhline(y=-2, color='r', linestyle='--')
# quantum bound, +-2√2
ax.axhline(y=np.sqrt(2)*2, color='b', linestyle='-.')
ax.axhline(y=-np.sqrt(2)*2, color='b', linestyle='-.')
# set x tick labels to the unit of pi
ax.xaxis.set_major_formatter(tck.FormatStrFormatter('%g $\pi$'))
ax.xaxis.set_major_locator(tck.MultipleLocator(base=0.5))
# set title, labels, and legend
plt.title('Violation of CHSH Inequality')
plt.xlabel('Theta')
plt.ylabel('CHSH witness')
plt.legend()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import datetime
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rcParams.update({"text.usetex": True})
plt.rcParams["figure.figsize"] = (6,4)
mpl.rcParams["figure.dpi"] = 200
from qiskit_ibm_runtime import Estimator, Session, QiskitRuntimeService, Options
from qiskit.quantum_info import SparsePauliOp
from qiskit import QuantumCircuit
service = QiskitRuntimeService()
backend_simulator = "backend_simulator"
backend = "ibmq_montreal"
qubits = 4
trotter_layer = QuantumCircuit(qubits)
trotter_layer.rx(0.1, range(qubits))
trotter_layer.cx(0, 1)
trotter_layer.cx(2, 3)
trotter_layer.rz(-0.2, [1, 3])
trotter_layer.cx(0, 1)
trotter_layer.cx(2, 3)
trotter_layer.cx(1, 2)
trotter_layer.rz(-0.2, 2)
trotter_layer.cx(1, 2)
num_steps = 6
trotter_circuit_list = []
for i in range(1, num_steps):
    # Build a circuit consisting of `i` repetitions of the Trotter layer.
    trotter_circuit = QuantumCircuit(qubits)
    for _ in range(i):
        trotter_circuit = trotter_circuit.compose(trotter_layer)
    trotter_circuit_list.append(trotter_circuit)
    # Fixed: the original f-string was terminated with a backtick (`) instead
    # of a closing single quote, which is a SyntaxError.
    print(f'Trotter circuit with {i} Trotter steps')
    display(trotter_circuit.draw(fold=-1))
obs = SparsePauliOp("Z"*qubits)
obs_list = [obs]*len(trotter_circuit_list)
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # No optimization
options.resilience_level = 0 # No mitigation
with Session(service=service, backend=backend_simulator) as session:
estimator_sim = Estimator(session=session, options=options)
job_sim = estimator_sim.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job_sim.job_id)
print(job_sim.result())
expvals_ideal = job_sim.result().values
expvals_ideal_variance = [metadata['variance']/metadata['shots'] for metadata in job_sim.result().metadata]
std_error_ideal = np.sqrt(expvals_ideal_variance)
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # No optimization
options.resilience_level = 0 # No error mitigation
with Session(service=service, backend=backend) as session:
estimator = Estimator(session=session, options=options)
job = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job.job_id)
print(job.result())
expvals_unmit = job.result().values
expvals_unmit_variance = [metadata['variance']/metadata['shots'] for metadata in job.result().metadata]
std_error_unmit = np.sqrt(expvals_unmit_variance)
options = Options()
options.execution.shots = 1000
options.optimization_level = 3 # Dynamical decoupling
options.resilience_level = 0 # No error mitigation
with Session(service=service, backend=backend) as session:
estimator = Estimator(session=session, options=options)
job_dd = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job_dd.job_id)
print(job_dd.result())
expvals_unmit_dd = job_dd.result().values
expvals_unmit_dd_variance = [metadata['variance']/metadata['shots'] for metadata in job_dd.result().metadata]
std_error_dd = np.sqrt(expvals_unmit_dd_variance)
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_dd, std_error_dd, fmt = 'o', linestyle = '-', capsize=4, c='blue', label='Dynamical decoupling')
plt.ylabel(f"$\langle ZZZZ \\rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
options = Options()
options.resilience_level = 1 # T-REx
options.optimization_level = 0 # No optimization
options.execution.shots = 1000
with Session(service=service, backend=backend) as session:
estimator = Estimator(session=session, options=options)
job_trex = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job_trex.job_id)
print(job_trex.result())
expvals_unmit_trex = job_trex.result().values
expvals_unmit_trex_variance = [metadata['variance']/metadata['shots'] for metadata in job_trex.result().metadata]
std_error_trex = np.sqrt(expvals_unmit_trex_variance)
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_trex, std_error_trex, fmt = 'o', linestyle = '-', capsize=4, c='violet', label='T-REx')
plt.ylabel(f"$\langle ZZZZ \\rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # No optimization
options.resilience_level = 2 # ZNE
with Session(service=service, backend=backend) as session:
estimator = Estimator(session=session, options=options)
job_zne = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job_zne.job_id)
print(job_zne.result())
expvals_unmit_zne = job_zne.result().values
# Standard error: coming soon!
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_zne, [0]*(num_steps-1), fmt = 'o', linestyle = '-', capsize=4, c='cyan', label='ZNE')
plt.xlabel('No. Trotter Steps')
plt.ylabel(f"$\langle ZZZZ \\rangle$")
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
def interim_results_callback(job_id, result):
    """Print each interim result as it streams in, prefixed with a timestamp."""
    timestamp = datetime.datetime.now()
    print(timestamp, "*** Callback ***", result, "\n")
options = Options()
options.optimization_level = 0 # No optimization
options.execution.shots = 100
options.resilience_level = 3 # PEC
options.environment.callback = interim_results_callback
with Session(service=service, backend=backend) as session:
estimator_pec = Estimator(session=session, options=options)
job_pec = estimator_pec.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job_pec.job_id)
expvals_pec = job_pec.result().values
std_error_pec = [metadata['standard_error'] for metadata in job_pec.result().metadata]
plt.title('Trotter circuits expectation value')
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_pec, std_error_pec, fmt = 'd', linestyle = '-', capsize=4, c='orange', label='PEC')
plt.ylabel(f"$\langle ZZZZ \\rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
print(job_pec.result())
pec_metadata = job_pec.result().metadata
fig, ax = plt.subplots()
fig.subplots_adjust(right=0.75)
twin1 = ax.twinx()
twin2 = ax.twinx()
twin3 = ax.twinx()
twin2.spines.right.set_position(("axes", 1.2))
twin3.spines.right.set_position(("axes", 1.4))
p1, = ax.plot(range(1, num_steps), [m["total_mitigated_layers"] for m in pec_metadata] , "b-", label="Total mitigated layers")
p2, = twin1.plot(range(1, num_steps), [m["sampling_overhead"] for m in pec_metadata], "r-", label="Sampling overhead")
p3, = twin2.plot(range(1, num_steps), [m["samples"] for m in pec_metadata], "g-", label="Samples")
p4, = twin3.plot(range(1, num_steps), [m["shots"] for m in pec_metadata], "c-", label="Shots")
ax.set_ylim(0, 20)
twin1.set_ylim(0, 2.8)
twin2.set_ylim(0, 300)
twin3.set_ylim(0, 35000)
ax.set_xlabel("No. Trotter Steps")
ax.set_ylabel("Total mitigated layers")
twin1.set_ylabel("Sampling overhead")
twin2.set_ylabel("Samples")
twin3.set_ylabel("Shots")
ax.yaxis.label.set_color(p1.get_color())
twin1.yaxis.label.set_color(p2.get_color())
twin2.yaxis.label.set_color(p3.get_color())
twin3.yaxis.label.set_color(p4.get_color())
tkw = dict(size=4, width=1.5)
ax.tick_params(axis='y', colors=p1.get_color(), **tkw)
twin1.tick_params(axis='y', colors=p2.get_color(), **tkw)
twin2.tick_params(axis='y', colors=p3.get_color(), **tkw)
twin3.tick_params(axis='y', colors=p4.get_color(), **tkw)
plt.xticks([1, 2, 3, 4, 5])
ax.legend(handles=[p1, p2, p3, p4])
plt.title('PEC metadata')
plt.show()
from matplotlib.pyplot import figure
plt.errorbar(range(1, num_steps), expvals_ideal, std_error_ideal, fmt = 'o', linestyle = '--', capsize=4, c='red', label='Ideal')
plt.errorbar(range(1, num_steps), expvals_unmit, std_error_unmit, fmt = 'o', linestyle = '-', capsize=4, c='green', label='No mitigation')
plt.errorbar(range(1, num_steps), expvals_unmit_trex, std_error_trex, fmt = 'o', linestyle = '-', capsize=4, c='violet', label='T-REx')
plt.errorbar(range(1, num_steps), expvals_unmit_zne, [0]*(num_steps-1), fmt = 'o', linestyle = '-', capsize=4, c='cyan', label='ZNE')
plt.errorbar(range(1, num_steps), expvals_pec, std_error_pec, fmt = 'd', linestyle = '-', capsize=4, c='orange', label='PEC')
plt.title('Trotter circuits expectation value')
plt.ylabel(f"$\langle ZZZZ \\rangle$")
plt.xlabel('No. Trotter Steps')
plt.xticks([1, 2, 3, 4, 5])
plt.legend()
plt.show()
options = Options()
options.execution.shots = 1000
options.optimization_level = 0 # no optimization
options.resilience_level = 2 # ZNE
options.resilience.noise_factors = [1, 2, 3, 4]
options.resilience.noise_amplifier = "LocalFoldingAmplifier"
options.resilience.extrapolator = "QuadraticExtrapolator"
with Session(service=service, backend='ibmq_montreal') as session:
estimator = Estimator(session=session, options=options)
job_zne_options = estimator.run(circuits=trotter_circuit_list, observables=obs_list)
print('job id:', job_zne_options.job_id)
print(job_zne_options.result())
from qiskit.tools import jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import random
from qiskit.quantum_info import Statevector
secret = random.randint(0,7) # the owner is randomly picked
secret_string = format(secret, '03b') # format the owner in 3-bit string
oracle = Statevector.from_label(secret_string) # let the oracle know the owner
from qiskit.algorithms import AmplificationProblem
problem = AmplificationProblem(oracle, is_good_state=secret_string)
from qiskit.algorithms import Grover
grover_circuits = []
for iteration in range(1,3):
grover = Grover(iterations=iteration)
circuit = grover.construct_circuit(problem)
circuit.measure_all()
grover_circuits.append(circuit)
# Grover's circuit with 1 iteration
grover_circuits[0].draw()
# Grover's circuit with 2 iterations
grover_circuits[1].draw()
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator" # use the simulator
from qiskit_ibm_runtime import Sampler, Session
with Session(service=service, backend=backend):
sampler = Sampler()
job = sampler.run(circuits=grover_circuits, shots=1000)
result = job.result()
print(result)
from qiskit.tools.visualization import plot_histogram
# Extract bit string with highest probability from results as the answer
result_dict = result.quasi_dists[1].binary_probabilities()
answer = max(result_dict, key=result_dict.get)
print(f"As you can see, the quantum computer returned '{answer}' as the answer with highest probability.\n"
"And the results with 2 iterations have higher probability than the results with 1 iteration."
)
# Plot the results
plot_histogram(result.quasi_dists, legend=['1 iteration', '2 iterations'])
# Print the results and the correct answer.
print(f"Quantum answer: {answer}")
print(f"Correct answer: {secret_string}")
print('Success!' if answer == secret_string else 'Failure!')
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit.circuit.random import random_circuit
circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(circuit.draw("mpl"))
from qiskit.quantum_info import SparsePauliOp
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")
from qiskit.primitives import Estimator
estimator = Estimator()
job = estimator.run(circuit, observable)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Expectation value: {result.values[0]}")
circuit = random_circuit(2, 2, seed=1).decompose(reps=1)
observable = SparsePauliOp("IY")
job = estimator.run(circuit, observable)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Observable: {observable.paulis}")
print(f">>> Expectation value: {result.values[0]}")
circuits = (
random_circuit(2, 2, seed=0).decompose(reps=1),
random_circuit(2, 2, seed=1).decompose(reps=1),
)
observables = (
SparsePauliOp("XZ"),
SparsePauliOp("IY"),
)
job = estimator.run(circuits, observables)
result = job.result()
[display(cir.draw("mpl")) for cir in circuits]
print(f">>> Observables: {[obs.paulis for obs in observables]}")
print(f">>> Expectation values: {result.values.tolist()}")
from qiskit.circuit.library import RealAmplitudes
circuit = RealAmplitudes(num_qubits=2, reps=2).decompose(reps=1)
observable = SparsePauliOp("ZI")
parameter_values = [0, 1, 2, 3, 4, 5]
job = estimator.run(circuit, observable, parameter_values)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Observable: {observable.paulis}")
print(f">>> Parameter values: {parameter_values}")
print(f">>> Expectation value: {result.values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService(channel="ibm_quantum")
backend = service.backend("ibmq_qasm_simulator")
from qiskit.circuit.random import random_circuit
from qiskit.quantum_info import SparsePauliOp
circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(circuit.draw("mpl"))
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")
from qiskit_ibm_runtime import Estimator
estimator = Estimator(session=backend)
job = estimator.run(circuit, observable)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Expectation value: {result.values[0]}")
print(f" > Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
options = Options(optimization_level=3, environment={"log_level": "INFO"})
from qiskit_ibm_runtime import Options
options = Options()
options.resilience_level = 1
options.execution.shots = 2048
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable).result()
print(f">>> Metadata: {result.metadata[0]}")
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable, shots=1024).result()
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
# optimization_level=3 adds dynamical decoupling
# resilience_level=1 adds readout error mitigation
options = Options(optimization_level=3, resilience_level=1)
estimator = Estimator(session=backend, options=options)
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value: {result.values[0]}")
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Session, Estimator
with Session(backend=backend, max_time="1h"):
estimator = Estimator()
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value from the first run: {result.values[0]}")
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value from the second run: {result.values[0]}")
from qiskit.circuit.random import random_circuit
sampler_circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
sampler_circuit.measure_all()
display(circuit.draw("mpl"))
from qiskit_ibm_runtime import Session, Sampler, Estimator
with Session(backend=backend):
sampler = Sampler()
estimator = Estimator()
result = sampler.run(sampler_circuit).result()
print(f">>> Quasi Distribution from the sampler job: {result.quasi_dists[0]}")
result = estimator.run(circuit, observable).result()
print(f">>> Expectation value from the estimator job: {result.values[0]}")
from qiskit_ibm_runtime import Session, Sampler, Estimator
with Session(backend=backend):
sampler = Sampler()
estimator = Estimator()
sampler_job = sampler.run(sampler_circuit)
estimator_job = estimator.run(circuit, observable)
print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService, Session, Sampler, Estimator, Options
# 1. Initialize account
service = QiskitRuntimeService(channel="ibm_quantum")
# 2. Specify options, such as enabling error mitigation
options = Options(resilience_level=1)
# 3. Select a backend.
backend = service.backend("ibmq_qasm_simulator")
# 4. Create a session
with Session(backend=backend):
# 5. Create primitive instances
sampler = Sampler(options=options)
estimator = Estimator(options=options)
# 6. Submit jobs
sampler_job = sampler.run(sampler_circuit)
estimator_job = estimator.run(circuit, observable)
# 7. Get results
print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit.circuit.random import random_circuit
circuit = random_circuit(2, 2, seed=0, measure=True).decompose(reps=1)
display(circuit.draw("mpl"))
from qiskit.primitives import Sampler
sampler = Sampler()
job = sampler.run(circuit)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Quasi-distribution: {result.quasi_dists[0]}")
circuit = random_circuit(2, 2, seed=1, measure=True).decompose(reps=1)
job = sampler.run(circuit)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Quasi-distribution: {result.quasi_dists[0]}")
circuits = (
random_circuit(2, 2, seed=0, measure=True).decompose(reps=1),
random_circuit(2, 2, seed=1, measure=True).decompose(reps=1),
)
job = sampler.run(circuits)
result = job.result()
[display(cir.draw("mpl")) for cir in circuits]
print(f">>> Quasi-distribution: {result.quasi_dists}")
from qiskit.circuit.library import RealAmplitudes
circuit = RealAmplitudes(num_qubits=2, reps=2).decompose(reps=1)
circuit.measure_all()
parameter_values = [0, 1, 2, 3, 4, 5]
job = sampler.run(circuit, parameter_values)
result = job.result()
display(circuit.draw("mpl"))
print(f">>> Parameter values: {parameter_values}")
print(f">>> Quasi-distribution: {result.quasi_dists[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService(channel="ibm_quantum")
backend = service.backend("ibmq_qasm_simulator")
from qiskit.circuit.random import random_circuit
circuit = random_circuit(2, 2, seed=0, measure=True).decompose(reps=1)
display(circuit.draw("mpl"))
from qiskit_ibm_runtime import Sampler
sampler = Sampler(session=backend)
job = sampler.run(circuit)
print(f">>> Job ID: {job.job_id()}")
print(f">>> Job Status: {job.status()}")
result = job.result()
print(f">>> {result}")
print(f" > Quasi-distribution: {result.quasi_dists[0]}")
print(f" > Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
options = Options(optimization_level=3, environment={"log_level": "INFO"})
from qiskit_ibm_runtime import Options
options = Options()
options.resilience_level = 1
options.execution.shots = 2048
sampler = Sampler(session=backend, options=options)
result = sampler.run(circuit).result()
print(f">>> Metadata: {result.metadata[0]}")
sampler = Sampler(session=backend, options=options)
result = sampler.run(circuit, shots=1024).result()
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Options
# optimization_level=3 adds dynamical decoupling
# resilience_level=1 adds readout error mitigation
options = Options(optimization_level=3, resilience_level=1)
sampler = Sampler(session=backend, options=options)
result = sampler.run(circuit).result()
print(f">>> Quasi-distribution: {result.quasi_dists[0]}")
print(f">>> Metadata: {result.metadata[0]}")
from qiskit_ibm_runtime import Session, Estimator
with Session(backend=backend, max_time="1h"):
sampler = Sampler()
result = sampler.run(circuit).result()
print(f">>> Quasi-distribution from the first run: {result.quasi_dists[0]}")
result = sampler.run(circuit).result()
print(f">>> Quasi-distribution from the second run: {result.quasi_dists[0]}")
from qiskit.circuit.random import random_circuit
from qiskit.quantum_info import SparsePauliOp
estimator_circuit = random_circuit(2, 2, seed=0).decompose(reps=1)
display(estimator_circuit.draw("mpl"))
observable = SparsePauliOp("XZ")
print(f">>> Observable: {observable.paulis}")
from qiskit_ibm_runtime import Session, Sampler, Estimator
with Session(backend=backend):
sampler = Sampler()
estimator = Estimator()
result = sampler.run(circuit).result()
print(f">>> Quasi Distribution from the sampler job: {result.quasi_dists[0]}")
result = estimator.run(estimator_circuit, observable).result()
print(f">>> Expectation value from the estimator job: {result.values[0]}")
from qiskit_ibm_runtime import Session, Sampler, Estimator
with Session(backend=backend):
sampler = Sampler()
estimator = Estimator()
sampler_job = sampler.run(circuit)
estimator_job = estimator.run(estimator_circuit, observable)
print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
from qiskit_ibm_runtime import QiskitRuntimeService, Session, Sampler, Estimator, Options
# 1. Initialize account
service = QiskitRuntimeService(channel="ibm_quantum")
# 2. Specify options, such as enabling error mitigation
options = Options(resilience_level=1)
# 3. Select a backend.
backend = service.backend("ibmq_qasm_simulator")
# 4. Create a session
with Session(backend=backend):
# 5. Create primitive instances
sampler = Sampler(options=options)
estimator = Estimator(options=options)
# 6. Submit jobs
sampler_job = sampler.run(circuit)
estimator_job = estimator.run(estimator_circuit, observable)
# 7. Get results
print(f">>> Quasi Distribution from the sampler job: {sampler_job.result().quasi_dists[0]}")
print(f">>> Expectation value from the estimator job: {estimator_job.result().values[0]}")
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import numpy as np
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.circuit.library import QFT
def create_qpe_circuit(theta, num_qubits):
    """Create a Quantum Phase Estimation circuit for the given phase.

    Args:
        theta: Phase to estimate as a fraction of a full turn (the 2*pi
            factor is applied inside this function). May be a float or a
            qiskit ``Parameter``.
        num_qubits: Number of counting qubits in the first register.

    Returns:
        QuantumCircuit: QPE circuit with ``num_qubits`` readout bits.
    """
    # Step 1: Create a circuit with two quantum registers and one classical register.
    first = QuantumRegister(size=num_qubits, name='first')  # the first register for phase estimation
    second = QuantumRegister(size=1, name='second')  # the second register for storing eigenvector |psi>
    classical = ClassicalRegister(size=num_qubits, name='readout')  # classical register for readout
    qpe_circuit = QuantumCircuit(first, second, classical)

    # Step 2: Initialize the qubits.
    # All qubits are initialized in |0> by default, no extra code is needed to initialize the first register.
    qpe_circuit.x(second)  # Initialize the second register with state |psi>, which is |1> in this example.

    # Step 3: Create superposition in the first register.
    qpe_circuit.barrier()  # Add barriers to separate each step of the algorithm for better visualization.
    qpe_circuit.h(first)

    # Step 4: Apply a controlled-U^(2^j) black box.
    qpe_circuit.barrier()
    for j in range(num_qubits):
        qpe_circuit.cp(theta*2*np.pi*(2**j), j, num_qubits)  # Theta doesn't contain the 2 pi factor.

    # Step 5: Apply an inverse QFT to the first register.
    qpe_circuit.barrier()
    qpe_circuit.compose(QFT(num_qubits, inverse=True), inplace=True)

    # Step 6: Measure the first register.
    qpe_circuit.barrier()
    qpe_circuit.measure(first, classical)

    return qpe_circuit
# Build one QPE circuit with a fixed phase and one with a free parameter.
num_qubits = 4
qpe_circuit_fixed_phase = create_qpe_circuit(1/2, num_qubits)  # Create a QPE circuit with fixed theta=1/2.
qpe_circuit_fixed_phase.draw('mpl')

from qiskit.circuit import Parameter

theta = Parameter('theta')  # Create a parameter `theta` whose values can be assigned later.
qpe_circuit_parameterized = create_qpe_circuit(theta, num_qubits)
qpe_circuit_parameterized.draw('mpl')

# Sweep the input phase over [0, 2].
number_of_phases = 21
phases = np.linspace(0, 2, number_of_phases)
individual_phases = [[ph] for ph in phases]  # Phases need to be expressed as a list of lists.

from qiskit_ibm_runtime import QiskitRuntimeService

service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator"  # use the simulator

from qiskit_ibm_runtime import Sampler, Session

# Run one copy of the parameterized circuit per phase value in one session.
with Session(service=service, backend=backend):
    results = Sampler().run(
        [qpe_circuit_parameterized]*len(individual_phases),
        parameter_values=individual_phases
    ).result()

from qiskit.tools.visualization import plot_histogram

idx = 6
plot_histogram(results.quasi_dists[idx].binary_probabilities(), legend=[f'$\\theta$={phases[idx]:.3f}'])
def most_likely_bitstring(results_dict):
    """Return the outcome bit string with the highest probability.

    Ties are broken in favor of the key encountered first.
    """
    best_key = None
    best_prob = None
    for key, prob in results_dict.items():
        if best_prob is None or prob > best_prob:
            best_key, best_prob = key, prob
    return best_key
def find_neighbors(bitstring):
    """Find the neighbors of a bit string.

    Neighbors are the values one below and one above the string read as a
    binary integer, wrapping around at both ends of the range.

    Example:
        For bit string '1010', this function returns ('1001', '1011')

    Args:
        bitstring: Non-empty string of '0'/'1' characters.

    Returns:
        tuple: ``(neighbor_left, neighbor_right)``, zero-padded to the
        same width as the input.
    """
    width = len(bitstring)
    value = int(bitstring, 2)
    # Decrement/increment modulo 2**width so '00...0' wraps to '11...1'
    # on the left and '11...1' wraps to '00...0' on the right.
    neighbor_left = format((value - 1) % 2**width, f'0{width}b')
    neighbor_right = format((value + 1) % 2**width, f'0{width}b')
    return (neighbor_left, neighbor_right)
def estimate_phase(results_dict):
    """Estimate the phase from a result dictionary of a QPE circuit.

    Takes the most likely readout N1 and the better of its two neighbors
    N2, and returns the probability-weighted average of their phases.
    """
    # Most likely outcome bit string and its wrap-around neighbors.
    num_1_key = most_likely_bitstring(results_dict)
    neighbor_left, neighbor_right = find_neighbors(num_1_key)

    # Probabilities (None when a key was never observed).
    num_1_prob = results_dict.get(num_1_key)
    neighbor_left_prob = results_dict.get(neighbor_left)
    neighbor_right_prob = results_dict.get(neighbor_right)

    # Pick the runner-up N2 among the observed neighbors; when neither
    # neighbor was observed, N2 degenerates to N1 itself.
    if neighbor_left_prob is None and neighbor_right_prob is None:
        num_2_key, num_2_prob = num_1_key, num_1_prob
    elif neighbor_right_prob is None:
        num_2_key, num_2_prob = neighbor_left, neighbor_left_prob
    elif neighbor_left_prob is None:
        num_2_key, num_2_prob = neighbor_right, neighbor_right_prob
    elif neighbor_left_prob > neighbor_right_prob:
        num_2_key, num_2_prob = neighbor_left, neighbor_left_prob
    else:
        # Ties go to the right neighbor, matching the original ordering.
        num_2_key, num_2_prob = neighbor_right, neighbor_right_prob

    # Convert both bit strings to phases in [0, 1).
    scale = 2 ** len(num_1_key)
    num_1_phase = int(num_1_key, 2) / scale
    num_2_phase = int(num_2_key, 2) / scale

    # Probability-weighted average of the two candidate phases.
    return (num_1_phase * num_1_prob + num_2_phase * num_2_prob) / (num_1_prob + num_2_prob)
qpe_solutions = []
for idx, result_dict in enumerate(results.quasi_dists):
qpe_solutions.append(estimate_phase(result_dict.binary_probabilities()))
ideal_solutions = np.append(
phases[:(number_of_phases-1)//2], # first period
np.subtract(phases[(number_of_phases-1)//2:-1], 1) # second period
)
ideal_solutions = np.append(ideal_solutions, np.subtract(phases[-1], 2)) # starting point of the third period
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 6))
plt.plot(phases, ideal_solutions, '--', label='Ideal solutions')
plt.plot(phases, qpe_solutions, 'o', label='QPE solutions')
plt.title('Quantum Phase Estimation Algorithm')
plt.xlabel('Input Phase')
plt.ylabel('Output Phase')
plt.legend()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# load necessary Runtime libraries
from qiskit_ibm_runtime import QiskitRuntimeService, Sampler, Session
backend = "ibmq_qasm_simulator" # use the simulator
from qiskit.circuit import Parameter
from qiskit.opflow import I, X, Z
mu = Parameter('$\\mu$')
ham_pauli = mu * X
cc = Parameter('$c$')
ww = Parameter('$\\omega$')
ham_res = -(1/2)*ww*(I^Z) + cc*(X^X) + (ham_pauli^I)
tt = Parameter('$t$')
U_ham = (tt*ham_res).exp_i()
from qiskit import transpile
from qiskit.circuit import ClassicalRegister
from qiskit.opflow import PauliTrotterEvolution, Suzuki
import numpy as np
num_trot_steps = 5
total_time = 10
cr = ClassicalRegister(1, 'c')
spec_op = PauliTrotterEvolution(trotter_mode=Suzuki(order=2, reps=num_trot_steps)).convert(U_ham)
spec_circ = spec_op.to_circuit()
spec_circ_t = transpile(spec_circ, basis_gates=['sx', 'rz', 'cx'])
spec_circ_t.add_register(cr)
spec_circ_t.measure(0, cr[0])
spec_circ_t.draw('mpl')
# fixed Parameters
fixed_params = {
cc: 0.3,
mu: 0.7,
tt: total_time
}
# Parameter value for single circuit
param_keys = list(spec_circ_t.parameters)
# run through all the ww values to create a List of Lists of Parameter value
num_pts = 101
wvals = np.linspace(-2, 2, num_pts)
param_vals = []
for wval in wvals:
all_params = {**fixed_params, **{ww: wval}}
param_vals.append([all_params[key] for key in param_keys])
with Session(backend=backend):
sampler = Sampler()
job = sampler.run(
circuits=[spec_circ_t]*num_pts,
parameter_values=param_vals,
shots=1e5
)
result = job.result()
Zexps = []
for dist in result.quasi_dists:
if 1 in dist:
Zexps.append(1 - 2*dist[1])
else:
Zexps.append(1)
from qiskit.opflow import PauliExpectation, Zero
param_bind = {
cc: 0.3,
mu: 0.7,
tt: total_time
}
init_state = Zero^2
obsv = I^Z
Zexp_exact = (U_ham @ init_state).adjoint() @ obsv @ (U_ham @ init_state)
diag_meas_op = PauliExpectation().convert(Zexp_exact)
Zexact_values = []
for w_set in wvals:
param_bind[ww] = w_set
Zexact_values.append(np.real(diag_meas_op.bind_parameters(param_bind).eval()))
import matplotlib.pyplot as plt
plt.style.use('dark_background')
fig, ax = plt.subplots(dpi=100)
ax.plot([-param_bind[mu], -param_bind[mu]], [0, 1], ls='--', color='purple')
ax.plot([param_bind[mu], param_bind[mu]], [0, 1], ls='--', color='purple')
ax.plot(wvals, Zexact_values, label='Exact')
ax.plot(wvals, Zexps, label=f"{backend}")
ax.set_xlabel(r'$\omega$ (arb)')
ax.set_ylabel(r'$\langle Z \rangle$ Expectation')
ax.legend()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# Create circuit to test transpiler on
from qiskit import QuantumCircuit
from qiskit.circuit.library import GroverOperator, Diagonal
oracle = Diagonal([1]*7 + [-1])
qc = QuantumCircuit(3)
qc.h([0,1,2])
qc = qc.compose(GroverOperator(oracle))
# Use Statevector object to calculate the ideal output
from qiskit.quantum_info import Statevector
ideal_distribution = Statevector.from_instruction(qc).probabilities_dict()
from qiskit.visualization import plot_histogram
plot_histogram(ideal_distribution)
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
backend = service.backend('ibm_algiers')
# Need to add measurements to the circuit
qc.measure_all()
from qiskit import transpile
circuits = []
for optimization_level in [0, 3]:
t_qc = transpile(qc,
backend,
optimization_level=optimization_level,
seed_transpiler=0)
print(f'CNOTs (optimization_level={optimization_level}): ',
t_qc.count_ops()['cx'])
circuits.append(t_qc)
from qiskit.transpiler import PassManager, InstructionDurations
from qiskit.transpiler.passes import ASAPSchedule, DynamicalDecoupling
from qiskit.circuit.library import XGate
# Get gate durations so the transpiler knows how long each operation takes
durations = InstructionDurations.from_backend(backend)
# This is the sequence we'll apply to idling qubits
dd_sequence = [XGate(), XGate()]
# Run scheduling and dynamic decoupling passes on circuit
pm = PassManager([ASAPSchedule(durations),
DynamicalDecoupling(durations, dd_sequence)]
)
circ_dd = pm.run(circuits[1])
# Add this new circuit to our list
circuits.append(circ_dd)
from qiskit_ibm_runtime import Sampler, Session
with Session(service=service, backend=backend):
sampler = Sampler()
job = sampler.run(
circuits=circuits, # sample all three circuits
skip_transpilation=True,
shots=8000)
result = job.result()
from qiskit.visualization import plot_histogram
binary_prob = [quasi_dist.binary_probabilities() for quasi_dist in result.quasi_dists]
plot_histogram(binary_prob+[ideal_distribution],
bar_labels=False,
legend=['optimization_level=0',
'optimization_level=3',
'optimization_level=3 + dd',
'ideal distribution'])
from qiskit.quantum_info import hellinger_fidelity
for counts in result.quasi_dists:
print(
f"{hellinger_fidelity(counts.binary_probabilities(), ideal_distribution):.3f}"
)
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
from qiskit.tools.jupyter import *
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# Define the H2 molecule and derive its qubit Hamiltonian.
from qiskit_nature.second_q.drivers import PySCFDriver

driver = PySCFDriver(
    atom="H 0 0 0; H 0 0 0.72"  # Two Hydrogen atoms, 0.72 Angstrom apart
)
molecule = driver.run()

from qiskit_nature.second_q.mappers import QubitConverter, ParityMapper

qubit_converter = QubitConverter(ParityMapper())
hamiltonian = qubit_converter.convert(molecule.second_q_ops()[0])

# Classical reference: exact ground-state energy for comparison.
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver

sol = NumPyMinimumEigensolver().compute_minimum_eigenvalue(hamiltonian)
real_solution = molecule.interpret(sol)
real_solution.groundenergy

from qiskit_ibm_runtime import QiskitRuntimeService, Estimator, Session, Options

service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator"

from qiskit.algorithms.minimum_eigensolvers import VQE

# Use RealAmplitudes circuit to create trial states
from qiskit.circuit.library import RealAmplitudes

ansatz = RealAmplitudes(num_qubits=2, reps=2)

# Search for better states using SPSA algorithm
from qiskit.algorithms.optimizers import SPSA

optimizer = SPSA(150)

# Set a starting point for reproduceability
import numpy as np

np.random.seed(6)
initial_point = np.random.uniform(-np.pi, np.pi, 12)
# Create an object to store intermediate results
from dataclasses import dataclass


@dataclass
class VQELog:
    """Callback target that records every VQE evaluation.

    Passed as ``callback=log.update`` to VQE; each call appends one
    energy estimate and the parameter vector that produced it.
    """

    # Per-evaluation history, appended to by update().
    values: list
    parameters: list

    def update(self, count, parameters, mean, _metadata):
        """Record one evaluation and print a carriage-return progress line.

        Args:
            count: Evaluation counter supplied by VQE.
            parameters: Parameter vector used for this evaluation.
            mean: Estimated energy for this evaluation.
            _metadata: Estimator metadata; unused here.
        """
        self.values.append(mean)
        self.parameters.append(parameters)
        print(f"Running circuit {count} of ~350", end="\r", flush=True)
log = VQELog([],[])
# Main calculation
with Session(service=service, backend=backend) as session:
options = Options()
options.optimization_level = 3
vqe = VQE(Estimator(session=session, options=options),
ansatz, optimizer, callback=log.update, initial_point=initial_point)
result = vqe.compute_minimum_eigenvalue(hamiltonian)
print("Experiment complete.".ljust(30))
print(f"Raw result: {result.optimal_value}")
if 'simulator' not in backend:
# Run once with ZNE error mitigation
options.resilience_level = 2
vqe = VQE(Estimator(session=session, options=options),
ansatz, SPSA(1), initial_point=result.optimal_point)
result = vqe.compute_minimum_eigenvalue(hamiltonian)
print(f"Mitigated result: {result.optimal_value}")
import matplotlib.pyplot as plt
plt.rcParams["font.size"] = 14
# Plot energy and reference value
plt.figure(figsize=(12, 6))
plt.plot(log.values, label="Estimator VQE")
plt.axhline(y=real_solution.groundenergy, color="tab:red", ls="--", label="Target")
plt.legend(loc="best")
plt.xlabel("Iteration")
plt.ylabel("Energy [H]")
plt.title("VQE energy")
plt.show()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-bip-mapper
|
qiskit-community
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the BIPMapping pass."""
import unittest
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister
from qiskit.circuit import Barrier
from qiskit.circuit.library.standard_gates import SwapGate
from qiskit.converters import circuit_to_dag
from qiskit.providers.fake_provider import FakeLima
from qiskit.transpiler import CouplingMap, Layout, PassManager
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.transpiler.passes import CheckMap, Collect2qBlocks, ConsolidateBlocks, UnitarySynthesis
from qiskit_bip_mapper.bip_mapping import BIPMapping
class TestBIPMapping(unittest.TestCase):
    """Tests the BIPMapping pass."""

    def test_empty(self):
        """Returns the original circuit if the circuit is empty."""
        coupling = CouplingMap([[0, 1]])
        circuit = QuantumCircuit(2)
        actual = BIPMapping(coupling)(circuit)
        self.assertEqual(circuit, actual)

    def test_no_two_qubit_gates(self):
        """Returns the original circuit if the circuit has no 2q-gates.

        q0:--[H]--
        q1:-------

        CouplingMap map: [0]--[1]
        """
        coupling = CouplingMap([[0, 1]])
        circuit = QuantumCircuit(2)
        circuit.h(0)
        actual = BIPMapping(coupling)(circuit)
        self.assertEqual(circuit, actual)

    def test_trivial_case(self):
        """No need to have any swap, the CX are distance 1 to each other.

        q0:--(+)-[H]-(+)-
              |       |
        q1:---.-------|--
                      |
        q2:-----------.--

        CouplingMap map: [1]--[0]--[2]
        """
        coupling = CouplingMap([[0, 1], [0, 2]])
        circuit = QuantumCircuit(3)
        circuit.cx(1, 0)
        circuit.h(0)
        circuit.cx(2, 0)
        actual = BIPMapping(coupling)(circuit)
        self.assertEqual(3, len(actual))
        for inst, _, _ in actual.data:  # there are no swaps
            self.assertFalse(isinstance(inst, SwapGate))

    def test_no_swap(self):
        """Adding no swap if not giving initial layout."""
        coupling = CouplingMap([[0, 1], [0, 2]])
        circuit = QuantumCircuit(3)
        circuit.cx(1, 2)
        actual = BIPMapping(coupling)(circuit)
        # The mapper may relabel qubits, so the CX lands on an adjacent pair.
        q = QuantumRegister(3, name="q")
        expected = QuantumCircuit(q)
        expected.cx(q[0], q[1])
        self.assertEqual(expected, actual)

    def test_ignore_initial_layout(self):
        """Ignoring initial layout even when it is supplied."""
        coupling = CouplingMap([[0, 1], [0, 2]])
        circuit = QuantumCircuit(3)
        circuit.cx(1, 2)
        property_set = {"layout": Layout.generate_trivial_layout(*circuit.qubits)}
        actual = BIPMapping(coupling)(circuit, property_set)
        q = QuantumRegister(3, name="q")
        expected = QuantumCircuit(q)
        expected.cx(q[0], q[1])
        self.assertEqual(expected, actual)

    def test_can_map_measurements_correctly(self):
        """Verify measurement nodes are updated to map correct cregs to re-mapped qregs."""
        coupling = CouplingMap([[0, 1], [0, 2]])
        qr = QuantumRegister(3, "qr")
        cr = ClassicalRegister(2)
        circuit = QuantumCircuit(qr, cr)
        circuit.cx(qr[1], qr[2])
        circuit.measure(qr[1], cr[0])
        circuit.measure(qr[2], cr[1])
        actual = BIPMapping(coupling)(circuit)
        q = QuantumRegister(3, "q")
        expected = QuantumCircuit(q, cr)
        expected.cx(q[0], q[1])
        expected.measure(q[0], cr[0])  # <- changed due to initial layout change
        expected.measure(q[1], cr[1])  # <- changed due to initial layout change
        self.assertEqual(expected, actual)

    def test_never_modify_mapped_circuit(self):
        """Test that the mapping is idempotent.

        It should not modify a circuit which is already compatible with the
        coupling map, and can be applied repeatedly without modifying the circuit.
        """
        coupling = CouplingMap([[0, 1], [0, 2]])
        circuit = QuantumCircuit(3, 2)
        circuit.cx(1, 2)
        circuit.measure(1, 0)
        circuit.measure(2, 1)
        dag = circuit_to_dag(circuit)
        mapped_dag = BIPMapping(coupling).run(dag)
        remapped_dag = BIPMapping(coupling).run(mapped_dag)
        self.assertEqual(mapped_dag, remapped_dag)

    def test_no_swap_multi_layer(self):
        """Can find the best layout for a circuit with multiple layers."""
        coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
        qr = QuantumRegister(4, name="qr")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[1], qr[0])
        circuit.cx(qr[0], qr[3])
        property_set = {}
        actual = BIPMapping(coupling, objective="depth")(circuit, property_set)
        self.assertEqual(2, actual.depth())
        # CheckMap records whether the result respects the coupling map.
        CheckMap(coupling)(actual, property_set)
        self.assertTrue(property_set["is_swap_mapped"])

    def test_unmappable_cnots_in_a_layer(self):
        """Test mapping of a circuit with 2 cnots in a layer which BIPMapping cannot map."""
        qr = QuantumRegister(4, "q")
        cr = ClassicalRegister(4, "c")
        circuit = QuantumCircuit(qr, cr)
        circuit.cx(qr[0], qr[1])
        circuit.cx(qr[2], qr[3])
        circuit.measure(qr, cr)
        coupling = CouplingMap([[0, 1], [1, 2], [1, 3]])  # {0: [1], 1: [2, 3]}
        actual = BIPMapping(coupling)(circuit)
        # Fails to map and returns the original circuit
        self.assertEqual(circuit, actual)

    def test_multi_cregs(self):
        """Test for multiple ClassicalRegisters."""
        # ┌───┐ ░ ┌─┐
        # qr_0: ──■────────────┤ X ├─░─┤M├─────────
        # ┌─┴─┐ ┌───┐└─┬─┘ ░ └╥┘┌─┐
        # qr_1: ┤ X ├──■──┤ H ├──■───░──╫─┤M├──────
        # └───┘┌─┴─┐└───┘ ░ ║ └╥┘┌─┐
        # qr_2: ──■──┤ X ├───────────░──╫──╫─┤M├───
        # ┌─┴─┐└───┘ ░ ║ ║ └╥┘┌─┐
        # qr_3: ┤ X ├────────────────░──╫──╫──╫─┤M├
        # └───┘ ░ ║ ║ ║ └╥┘
        # c: 2/════════════════════════╩══╬══╩══╬═
        # 0 ║ 1 ║
        # ║ ║
        # d: 2/═══════════════════════════╩═════╩═
        # 0 1
        qr = QuantumRegister(4, "qr")
        cr1 = ClassicalRegister(2, "c")
        cr2 = ClassicalRegister(2, "d")
        circuit = QuantumCircuit(qr, cr1, cr2)
        circuit.cx(qr[0], qr[1])
        circuit.cx(qr[2], qr[3])
        circuit.cx(qr[1], qr[2])
        circuit.h(qr[1])
        circuit.cx(qr[1], qr[0])
        circuit.barrier(qr)
        # Measurements interleave the two classical registers.
        circuit.measure(qr[0], cr1[0])
        circuit.measure(qr[1], cr2[0])
        circuit.measure(qr[2], cr1[1])
        circuit.measure(qr[3], cr2[1])
        coupling = CouplingMap([[0, 1], [0, 2], [2, 3]])  # linear [1, 0, 2, 3]
        property_set = {}
        actual = BIPMapping(coupling, objective="depth")(circuit, property_set)
        self.assertEqual(5, actual.depth())
        CheckMap(coupling)(actual, property_set)
        self.assertTrue(property_set["is_swap_mapped"])

    def test_swaps_in_dummy_steps(self):
        """Test the case when swaps are inserted in dummy steps."""
        # ┌───┐ ░ ░
        # q_0: ──■──┤ H ├─░───■────────░───■───────
        # ┌─┴─┐├───┤ ░ │ ░ │
        # q_1: ┤ X ├┤ H ├─░───┼────■───░───┼────■──
        # └───┘├───┤ ░ │ ┌─┴─┐ ░ ┌─┴─┐ │
        # q_2: ──■──┤ H ├─░───┼──┤ X ├─░─┤ X ├──┼──
        # ┌─┴─┐├───┤ ░ ┌─┴─┐└───┘ ░ └───┘┌─┴─┐
        # q_3: ┤ X ├┤ H ├─░─┤ X ├──────░──────┤ X ├
        # └───┘└───┘ ░ └───┘ ░ └───┘
        circuit = QuantumCircuit(4)
        circuit.cx(0, 1)
        circuit.cx(2, 3)
        circuit.h([0, 1, 2, 3])
        circuit.barrier()
        circuit.cx(0, 3)
        circuit.cx(1, 2)
        circuit.barrier()
        circuit.cx(0, 2)
        circuit.cx(1, 3)
        coupling = CouplingMap.from_line(4)
        property_set = {}
        actual = BIPMapping(coupling, objective="depth")(circuit, property_set)
        self.assertEqual(7, actual.depth())
        CheckMap(coupling)(actual, property_set)
        self.assertTrue(property_set["is_swap_mapped"])
        # no swaps before the first barrier
        for inst, _, _ in actual.data:
            if isinstance(inst, Barrier):
                break
            self.assertFalse(isinstance(inst, SwapGate))

    def test_different_number_of_virtual_and_physical_qubits(self):
        """Test the case when number of virtual and physical qubits are different."""
        # q_0: ──■────■───────
        # ┌─┴─┐ │
        # q_1: ┤ X ├──┼────■──
        # └───┘ │ ┌─┴─┐
        # q_2: ──■────┼──┤ X ├
        # ┌─┴─┐┌─┴─┐└───┘
        # q_3: ┤ X ├┤ X ├─────
        # └───┘└───┘
        circuit = QuantumCircuit(4)
        circuit.cx(0, 1)
        circuit.cx(2, 3)
        circuit.cx(0, 3)
        property_set = {}
        # 5 physical qubits for a 4-qubit circuit.
        coupling = CouplingMap.from_line(5)
        actual = BIPMapping(coupling, objective="depth")(circuit, property_set)
        self.assertEqual(2, actual.depth())

    def test_qubit_subset(self):
        """Test if `qubit_subset` option works as expected."""
        circuit = QuantumCircuit(3)
        circuit.cx(0, 1)
        circuit.cx(1, 2)
        circuit.cx(0, 2)
        coupling = CouplingMap([(0, 1), (1, 3), (3, 2)])
        qubit_subset = [0, 1, 3]
        actual = BIPMapping(coupling, qubit_subset=qubit_subset)(circuit)
        # all used qubits are in qubit_subset
        bit_indices = {bit: index for index, bit in enumerate(actual.qubits)}
        for _, qargs, _ in actual.data:
            for q in qargs:
                self.assertTrue(bit_indices[q] in qubit_subset)
        # ancilla qubits are set in the resulting qubit
        idle = QuantumRegister(1, name="ancilla")
        self.assertEqual(idle[0], actual._layout.initial_layout[2])

    def test_unconnected_qubit_subset(self):
        """Fails if qubits in `qubit_subset` are not connected."""
        circuit = QuantumCircuit(3)
        circuit.cx(0, 1)
        # Qubit 2 is only reachable through 3, which is not in the subset.
        coupling = CouplingMap([(0, 1), (1, 3), (3, 2)])
        with self.assertRaises(TranspilerError):
            BIPMapping(coupling, qubit_subset=[0, 1, 2])(circuit)

    def test_objective_function(self):
        """Test if ``objective`` functions prioritize metrics correctly."""
        # ┌──────┐┌──────┐ ┌──────┐
        # q_0: ┤0 ├┤0 ├─────┤0 ├
        # │ Dcx ││ │ │ Dcx │
        # q_1: ┤1 ├┤ Dcx ├──■──┤1 ├
        # └──────┘│ │ │ └──────┘
        # q_2: ───■────┤1 ├──┼─────■────
        # ┌─┴─┐ └──────┘┌─┴─┐ ┌─┴─┐
        # q_3: ─┤ X ├──────────┤ X ├─┤ X ├──
        # └───┘ └───┘ └───┘
        qc = QuantumCircuit(4)
        qc.dcx(0, 1)
        qc.cx(2, 3)
        qc.dcx(0, 2)
        qc.cx(1, 3)
        qc.dcx(0, 1)
        qc.cx(2, 3)
        coupling = CouplingMap(FakeLima().configuration().coupling_map)
        dep_opt = BIPMapping(coupling, objective="depth", qubit_subset=[0, 1, 3, 4])(qc)
        err_opt = BIPMapping(
            coupling,
            objective="gate_error",
            qubit_subset=[0, 1, 3, 4],
            backend_prop=FakeLima().properties(),
        )(qc)
        # depth = number of su4 layers (mirrored gates have to be consolidated as single su4 gates)
        pm_ = PassManager([Collect2qBlocks(), ConsolidateBlocks(basis_gates=["cx", "u"])])
        dep_opt = pm_.run(dep_opt)
        err_opt = pm_.run(err_opt)
        self.assertLessEqual(dep_opt.depth(), err_opt.depth())
        # count CNOTs after synthesized
        dep_opt = UnitarySynthesis(basis_gates=["cx", "u"])(dep_opt)
        err_opt = UnitarySynthesis(basis_gates=["cx", "u"])(err_opt)
        self.assertGreater(dep_opt.count_ops()["cx"], err_opt.count_ops()["cx"])
|
https://github.com/qiskit-community/qiskit-bip-mapper
|
qiskit-community
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the BIPMapping pass."""
import unittest
from qiskit import QuantumCircuit
from qiskit.circuit.library.standard_gates import SwapGate
from qiskit.compiler.transpiler import transpile
from qiskit.transpiler.coupling import CouplingMap
from qiskit.transpiler.preset_passmanagers.plugin import list_stage_plugins
class TestBIPMapping(unittest.TestCase):
    """Tests the BIPMapping plugin."""

    def test_plugin_in_list(self):
        """Test bip plugin is installed."""
        self.assertIn("bip", list_stage_plugins("routing"))

    def test_trivial_case(self):
        """No need to have any swap, the CX are distance 1 to each other.

        q0:--(+)-[H]-(+)-
              |       |
        q1:---.-------|--
                      |
        q2:-----------.--

        CouplingMap map: [1]--[0]--[2]
        """
        coupling = CouplingMap([[0, 1], [0, 2]])
        circuit = QuantumCircuit(3)
        circuit.cx(1, 0)
        circuit.h(0)
        circuit.cx(2, 0)
        # Route through transpile() with the plugin name instead of the pass.
        actual = transpile(
            circuit, coupling_map=coupling, routing_method="bip", optimization_level=0
        )
        self.assertEqual(11, len(actual))
        for inst, _, _ in actual.data:  # there are no swaps
            self.assertFalse(isinstance(inst, SwapGate))
|
https://github.com/contepablod/QCNNCancerBinaryClassifier
|
contepablod
|
from IPython.display import Image
Image('https://raw.githubusercontent.com/contepablod/QCNNCancerClassifier/master/Esophagus%20Cancer.JPG')
import os # módulo para manejar carpetas y archivos en nuestro ordenador
import random # módulo para aleatorizar
import numpy as np # biblioteca para manejar matrices y operaciones de matrices
import pandas as pd # biblioteca para manejar tablas de datos
#Skimage (Scikit-image): biblioteca para procesamiento de imágenes
from skimage import io #Modulo para leer una imagen (librería para procesamiento de imagenes)
#Sklearn (Scikit-learn): biblioteca para machine learning
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
#Bibliotecas para gráficar y visualizar
import matplotlib.pyplot as plt
import seaborn as sns
#Matriz de confusión
def matrix_confusion(yt, yp):
    """Render a confusion-matrix heatmap for true labels *yt* vs. predictions *yp*."""
    frame = pd.DataFrame(
        {'Y_Real': yt, 'Y_Prediccion': yp},
        columns=['Y_Real', 'Y_Prediccion'],
    )
    table = pd.crosstab(
        frame['Y_Real'], frame['Y_Prediccion'],
        rownames=['Real'], colnames=['Predicted'],
    )
    sns.heatmap(table, annot=True, fmt='g')
    plt.show()
#Leemos los datos
datos = pd.read_csv("https://raw.githubusercontent.com/AnIsAsPe/ClassificadorCancerEsofago/master/Datos/ClasesImagenes.csv", usecols=[1,2])
datos.info() #muestra los primeros cinco registros
#¿cuántas imagenes tenemos de cada clase?
datos['class_number'].value_counts(sort=False)
Y = datos['class_number'] #Guardamos las etiquetas de las imagenes como serie de pandas
datos['image_filename']
path = "C:\\Users\\conte\\OneDrive\\Escritorio\\Colegio Bourbaki\\ML-AI-WA\\Perceptron\\CarpetaImagenes\\"
%time img = datos['image_filename'].apply(lambda x: io.imread(path + x, as_gray=True))
img.shape
img[0].shape
IMG = np.stack(img, axis=0) # Toma una secuencia de matrices y las apila a lo largo
# de un tercer eje para hacer una solo arreglo
IMG.shape
X = IMG.reshape(5063, -1) # se puede poner 67600 en vez de -1
X.shape
#El método GroupBy de Pandas separa un data frame en varios data frames
porClase = datos.groupby('class_number')
#elije al azar n muestras de cada subconjunto y guarda la posición de las figuras elegidas en una lista
n = 20
c = random.sample(porClase.get_group(1).index.tolist(), n) # indices de las imagenes cancerígenas seleccionadas
s = random.sample(porClase.get_group(0).index.tolist(), n) # indices de las imagenes sanas seleccionadas
# Grafica 20 imágenes aleatorias de tejido con cáncer y 20 de tejido sano
fig = plt.figure(figsize=(16, 8))
columns = 10
rows = 4
for i in range(0, columns * rows):
fig.add_subplot(rows, columns, i+1)
if i < 20:
plt.imshow(img[c[i]], cmap='Greys_r')
plt.title('cancer')
plt.xticks([])
plt.yticks([])
else:
plt.imshow(img[s[i-20]], cmap='Greys_r')
plt.title('tejido sano')
plt.xticks([])
plt.yticks([])
plt.show()
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3,
shuffle=True, random_state=0) #random_state es el valor semilla
# ¿Cómo son los conjuntos de entrenamiento y prueba?
print("Training set")
print("X: ", X_train.shape)
print("Y: ", y_train.shape)
unique, counts = np.unique(y_train, return_counts=True)
print('Tejido Sano: ', counts[0],'\nDisplasia o Cáncer: ', counts[1],'\n')
print("Test set")
print("X: ", X_test.shape)
print("Y: ", y_test.shape)
unique, counts = np.unique(y_test, return_counts=True)
print('Tejido Sano: ', counts[0],'\nDisplasia o Cáncer: ', counts[1],'\n')
model_p = Perceptron(max_iter=50, random_state=0, verbose=True)
model_p.fit(X_train,y_train)
y_pred = model_p.predict(X_test) #pasa cada una de las imágenes de X_test por el modelo
print("Accuracy: %.2f%%" % (accuracy_score(y_test, y_pred)*100))
print("Precisión conjunto entrenamiento: %.2f%%" % (model_p.score(X_train, y_train)*100.0))
print("Precisión conjunto prueba: %.2f%%" % (model_p.score(X_test, y_test)*100.0))
matrix_confusion(y_test, y_pred)
model_mp = Perceptron(max_iter=1000, random_state=0, verbose=False, alpha=0.0001)
model_mp.fit(X_train,y_train)
print("Precisión conjunto entrenamiento: %.2f%%" % (model_mp.score(X_train, y_train)*100.0))
print("Precisión conjunto prueba: %.2f%%" % (model_mp.score(X_test, y_test)*100.0))
y_pred = model_mp.predict(X_test) #pasa cada una de las imágenes de X_test por el modelo
print("Accuracy: %.2f%%" % (accuracy_score(y_test, y_pred)*100))
matrix_confusion(y_test, y_pred)
model_mp1 = Perceptron(max_iter=1000, random_state=0, verbose=False, alpha=0.000001, penalty='l2') # Mas margen y con penalidad
model_mp1.fit(X_train,y_train)
print("Precisión conjunto entrenamiento: %.2f%%" % (model_mp1.score(X_train, y_train)*100.0))
print("Precisión conjunto prueba: %.2f%%" % (model_mp1.score(X_test, y_test)*100.0))
y_pred = model_mp1.predict(X_test) #pasa cada una de las imágenes de X_test por el modelo
print("Accuracy: %.2f%%" % (accuracy_score(y_test, y_pred)*100))
matrix_confusion(y_test, y_pred)
# Pytorch
import torch, torchvision, torch.utils
from torch import Tensor
from torch import cat
from torch.autograd.grad_mode import no_grad
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.optim as optim
from torch.nn import (
Module,
Conv2d,
Linear,
Dropout2d,
NLLLoss, BCELoss, CrossEntropyLoss, MSELoss,
MaxPool2d,
Flatten,
Sequential,
ReLU,
)
import torch.nn.functional as F
from torchviz import make_dot
from torchsummary import summary
# Qiskit
from qiskit import Aer, QuantumCircuit
from qiskit.utils import QuantumInstance
from qiskit.opflow import AerPauliExpectation
from qiskit.circuit import Parameter
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit.quantum_info import DensityMatrix, entanglement_of_formation
from qiskit.quantum_info import DensityMatrix
from qiskit.visualization import plot_state_city
from qiskit_machine_learning.neural_networks import TwoLayerQNN
from qiskit_machine_learning.connectors import TorchConnector
train_data = torchvision.datasets.ImageFolder('C:\\Users\\conte\\OneDrive\\Escritorio\\Colegio Bourbaki\\ML-AI-WA\\Perceptron\\Imagenes_Clasificadas_Random_Split\\Train', transform=transforms.Compose([transforms.ToTensor()]))
test_data = torchvision.datasets.ImageFolder('C:\\Users\\conte\\OneDrive\\Escritorio\\Colegio Bourbaki\\ML-AI-WA\\Perceptron\\Imagenes_Clasificadas_Random_Split\\Test', transform=transforms.Compose([transforms.ToTensor()]))
train_data[0][0].shape
train_loader = DataLoader(train_data, shuffle=True, batch_size=1)
test_loader = DataLoader(test_data, shuffle=True, batch_size=1)
# False significa que no hay cancer (0) y True que sí (1)
print((train_loader.dataset.class_to_idx))
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 10))
while n_samples_show > 0:
images, targets = data_iter.__next__()
axes[n_samples_show - 1].imshow(images[0, 0].numpy().squeeze(), cmap=plt.cm.rainbow)
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title(f"Labeled: {targets[0].item()}")
n_samples_show -= 1
Image(url='https://raw.githubusercontent.com/contepablod/QCNNCancerClassifier/master/hybridnetwork.png')
# Declaramos Instancia Cuantica
qi = QuantumInstance(Aer.get_backend("aer_simulator_statevector"))
Image(url='https://raw.githubusercontent.com/contepablod/QCNNCancerClassifier/master/neuralnetworkQC.png')
# Definimos y creamos la red neuronal cuántica
def create_qnn():
    """Build the 2-qubit quantum neural network used by the hybrid model.

    A ZZFeatureMap encodes the two classical inputs, a RealAmplitudes
    ansatz (one repetition) provides the trainable parameters, and
    ``input_gradients=True`` enables gradients through the hybrid network.
    """
    encoder = ZZFeatureMap(2)
    variational = RealAmplitudes(2, reps=1)
    return TwoLayerQNN(
        2,  # number of qubits
        encoder,
        variational,
        input_gradients=True,
        exp_val=AerPauliExpectation(),
        quantum_instance=qi,  # module-level simulator instance
    )
qnn = create_qnn()
qnn.circuit.draw(output='mpl')
qnn.feature_map.decompose().draw(output='mpl')
qnn.ansatz.decompose().draw(output='mpl')
qnn.circuit.parameters
params = np.random.uniform(-1, 1, len(qnn.circuit.parameters))
params
rho_01 = DensityMatrix.from_instruction(qnn.circuit.bind_parameters(params))
plot_state_city(rho_01.data, title='Density Matrix')
gamma_p = rho_01.purity()
display(rho_01.draw('latex', prefix='\\rho_p = '))
print("State purity: ", np.round(np.real(gamma_p)))
print(f'{entanglement_of_formation(rho_01):.4f}')
# Red Neuronal en Pytorch
class Net(Module):
    """Hybrid classical/quantum binary classifier.

    Two convolutional layers feed two fully connected layers whose
    2-dimensional output is passed through the quantum neural network
    (wrapped via TorchConnector). The scalar QNN output ``x`` is expanded
    to the pair ``(x, 1 - x)`` for a two-class loss.
    """

    def __init__(self, qnn):
        super().__init__()
        self.conv1 = Conv2d(3, 128, kernel_size=5)
        self.conv2 = Conv2d(128, 128, kernel_size=3)
        self.dropout = Dropout2d()
        # 508032 = flattened conv-stack output for the (3, 260, 260) inputs
        # used by the summary() call below — confirm if the input size changes.
        self.fc1 = Linear(508032, 128)
        self.fc2 = Linear(128, 2)  # 2 features: the QNN input dimension
        self.qnn = TorchConnector(qnn)  # bridges the QNN into autograd
        self.fc3 = Linear(1, 1)  # rescales the scalar QNN output

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = self.dropout(x)
        x = x.view(x.shape[0], -1)
        x = self.fc2(F.relu(self.fc1(x)))
        x = self.fc3(self.qnn(x))
        return cat((x, 1 - x), -1)
model = Net(qnn)
model = model.to('cuda')
print(model)
summary(model, (3, 260, 260), batch_size=-1, device='cuda')
# dummy_tensor = next(iter(train_loader))[0].to('cuda')
# make_dot(model(dummy_tensor), params=dict(list(model.named_parameters())), show_saved=True, show_attrs=True).render("rnn_torchviz", format="png")
#Image('https://raw.githubusercontent.com/contepablod/QCNNCancerClassifier/master/rnn_torchviz.png')
Image(r'C:\Users\conte\OneDrive\Escritorio\qiskit-fall-fest-peru-2022-main\qiskit-community-tutorials-master\qiskit-community-tutorials-master\drafts\rnn_torchviz.png', width=2000, height=2250)
# Definimos optimizador y función de pérdida
optimizer = optim.Adam(model.parameters(), lr=0.00001)
loss_func = CrossEntropyLoss().to('cuda')
# Empezamos entrenamiento
epochs = 50 # Número de épocas
model.train() # Modelo en modo entrenamiento
loss_list = []
total_accuracy = []
for epoch in range(epochs):
correct = 0
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad(set_to_none=True) # Se inicializa gradiente
output = model(data.to('cuda')) # Forward pass, Datos a GPU
loss = loss_func(output, target.to('cuda')) #Etiquetas a GPU
loss.backward() # Backward pass
optimizer.step() # Optimizamos pesos
total_loss.append(loss.item()) # Cálculo de la función de pérdida
train_pred = output.argmax(dim=1, keepdim=True)
correct += train_pred.eq(target.to('cuda').view_as(train_pred)).sum().item()
loss_list.append(sum(total_loss) / len(total_loss))
accuracy = 100 * correct / len(train_loader) #Cálculo de precisión
total_accuracy.append(accuracy)
print(f"Training [{100.0 * (epoch + 1) / epochs:.0f}%]\tLoss: {loss_list[-1]:.4f}\tAccuracy: {accuracy:.2f}%")
# Grafico de Convergencia de la función de pérdida y precisión
fig, ax1 = plt.subplots()
ax1.plot(loss_list, 'g-')
ax2 = ax1.twinx()
ax2.plot(total_accuracy, 'b')
plt.title("Hybrid NN Training Convergence", color='red')
ax1.set_xlabel("Training Iterations")
ax1.set_ylabel("Cross Entropy Loss", color='g')
ax2.set_ylabel("Accuracy (%)", color='b')
plt.show()
torch.save(model.state_dict(), "model.pt")
qnn1 = create_qnn()
model1 = Net(qnn1)
model1.load_state_dict(torch.load("model.pt"))
model1= model1.to('cuda')
batch_size=1
model1.eval() # Evaluación del Modelo
pred_targets = []
test_targets= []
with no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model1(data.to('cuda'))
if len(output.shape) == 1:
output = output.reshape(1, *output.shape)
pred = output.argmax(dim=1, keepdim=True)
pred_targets.append(pred.item())
test_targets.append(target.item())
correct += pred.eq(target.to('cuda').view_as(pred)).sum().item()
loss = loss_func(output, target.to('cuda'))
total_loss.append(loss.item())
print(
"Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.2f}%".format(
sum(total_loss) / len(total_loss), correct / len(test_loader) / batch_size * 100
)
)
# Plot a handful of test images together with the model's predicted labels.
from PIL import Image

n_samples_show = 6
count = 0
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
model1.eval()  # inference mode (disables dropout)
with no_grad():
    for batch_idx, (data, target) in enumerate(test_loader):
        if count == n_samples_show:
            break
        output = model1(data.to('cuda')[0:1])
        if len(output.shape) == 1:
            # BUG FIX: re-add the batch dimension with size 1 (was 3), matching
            # the evaluation loop above; argmax(dim=1) needs shape (1, classes).
            output = output.reshape(1, *output.shape)
        pred = output.argmax(dim=1, keepdim=True)
        axes[count].imshow(
            torchvision.transforms.ToPILImage(mode='RGB')(data[0].squeeze()),
            cmap=plt.cm.rainbow,
        )
        axes[count].set_xticks([])
        axes[count].set_yticks([])
        axes[count].set_title("Predicted {}".format(pred.item()))
        count += 1
matrix_confusion(test_targets, pred_targets)
|
https://github.com/contepablod/QCNNCancerBinaryClassifier
|
contepablod
|
from IPython.display import Image
Image('https://raw.githubusercontent.com/contepablod/QCNNCancerClassifier/master/Esophagus%20Cancer.JPG')
import os
import random
import numpy as np
import pandas as pd
from skimage import io
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
def matrix_confusion(yt, yp):
    """Render a confusion-matrix heatmap for true labels *yt* vs. predictions *yp*."""
    frame = pd.DataFrame(
        {'Y_Real': yt, 'Y_Prediccion': yp},
        columns=['Y_Real', 'Y_Prediccion'],
    )
    table = pd.crosstab(
        frame['Y_Real'], frame['Y_Prediccion'],
        rownames=['Real'], colnames=['Predicted'],
    )
    sns.heatmap(table, annot=True, fmt='g')
    plt.show()
data = pd.read_csv("https://raw.githubusercontent.com/AnIsAsPe/ClassificadorCancerEsofago/master/Datos/ClasesImagenes.csv", usecols=[1,2])
data.info()
data['class_number'].value_counts(sort=False)
Y = data['class_number']
data['image_filename']
path = "C:\\Users\\conte\\OneDrive\\Escritorio\\Colegio Bourbaki\\ML-AI-WA\\Perceptron\\CarpetaImagenes\\"
%time img = data['image_filename'].apply(lambda x: io.imread(path + x, as_gray=True))
img.shape
img[0].shape
IMG = np.stack(img, axis=0)
IMG.shape
X = IMG.reshape(5063, -1) # se puede poner 67600 en vez de -1
X.shape
byClass = data.groupby('class_number')
n = 20
c = random.sample(byClass.get_group(1).index.tolist(), n)
s = random.sample(byClass.get_group(0).index.tolist(), n)
fig = plt.figure(figsize=(16, 8))
columns = 10
rows = 4
for i in range(0, columns * rows):
fig.add_subplot(rows, columns, i+1)
if i < 20:
plt.imshow(img[c[i]], cmap='Greys_r')
plt.title('Cancer')
plt.xticks([])
plt.yticks([])
else:
plt.imshow(img[s[i-20]], cmap='Greys_r')
plt.title('Healthy Tissue')
plt.xticks([])
plt.yticks([])
plt.show()
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3,
shuffle=True, random_state=0)
print("Training set")
print("X: ", X_train.shape)
print("Y: ", y_train.shape)
unique, counts = np.unique(y_train, return_counts=True)
print('Healthy Tissue: ', counts[0],'\nCancer: ', counts[1],'\n')
print("Test set")
print("X: ", X_test.shape)
print("Y: ", y_test.shape)
unique, counts = np.unique(y_test, return_counts=True)
print('Healthy Tissue: ', counts[0],'\nCancer: ', counts[1],'\n')
model_p = Perceptron(max_iter=50, random_state=0, verbose=True)
model_p.fit(X_train,y_train)
print("Train Set Accuracy: %.2f%%" % (model_p.score(X_train, y_train)*100.0))
print("Test Set Accuracy: %.2f%%" % (model_p.score(X_test, y_test)*100.0))
y_pred = model_p.predict(X_test)
print("Accuracy: %.2f%%" % (accuracy_score(y_test, y_pred)*100))
matrix_confusion(y_test, y_pred)
model_mp = Perceptron(max_iter=1000, random_state=0, verbose=False, alpha=0.0001)
model_mp.fit(X_train,y_train)
print("Train Set Accuracy: %.2f%%" % (model_mp.score(X_train, y_train)*100.0))
print("Test Set Accuracy: %.2f%%" % (model_mp.score(X_test, y_test)*100.0))
y_pred = model_mp.predict(X_test)
print("Accuracy: %.2f%%" % (accuracy_score(y_test, y_pred)*100))
matrix_confusion(y_test, y_pred)
model_mp1 = Perceptron(max_iter=1000, random_state=0, verbose=False, alpha=0.000001, penalty='l2') # Adds L2 penalty
model_mp1.fit(X_train,y_train)
print("Train Set Accuracy: %.2f%%" % (model_mp1.score(X_train, y_train)*100.0))
print("Test Set Accuracy: %.2f%%" % (model_mp1.score(X_test, y_test)*100.0))
y_pred = model_mp1.predict(X_test) #pasa cada una de las imágenes de X_test por el modelo
print("Accuracy: %.2f%%" % (accuracy_score(y_test, y_pred)*100))
matrix_confusion(y_test, y_pred)
# Pytorch
import torch, torchvision, torch.utils
from torch import Tensor
from torch import cat
from torch.autograd.grad_mode import no_grad
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.optim as optim
from torch.nn import (
Module,
Conv2d,
Linear,
Dropout2d,
NLLLoss, BCELoss, CrossEntropyLoss, MSELoss,
MaxPool2d,
Flatten,
Sequential,
ReLU,
)
import torch.nn.functional as F
from torchviz import make_dot
from torchsummary import summary
# Qiskit
from qiskit import Aer, QuantumCircuit
from qiskit.utils import QuantumInstance
from qiskit.opflow import AerPauliExpectation
from qiskit.circuit import Parameter
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit.quantum_info import DensityMatrix, entanglement_of_formation
from qiskit.visualization import plot_state_city
from qiskit_machine_learning.neural_networks import TwoLayerQNN
from qiskit_machine_learning.connectors import TorchConnector
train_data = torchvision.datasets.ImageFolder('C:\\Users\\conte\\OneDrive\\Escritorio\\Colegio Bourbaki\\ML-AI-WA\\Perceptron\\Imagenes_Clasificadas_Random_Split\\Train', transform=transforms.Compose([transforms.ToTensor()]))
test_data = torchvision.datasets.ImageFolder('C:\\Users\\conte\\OneDrive\\Escritorio\\Colegio Bourbaki\\ML-AI-WA\\Perceptron\\Imagenes_Clasificadas_Random_Split\\Test', transform=transforms.Compose([transforms.ToTensor()]))
train_data[0][0].shape
train_loader = DataLoader(train_data, shuffle=True, batch_size=1)
test_loader = DataLoader(test_data, shuffle=True, batch_size=1)
# False means no cancer (0); True means cancer (1)
print((train_loader.dataset.class_to_idx))
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 10))
while n_samples_show > 0:
images, targets = data_iter.__next__()
axes[n_samples_show - 1].imshow(images[0, 0].numpy().squeeze(), cmap=plt.cm.rainbow)
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title(f"Labeled: {targets[0].item()}")
n_samples_show -= 1
Image(url='https://raw.githubusercontent.com/contepablod/QCNNCancerClassifier/master/hybridnetwork.png')
qi = QuantumInstance(Aer.get_backend("aer_simulator_statevector"))
Image(url='https://raw.githubusercontent.com/contepablod/QCNNCancerClassifier/master/neuralnetworkQC.png')
def create_qnn():
    """Construct the 2-qubit QNN: ZZFeatureMap encoder + RealAmplitudes ansatz.

    ``input_gradients=True`` lets gradients flow through the hybrid model.
    """
    encoder = ZZFeatureMap(2)
    variational = RealAmplitudes(2, reps=1)
    return TwoLayerQNN(
        2,
        encoder,
        variational,
        input_gradients=True,
        exp_val=AerPauliExpectation(),
        quantum_instance=qi,  # module-level simulator instance
    )
qnn = create_qnn()
qnn.circuit.draw(output='mpl')
qnn.feature_map.decompose().draw(output='mpl')
qnn.ansatz.decompose().draw(output='mpl')
qnn.circuit.parameters
params = np.random.uniform(-1, 1, len(qnn.circuit.parameters))
params
rho_01 = DensityMatrix.from_instruction(qnn.circuit.bind_parameters(params))
plot_state_city(rho_01.data, title='Density Matrix', figsize=(12,6))
gamma_p = rho_01.purity()
display(rho_01.draw('latex', prefix='\\rho_p = '))
print("State purity: ", np.round(np.real(gamma_p), 3))
print(f'{entanglement_of_formation(rho_01):.4f}')
# Red Neuronal en Pytorch
class Net(Module):
    """Hybrid CNN + quantum classifier.

    Conv/pool stack -> flatten -> two dense layers -> 2-feature QNN input;
    the scalar QNN output ``x`` becomes the class pair ``(x, 1 - x)``.
    """

    def __init__(self, qnn):
        super().__init__()
        self.conv1 = Conv2d(3, 128, kernel_size=5)
        self.conv2 = Conv2d(128, 128, kernel_size=3)
        self.dropout = Dropout2d()
        # 508032 = flattened conv output for (3, 260, 260) inputs — confirm
        # against summary() if the image size changes.
        self.fc1 = Linear(508032, 128)
        self.fc2 = Linear(128, 2)  # QNN takes a 2-dimensional input
        self.qnn = TorchConnector(qnn)  # couples quantum node to autograd
        self.fc3 = Linear(1, 1)  # rescales the quantum-circuit output

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = self.dropout(x)
        x = x.view(x.shape[0], -1)
        x = self.fc2(F.relu(self.fc1(x)))
        x = self.fc3(self.qnn(x))
        return cat((x, 1 - x), -1)
model = Net(qnn)
model = model.to('cuda')
print(model)
summary(model, (3, 260, 260), batch_size=-1, device='cuda')
# dummy_tensor = next(iter(train_loader))[0].to('cuda')
# make_dot(model(dummy_tensor), params=dict(list(model.named_parameters())), show_saved=True, show_attrs=True).render("rnn_torchviz", format="png")
#Image('https://raw.githubusercontent.com/contepablod/QCNNCancerClassifier/master/rnn_torchviz.png')
Image(r'C:\Users\conte\OneDrive\Escritorio\qiskit-fall-fest-peru-2022-main\qiskit-community-tutorials-master\qiskit-community-tutorials-master\drafts\rnn_torchviz.png', width=2000, height=2250)
optimizer = optim.Adam(model.parameters(), lr=0.00001)
loss_func = CrossEntropyLoss().to('cuda')
epochs = 50
model.train()
loss_list = []
total_accuracy = []
for epoch in range(epochs):
correct = 0
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad(set_to_none=True)
output = model(data.to('cuda')) # Forward pass, Data to GPU
loss = loss_func(output, target.to('cuda')) #Labels to GPU
loss.backward() # Backward pass
optimizer.step()
total_loss.append(loss.item())
train_pred = output.argmax(dim=1, keepdim=True)
correct += train_pred.eq(target.to('cuda').view_as(train_pred)).sum().item()
loss_list.append(sum(total_loss) / len(total_loss))
accuracy = 100 * correct / len(train_loader)
total_accuracy.append(accuracy)
print(f"Training [{100.0 * (epoch + 1) / epochs:.0f}%]\tLoss: {loss_list[-1]:.4f}\tAccuracy: {accuracy:.2f}%")
fig, ax1 = plt.subplots()
ax1.plot(loss_list, 'g-')
ax2 = ax1.twinx()
ax2.plot(total_accuracy, 'b')
plt.title("Hybrid NN Training Convergence", color='red')
ax1.set_xlabel("Training Iterations")
ax1.set_ylabel("Cross Entropy Loss", color='g')
ax2.set_ylabel("Accuracy (%)", color='b')
plt.show()
torch.save(model.state_dict(), "model.pt")
qnn1 = create_qnn()
model1 = Net(qnn1)
model1.load_state_dict(torch.load("model.pt"))
model1= model1.to('cuda')
batch_size=1
model1.eval()
pred_targets = []
test_targets= []
with no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model1(data.to('cuda'))
if len(output.shape) == 1:
output = output.reshape(1, *output.shape)
pred = output.argmax(dim=1, keepdim=True)
pred_targets.append(pred.item())
test_targets.append(target.item())
correct += pred.eq(target.to('cuda').view_as(pred)).sum().item()
loss = loss_func(output, target.to('cuda'))
total_loss.append(loss.item())
print(f"Performance on test data:\n\tLoss: {sum(total_loss) / len(total_loss):.4f}\n\tAccuracy: {100 * correct / len(test_loader) / batch_size:.2f}%")
# Plot sample test images with the model's predicted labels.
from PIL import Image

n_samples_show = 6
count = 0
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
model1.eval()  # inference mode (disables dropout)
with no_grad():
    for batch_idx, (data, target) in enumerate(test_loader):
        if count == n_samples_show:
            break
        output = model1(data.to('cuda')[0:1])
        if len(output.shape) == 1:
            # BUG FIX: restore the batch dimension with size 1 (was 3), as in
            # the evaluation loop; argmax(dim=1) expects shape (1, classes).
            output = output.reshape(1, *output.shape)
        pred = output.argmax(dim=1, keepdim=True)
        axes[count].imshow(
            torchvision.transforms.ToPILImage(mode='RGB')(data[0].squeeze()),
            cmap=plt.cm.rainbow,
        )
        axes[count].set_xticks([])
        axes[count].set_yticks([])
        axes[count].set_title("Predicted {}".format(pred.item()))
        count += 1
matrix_confusion(test_targets, pred_targets)
|
https://github.com/contepablod/QCNNCancerBinaryClassifier
|
contepablod
|
import os
import shutil
import random
import pathlib
import pandas as pd
data = pd.read_csv("https://raw.githubusercontent.com/AnIsAsPe/ClassificadorCancerEsofago/master/Datos/ClasesImagenes.csv", usecols=[1,2])
data['class'] = data['class_number'] == 1
data.head()
data['class'].value_counts()
# Splitting and copying images into Train/Test folders
target_dir = 'C:\\Users\\conte\\OneDrive\\Escritorio\\Colegio Bourbaki\\ML-AI-WA\\Perceptron\\ImagenesClasificadas\\'
image_path = 'C:\\Users\\conte\\OneDrive\\Escritorio\\Colegio Bourbaki\\ML-AI-WA\\Perceptron\\CarpetaImagenes\\'
count = 0
for image_split in [False, True]:
labels = list(data[(data['class']==image_split)]['image_filename'])
for label in labels:
to_dir = pathlib.Path(str(target_dir) + str(image_split) + '/' + str(label))
if not to_dir.is_dir():
to_dir.parent.mkdir(parents=True, exist_ok=True)
from_dir = pathlib.Path(image_path + label)
shutil.copy2(from_dir, to_dir)
count += 1
print(f'{count:.0f} images copied ')
# Setup data paths
data_path = 'C:\\Users\\conte\\OneDrive\\Escritorio\\Colegio Bourbaki\\ML-AI-WA\\Perceptron\\ImagenesClasificadas\\'
# Create function to split randomly in Train and Test
def train_test_split(image_path=data_path,
                     data_splits=('Train', 'Test'),
                     target_classes=(False, True),
                     split=0.3,
                     seed=42):
    """Randomly split the labelled images into Train/Test path lists.

    For the 'Train' split, a (1 - split) fraction of each class is sampled;
    the sampled rows are then dropped from the module-level DataFrame
    ``data`` so the 'Test' split receives the remaining images.

    NOTE(review): this mutates the global ``data`` in place, so the function
    is single-use per loaded DataFrame — reload ``data`` before calling again.
    Defaults are tuples (not lists) to avoid the mutable-default pitfall;
    callers may still pass lists.

    Returns:
        dict mapping each split name to a flat list of pathlib.Path objects.
    """
    random.seed(seed)
    label_splits = {}
    for data_split in data_splits:
        print(f"[INFO] Creating image split for: {data_split}...")
        image_paths = []
        for target in target_classes:
            labels = list(data[(data['class'] == target)]['image_filename'])
            sample = round((1 - split) * len(labels))
            print(f"Getting random set of {sample} images for {data_split}-{target} ...")
            if data_split == 'Train':
                sampled_images = random.sample(labels, k=sample)
            elif data_split == 'Test':
                # Whatever was not removed during the Train pass.
                sampled_images = labels
            image_paths.append(
                [pathlib.Path(str(image_path + str(target) + '/' + sample_image))
                 for sample_image in sampled_images]
            )
            # Remove the consumed rows so splits stay disjoint.
            data.drop(data[data['image_filename'].isin(sampled_images)].index, inplace=True)
            print(len(data))
        image_path_flat = [item for sublist in image_paths for item in sublist]
        label_splits[data_split] = image_path_flat
    print('\n','Finish splitting!')
    return label_splits
label_splits = train_test_split(split=0.3, seed=42)
label_splits['Train'][0:10]
from collections import Counter
counter1 = Counter(label_splits['Train'])
counter2 = Counter(label_splits['Test'])
common_elements = counter1 & counter2
len(common_elements)
# Create target directory path
target_dir_name = 'C:\\Users\\conte\\OneDrive\\Escritorio\\Colegio Bourbaki\\ML-AI-WA\\Perceptron\\Imagenes_Clasificadas_Random_Split'
print(f"Creating directory: '{target_dir_name}'")
# Setup the directories & Make the directories
target_dir = pathlib.Path(target_dir_name)
target_dir.mkdir(parents=True, exist_ok=True)
import shutil
count=0
for image_split in label_splits.keys():
for image_path in label_splits[str(image_split)]:
dest_dir = target_dir / image_split / image_path.parts[-2] / image_path.name
if not dest_dir.parent.is_dir():
dest_dir.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(image_path, dest_dir)
count += 1
print(f'{count:.0f} images moved')
# for image_split in label_splits.keys():
# delete_dir = target_dir / image_split
# files_in_dir = os.listdir(delete_dir) # get list of files in the directory
# for file in files_in_dir: # loop to delete each file in folder
# os.remove(f'{delete_dir}/{file}') # delete file
# os.rmdir(image_path.parent)
|
https://github.com/if-quantum/pairwise-tomography
|
if-quantum
|
# pylint: disable=missing-docstring
import unittest
import itertools
import numpy as np
from qiskit import QuantumRegister, QuantumCircuit, execute, Aer
from qiskit.quantum_info import state_fidelity
from qiskit.quantum_info import partial_trace
from qiskit.quantum_info.states import DensityMatrix
from pairwise_tomography.pairwise_state_tomography_circuits import pairwise_state_tomography_circuits
from pairwise_tomography.pairwise_fitter import PairwiseStateTomographyFitter
# Qubit counts exercised by the randomized tomography tests.
n_list = [3, 4]
# Shots per tomography circuit; the statistical deltas below scale as 1/sqrt(nshots).
nshots = 5000
# Single-qubit Pauli matrices keyed by their label.
pauli = {'I': np.eye(2),
'X': np.array([[0, 1], [1, 0]]),
'Y': np.array([[0, -1j], [1j, 0]]),
'Z': np.array([[1, 0], [0, -1]])}
def pauli_expectation(rho, i, j):
    """Return Tr[(P_j ⊗ P_i) @ rho] for Pauli labels *i*, *j*.

    The labels are swapped in the Kronecker product because of Qiskit's
    little-endian qubit ordering convention.
    """
    operator = np.kron(pauli[j], pauli[i])
    return np.real(np.trace(operator @ rho))
class TestPairwiseStateTomography(unittest.TestCase):
def test_pairwise_tomography(self):
for n in n_list:
with self.subTest():
self.tomography_random_circuit(n)
def tomography_random_circuit(self, n):
q = QuantumRegister(n)
qc = QuantumCircuit(q)
psi = ((2 * np.random.rand(2 ** n) - 1)
+ 1j * (2 *np.random.rand(2 ** n) - 1))
psi /= np.linalg.norm(psi)
qc.initialize(psi, q)
rho = DensityMatrix.from_instruction(qc).data
circ = pairwise_state_tomography_circuits(qc, q)
job = execute(circ, Aer.get_backend("qasm_simulator"), shots=nshots)
fitter = PairwiseStateTomographyFitter(job.result(), circ, q)
result = fitter.fit()
result_exp = fitter.fit(output='expectation')
# Compare the tomography matrices with the partial trace of
# the original state using fidelity
for (k, v) in result.items():
trace_qubits = list(range(n))
trace_qubits.remove(k[0])
trace_qubits.remove(k[1])
rhok = partial_trace(rho, trace_qubits).data
try:
self.check_density_matrix(v, rhok)
except:
print("Problem with density matrix:", k)
raise
try:
self.check_pauli_expectaion(result_exp[k], rhok)
except:
print("Problem with expectation values:", k)
raise
def check_density_matrix(self, item, rho):
fidelity = state_fidelity(item, rho)
try:
self.assertAlmostEqual(fidelity, 1, delta=4 / np.sqrt(nshots))
except AssertionError:
print(fidelity)
raise
def check_pauli_expectaion(self, item, rho):
for (a, b) in itertools.product(pauli.keys(), pauli.keys()):
if not (a == "I" and b == "I"):
correct = pauli_expectation(rho, a, b)
tomo = item[(a, b)]
# The variance on the expectation values
sigma = np.sqrt((1 - correct ** 2) / nshots)
try:
# A delta of 4*sigma should guarantee that 99.98% of results
# are within bounds
self.assertAlmostEqual(tomo, correct, delta=4*sigma)
except AssertionError:
print(a, b, correct, tomo)
raise
def test_meas_qubit_specification(self):
n = 4
q = QuantumRegister(n)
qc = QuantumCircuit(q)
psi = ((2 * np.random.rand(2 ** n) - 1)
+ 1j * (2 *np.random.rand(2 ** n) - 1))
psi /= np.linalg.norm(psi)
qc.initialize(psi, q)
rho = DensityMatrix.from_instruction(qc).data
measured_qubits = [q[0], q[2], q[3]]
circ = pairwise_state_tomography_circuits(qc, measured_qubits)
job = execute(circ, Aer.get_backend("qasm_simulator"), shots=nshots)
fitter = PairwiseStateTomographyFitter(job.result(), circ, measured_qubits)
result = fitter.fit()
result_exp = fitter.fit(output='expectation')
# Compare the tomography matrices with the partial trace of
# the original state using fidelity
for (k, v) in result.items():
#TODO: This method won't work if measured_qubits is not ordered in
# wrt the DensityMatrix object.
trace_qubits = list(range(n))
trace_qubits.remove(measured_qubits[k[0]].index)
trace_qubits.remove(measured_qubits[k[1]].index)
print(trace_qubits, rho.shape)
rhok = partial_trace(rho, trace_qubits).data
try:
self.check_density_matrix(v, rhok)
except:
print("Problem with density matrix:", k)
raise
try:
self.check_pauli_expectaion(result_exp[k], rhok)
except:
print("Problem with expectation values:", k)
raise
def test_multiple_registers(self):
    """Tomography of a Bell-pair state spread over two quantum registers."""
    n = 4
    # Register sizes must be integers: n / 2 is a float in Python 3 and
    # QuantumRegister rejects (or deprecates) non-int sizes. Use floor
    # division instead.
    q = QuantumRegister(n // 2)
    p = QuantumRegister(n // 2)
    qc = QuantumCircuit(q, p)
    qc.h(q[0])
    qc.rx(np.pi / 4, q[1])
    qc.cx(q[0], p[0])
    qc.cx(q[1], p[1])
    rho = DensityMatrix.from_instruction(qc).data
    # Only the first register is measured; results are compared against
    # partial traces over the remaining qubits.
    measured_qubits = q
    circ = pairwise_state_tomography_circuits(qc, measured_qubits)
    job = execute(circ, Aer.get_backend("qasm_simulator"), shots=nshots)
    fitter = PairwiseStateTomographyFitter(job.result(), circ, measured_qubits)
    result = fitter.fit()
    result_exp = fitter.fit(output='expectation')
    # Compare the tomography matrices with the partial trace of
    # the original state using fidelity
    for (k, v) in result.items():
        trace_qubits = list(range(n))
        trace_qubits.remove(measured_qubits[k[0]].index)
        trace_qubits.remove(measured_qubits[k[1]].index)
        rhok = partial_trace(rho, trace_qubits).data
        # except Exception instead of a bare except: re-raise still happens,
        # but BaseExceptions are no longer intercepted for printing.
        try:
            self.check_density_matrix(v, rhok)
        except Exception:
            print("Problem with density matrix:", k)
            raise
        try:
            self.check_pauli_expectaion(result_exp[k], rhok)
        except Exception:
            print("Problem with expectation values:", k)
            raise
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
https://github.com/if-quantum/pairwise-tomography
|
if-quantum
|
from qiskit import execute, Aer
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.providers import JobStatus
from qiskit.tools.qi.qi import partial_trace
from qiskit.quantum_info import state_fidelity
from qiskit.compiler import transpile
from qiskit.quantum_info.random.utils import random_state
import time
from pairwise_tomography.pairwise_state_tomography_circuits import pairwise_state_tomography_circuits
from pairwise_tomography.pairwise_fitter import PairwiseStateTomographyFitter
from pairwise_tomography.utils import concurrence
backend = Aer.get_backend('qasm_simulator')
nq = 5
q = QuantumRegister(nq)
qc = QuantumCircuit(q)
qc.ry(3., q[0])
qc.cx(q[0], q[1])
qc.x(q[1])
qc.ry(2.1, q[2])
qc.cx(q[2], q[3])
qc.h(q[4])
qc.cx(q[4], q[1])
qc.cx(q[1], q[4])
qc.cx(q[4], q[3])
qc.cx(q[3], q[4])
qc.draw(output='mpl')
pw_tomo_circs = pairwise_state_tomography_circuits(qc, q)
print(len(pw_tomo_circs))
pw_tomo_circs[10].draw(output='mpl')
job = execute(pw_tomo_circs, Aer.get_backend('qasm_simulator'), shots=8192)
fitter = PairwiseStateTomographyFitter(job.result(), pw_tomo_circs, q)
fit_result = fitter.fit()
print(fit_result[(0,1)])
pairwise_entanglement = {key: concurrence(value) for key, value in fit_result.items()}
print(pairwise_entanglement)
from pairwise_tomography.visualization import draw_entanglement_graph
draw_entanglement_graph(pairwise_entanglement)
|
https://github.com/if-quantum/pairwise-tomography
|
if-quantum
|
"""
Pairwise tomography circuit generation
"""
# pylint: disable=invalid-name
import numpy as np
from qiskit import ClassicalRegister
from qiskit.ignis.verification.tomography.basis.circuits import _format_registers
def pairwise_state_tomography_circuits(circuit, measured_qubits):
    """
    Generates a minimal set of circuits for pairwise state tomography.
    This performs measurement in the Pauli-basis resulting in
    circuits for an n-qubit state tomography experiment.
    Args:
        circuit (QuantumCircuit): the state preparation circuit to be
            tomographed.
        measured_qubits (QuantumRegister): the qubits to be measured.
            This can also be a list of whole QuantumRegisters or
            individual QuantumRegister qubit tuples.
    Returns:
        A list of QuantumCircuit objects containing the original circuit
        with state tomography measurements appended at the end.
    """
    # Stdlib only; imported locally so the module import block is untouched.
    from itertools import permutations

    ### Initialisation stuff
    if isinstance(measured_qubits, list):
        # Unroll list of registers
        meas_qubits = _format_registers(*measured_qubits)
    else:
        meas_qubits = _format_registers(measured_qubits)
    N = len(meas_qubits)
    cr = ClassicalRegister(len(meas_qubits))

    ### Uniform measurement settings: all qubits measured in the same basis
    X = circuit.copy(name=str(('X',) * N))
    Y = circuit.copy(name=str(('Y',) * N))
    Z = circuit.copy(name=str(('Z',) * N))
    X.add_register(cr)
    Y.add_register(cr)
    Z.add_register(cr)
    for bit_index, qubit in enumerate(meas_qubits):
        # Basis changes: H for X, S^dagger then H for Y, nothing for Z.
        X.h(qubit)
        Y.sdg(qubit)
        Y.h(qubit)
        X.measure(qubit, cr[bit_index])
        Y.measure(qubit, cr[bit_index])
        Z.measure(qubit, cr[bit_index])
    output_circuit_list = [X, Y, Z]

    ### Heterogeneous measurement settings
    # All 3! = 6 orderings of the three bases. itertools.permutations yields
    # them in the same lexicographic order as the original hand-rolled
    # double loop, so the emitted circuit order is unchanged.
    sequences = [list(seq) for seq in permutations(['X', 'Y', 'Z'])]

    # Qubit colouring: ceil(log3(N)) layers suffice so that every qubit pair
    # appears with differing bases in at least one circuit.
    nlayers = int(np.ceil(np.log(float(N)) / np.log(3.0)))
    for layout in range(nlayers):
        for sequence in sequences:
            meas_layout = circuit.copy()
            meas_layout.add_register(cr)
            # Accumulate the per-qubit basis labels as a tuple, then stringify.
            meas_layout.name = ()
            for bit_index, qubit in enumerate(meas_qubits):
                local_basis = sequence[int(float(bit_index) / float(3 ** layout)) % 3]
                if local_basis == 'Y':
                    meas_layout.sdg(qubit)
                if local_basis != 'Z':
                    meas_layout.h(qubit)
                meas_layout.measure(qubit, cr[bit_index])
                meas_layout.name += (local_basis,)
            meas_layout.name = str(meas_layout.name)
            output_circuit_list.append(meas_layout)
    return output_circuit_list
|
https://github.com/qiskit-community/archiver4qiskit
|
qiskit-community
|
import os
import qiskit
from qiskit import IBMQ, Aer
import uuid
import pickle
# Best-effort account load: failure is non-fatal (local Aer simulators still
# work without an IBMQ account).
try:
    IBMQ.load_account()
except Exception:
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit during
    # the network call still propagate instead of being swallowed.
    print('Unable to load IBMQ account')
def _prep():
    """Ensure the local 'archive' directory exists (idempotent)."""
    # makedirs(exist_ok=True) replaces the listdir-then-mkdir pattern,
    # which raced if the directory appeared between check and creation
    # and only looked at the current working directory listing.
    os.makedirs('archive', exist_ok=True)
_prep()
class Archive():
    '''
    A serializable equivalent to the Qiskit job object.

    Captures enough of a job (id, backend, circuits, qobj, result) to be
    pickled to disk under ``<path>archive/<archive_id>`` and re-loaded later.
    '''
    def __init__(self, job, path='', note='', circuits=None):
        # Archive id is "<job id>@<backend name>"; a random hex id is used
        # when the job object exposes no job_id attribute.
        if 'job_id' in dir(job):
            self.archive_id = job.job_id() + '@' + job.backend().name()
        else:
            self.archive_id = uuid.uuid4().hex + '@' + job.backend().name()
        self.path = path
        self.note = note
        self._job_id = job.job_id()
        self._backend = job.backend()
        self._backend.properties()  # just needs to be called to load
        self._metadata = job.metadata
        self.version = job.version
        if 'circuits' in dir(job):
            self._circuits = job.circuits()
        else:
            self._circuits = circuits
        if 'qobj' in dir(job):
            self._qobj = job.qobj()
        # Simulator (Aer) results are available immediately; hardware results
        # are fetched lazily in result().
        if 'aer' in self.backend().name():
            self._result = job.result()
        else:
            self._result = None
        self.save()
    def save(self):
        '''Pickle this archive to <path>archive/<archive_id>.'''
        with open(self.path + 'archive/' + self.archive_id, 'wb') as file:
            pickle.dump(self, file)
    def job_id(self):
        '''Return the wrapped job's id.'''
        return self._job_id
    def backend(self):
        '''Return the backend object the job ran on.'''
        return self._backend
    def metadata(self):
        '''Return the job metadata.

        Bug fix: this previously returned ``self._job_id`` (copy-paste error)
        instead of the metadata captured in ``__init__``.
        '''
        return self._metadata
    def circuits(self):
        '''Return the circuits that were submitted.'''
        return self._circuits
    def qobj(self):
        '''Return the submitted qobj (set only when the job exposed one).'''
        return self._qobj
    def result(self):
        '''Return the job result, fetching and caching it on first access.'''
        if self._result is None:
            backend = get_backend(self.backend().name())
            job = backend.retrieve_job(self.job_id())
            self._result = job.result()
            self.save()
        return self._result
def get_backend(backend_name):
    '''
    Given a string that specifies a backend, returns the backend object.

    Non-string inputs are assumed to already be backend objects and are
    returned unchanged.

    Raises:
        LookupError: if a string name matches no backend of any provider.
            (Previously this printed a warning and then crashed with an
            UnboundLocalError on the return statement.)
    '''
    if not isinstance(backend_name, str):
        # Already a backend object: pass it through unchanged.
        return backend_name
    if 'aer' in backend_name:
        return Aer.get_backend(backend_name)
    # Scan every provider for a backend with a matching name; return on the
    # first hit instead of flag-guarding the rest of the scan.
    for provider in IBMQ.providers():
        for potential_backend in provider.backends():
            if potential_backend.name() == backend_name:
                return potential_backend
    print('No backend was found matching ' + backend_name + ' with your providers.')
    raise LookupError('No backend was found matching ' + backend_name)
def submit_job(circuits, backend_name, path='', note='',
               job_name=None, job_share_level=None, job_tags=None, experiment_id=None, header=None,
               shots=None, memory=None, qubit_lo_freq=None, meas_lo_freq=None, schedule_los=None,
               meas_level=None, meas_return=None, memory_slots=None, memory_slot_size=None,
               rep_time=None, rep_delay=None, init_qubits=None, parameter_binds=None, use_measure_esp=None,
               **run_config):
    '''
    Given a backend name and the arguments for the `run` method of the backend object, submits the job
    and returns the archive id.
    '''
    # NOTE(review): the 'path' parameter is accepted but never forwarded to
    # Archive below — confirm whether it should be passed as Archive(path=path).
    # get backend (accepts either a name string or a backend object)
    backend = get_backend(backend_name)
    backend_name = backend.name()
    # submit job, forwarding every backend.run() option unchanged
    job = backend.run(circuits, job_name=job_name, job_share_level=job_share_level, job_tags=job_tags,
                      experiment_id=experiment_id, header=header, shots=shots, memory=memory,
                      qubit_lo_freq=qubit_lo_freq, meas_lo_freq=meas_lo_freq, schedule_los=schedule_los,
                      meas_level=meas_level, meas_return=meas_return, memory_slots=memory_slots,
                      memory_slot_size=memory_slot_size, rep_time=rep_time, rep_delay=rep_delay, init_qubits=init_qubits,
                      parameter_binds=parameter_binds, use_measure_esp=use_measure_esp,
                      **run_config)
    # create archive (pickled to disk as a side effect of construction)
    archive = Archive(job, note=note, circuits=circuits)
    # if an Aer job, the result is already available: cache it eagerly
    if 'aer' in job.backend().name():
        archive.result()
    # return the id
    return archive.archive_id
def get_job(archive_id):
    '''
    Returns the Qiskit job object corresponding to a given archive_id
    (an id of the form "<job id>@<backend name>").
    '''
    job_id, backend_name = archive_id.split('@')
    return get_backend(backend_name).retrieve_job(job_id)
def get_archive(archive_id, path=''):
    '''
    Returns the saved archive object corresponding to a given archive_id,
    unpickling it from <path>archive/<archive_id>.
    '''
    filename = path + 'archive/' + archive_id
    with open(filename, 'rb') as stream:
        return pickle.load(stream)
def jobid2archive(job_id, backend_name):
    '''Retrieve a job by id, wrap it in a saved Archive, and return its id.'''
    job = get_backend(backend_name).retrieve_job(job_id)
    archive = Archive(job)
    # Force the result to be fetched and cached inside the archive.
    archive.result()
    return archive.archive_id
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
from qiskit.tools.jupyter import *
from qiskit import IBMQ
IBMQ.load_account()
#provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
provider = IBMQ.get_provider(hub='ibm-q-oxford', group='on-boarding', project='on-boarding-proj')
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, Aer
from qiskit import execute
# Create one 4 qubit QuantumRegister to hold the Bayesian network and an ancilla qubit,
# and a 3 bit ClassicalRegister to hold the sampled values
net = QuantumRegister(4, 'qreg')
cl = ClassicalRegister(3, 'creg')
circ = QuantumCircuit(net, cl, name='circ')
from numpy import arcsin, sqrt, pi
def probToAngle(prob):
    """
    Converts a given P(1) value into an equivalent theta value.

    The amplitude of |1> is sqrt(prob), so the RY rotation angle is
    2 * arcsin(sqrt(prob)).
    """
    amplitude = sqrt(prob)
    return 2 * arcsin(amplitude)
# Setting up a qubit to represent the variable P
circ.u3(probToAngle(0.35), 0, 0, net[0])
# Since we have P = 1, we use the second row of the probability table for the variable E
circ.u3(probToAngle(0.76), 0, 0, net[1])
# Setting up the qubit representing H assuming that E = 0
circ.u3(probToAngle(0.39), 0, 0, net[2])
def oracle(circ):
    """
    Implements an oracle that flips the sign of states that contain P = 1.

    Operates on the module-level register ``net`` (qubit 0 is P,
    qubit 1 is E in the surrounding script).
    """
    # NOTE(review): the same controlled-u3(pi, pi, 0) gate is applied twice
    # on the same control/target pair; the two applications compose. Confirm
    # this is the intended phase behaviour and not a copy-paste duplicate.
    circ.cu3(pi, pi, 0, net[0], net[1])
    circ.cu3(pi, pi, 0, net[0], net[1])
    return circ
def u_gate(circ):
    """
    Implements the U gate that flips states about the average amplitude.

    Operates on the module-level register ``net``; the hard-coded angles
    (0.35, 0.76, 0.39) mirror the state-preparation rotations applied to
    net[0..2] earlier in the script, so the first block maps the prepared
    state back to |000...0> — TODO confirm they stay in sync if the
    preparation probabilities change.
    """
    # Implements the quantum circuit that converts psi -> |000...0>
    circ.u3(-1*probToAngle(0.35), 0, 0, net[0])
    circ.u3(-1*probToAngle(0.76), 0, 0, net[1])
    circ.u3(-1*probToAngle(0.39), 0, 0, net[2])
    # Flipping the |000...0> state using a triple controlled Z gate condtioned on P, E and H,
    # and applied to the ancilla (net[3]); built from cu1/cx pieces below.
    circ.x(net)
    circ.cu1(pi/4, net[0], net[3])
    circ.cx(net[0], net[1])
    circ.cu1(-pi/4, net[1], net[3])
    circ.cx(net[0], net[1])
    circ.cu1(pi/4, net[1], net[3])
    circ.cx(net[1], net[2])
    circ.cu1(-pi/4, net[2], net[3])
    circ.cx(net[0], net[2])
    circ.cu1(pi/4, net[2], net[3])
    circ.cx(net[1], net[2])
    circ.cu1(-pi/4, net[2], net[3])
    circ.cx(net[0], net[2])
    circ.cu1(pi/4, net[2], net[3])
    circ.x(net)
    # Implements the quantum circuit that converts |000...0> -> psi
    circ.u3(probToAngle(0.35), 0, 0, net[0])
    circ.u3(probToAngle(0.76), 0, 0, net[1])
    circ.u3(probToAngle(0.39), 0, 0, net[2])
    return circ
# Apply oracle and U gate twice
circ = oracle(circ)
circ = u_gate(circ)
circ = oracle(circ)
circ = u_gate(circ)
circ.x(net[0])
# Measure E, and rotate H to the P(1) value in the second row of the P(H|E) table condtioned on E
circ.measure(net[1], cl[1])
circ.u3(probToAngle(0.82) - probToAngle(0.39), 0, 0, net[2])
# Sample by measuring the rest of the qubits
circ.measure(net[0], cl[0])
circ.measure(net[2], cl[2])
# Get backend from Aer provider
backend = Aer.get_backend('qasm_simulator')
# Run job many times to get multiple samples
samples_list = []
n_samples = 1000
for i in range(n_samples):
job = execute(circ, backend=backend, shots=1)
result = list(job.result().get_counts(circ).keys())[0]
if result[2] == '1':
samples_list.append(result)
# Printing the number of useful samples and percentage of samples rejected
print()
print(n_samples, 'samples drawn:', len(samples_list), 'samples accepted,', n_samples-len(samples_list), 'samples rejected.' )
print('Percentage of samples rejected: ', 100*(1 - (len(samples_list)/n_samples)), '%')
# Computing P(H = 0| P = 1)
p_H = 0
for i in samples_list:
if i[0] == '0':
p_H += 1
p_H /= len(samples_list)
print('P(H = 0| P = 1) =', p_H)
print()
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
import numpy as np
import matplotlib.pyplot as plt
from qiskit import *
from qiskit.aqua.algorithms import Grover
# First princinple for two parent nodes and one child
class byskit():
    """Quantum-circuit encoding of a two-layer Bayesian network.

    Builds a circuit from parent-node probabilities (``parents``, flat array
    of P(0)/P(1) pairs) and a child conditional-probability table (``child``,
    shape (2**(n+1), n_child)), then supports rejection sampling and
    evaluation of conditional probabilities.

    NOTE(review): when ``evd`` is given, the attribute ``self.oracle``
    (a QuantumRegister) shadows the ``oracle`` method defined below, and
    ``amplitude_amplification`` later rebinds it to a QuantumCircuit —
    confirm this aliasing is intended.
    """
    def __init__(self, backend, parents, child, evd = None):
        self.backend = backend
        self.parents = parents
        self.child = child
        # parents holds (P(0), P(1)) pairs, so the qubit count is len/2.
        self.n = int(np.shape(parents)[0]/2)
        self.n_child = np.shape(child)[1]
        self.ctrl = QuantumRegister(self.n, 'ctrl')
        self.anc = QuantumRegister(self.n - 1, 'anc')
        self.tgt = QuantumRegister(self.n_child, 'tgt')
        # NOTE(review): prefer 'evd is not None'.
        if evd != None:
            self.oracle = QuantumRegister(evd,'oracle')
            self.circ = QuantumCircuit(self.ctrl, self.anc, self.tgt, self.oracle)
        else:
            self.circ = QuantumCircuit(self.ctrl, self.anc, self.tgt)
        #self.c_ctrl = ClassicalRegister(self.n, 'c_ctrl')
        #self.c_tgt = ClassicalRegister(self.n_child, 'c_tgt')
        self.parent_init()
        self.child_init()
    def parent_init(self):
        # One RY rotation per parent qubit, angle set by its (p0, p1) pair.
        for i in range(self.n):
            theta = self.calc_theta(self.parents[2*i], self.parents[2*i+1])
            self.circ.ry(theta, i)
        self.circ.barrier()
    def child_init(self):
        # Enumerate every parent bitstring; for each, apply the controlled
        # RY rotations that load the matching row of the child CPT.
        self.a = np.arange(0, 2 ** self.n)
        self.gates = []
        for i in self.a:
            s = str(np.binary_repr(i, width=self.n))
            self.gates.append(s)
        for i in range(2**self.n):
            # X-gates select the control pattern, then are undone below.
            self.xgate(self.gates[i])
            for j in range(self.n_child):
                theta = self.calc_theta(self.child[2 * i + 1,j], self.child[2 * i,j])
                self.cn_ry(theta,j)
            self.xgate(self.gates[i])
            self.circ.barrier()
    def xgate(self,gate):
        # Flip every control qubit whose bit in `gate` is '0'.
        for index, item in enumerate(gate):
            if int(item) == 0:
                self.circ.x(index)
    #RY gates
    def cn_ry(self,theta,target):
        """n-controlled RY via a Toffoli ladder through the ancilla register."""
        # compute
        self.circ.ccx(self.ctrl[0], self.ctrl[1], self.anc[0])
        for i in range(2, self.n):
            self.circ.ccx(self.ctrl[i], self.anc[i - 2], self.anc[i - 1])
        # copy
        self.circ.cry(theta,self.anc[self.n - 2], self.tgt[target])
        # uncompute
        for i in range(self.n - 1, 1, -1):
            self.circ.ccx(self.ctrl[i], self.anc[i - 2], self.anc[i - 1])
        self.circ.ccx(self.ctrl[0], self.ctrl[1], self.anc[0])
    def calc_theta(self,p1,p0):
        # RY angle whose amplitudes reproduce the odds p1 : p0.
        return 2 * np.arctan(np.sqrt((p1)/(p0)))
    def plot(self):
        """Draw the circuit with matplotlib and show the figure."""
        self.circ.draw(output='mpl')
        plt.show()
    def execute_circ(self):
        """Measure all qubits and run the circuit once with 4321 shots."""
        self.circ.measure_all()
        results = execute(self.circ, self.backend, shots=4321)
        return results
    def rejection_sampling(self, evidence, shots=1000, amplitude_amplification=False):
        """Draw single-shot samples, keeping only those matching `evidence`.

        `evidence` maps labels to {'n': bit index, 'state': '0'/'1'}.
        """
        # Run job many times to get multiple samples
        samples_list = []
        self.n_samples = shots
        if amplitude_amplification==True:
            self.amplitude_amplification(evidence)
        self.circ.measure_all()
        #self.circ.measure((self.ctrl, self.tgt),(self.c_ctrl, self.c_tgt))
        for i in range(self.n_samples):
            job = execute(self.circ, backend=self.backend, shots=1)
            result = list(job.result().get_counts(self.circ).keys())[0]
            accept = True
            for e in evidence:
                if result[evidence[e]['n']]==evidence[e]['state']:
                    pass
                else:
                    accept=False
            if accept == True:
                #print('Accepted result ', result)
                samples_list.append(result)
        print()
        print(self.n_samples, 'samples drawn:', len(samples_list), 'samples accepted,', self.n_samples - len(samples_list),
              'samples rejected.')
        print('Percentage of samples rejected: ', 100 * (1 - (len(samples_list) / self.n_samples)), '%')
        return samples_list
    def evaluate(self, samples_list, observations):
        """Return the fraction of samples consistent with `observations`."""
        p_o = 0
        for sample in samples_list:
            accept = True
            for o in observations:
                if sample[observations[o]['n']] == observations[o]['state']:
                    pass
                else:
                    accept = False
            if accept == True:
                #print('Observation true given evidence')
                p_o += 1
        p_o /= len(samples_list)
        print('Probabilty of observations given evidence = ', p_o)
        return p_o
    def amplitude_amplification(self,evidence):
        """Build a Grover operator whose oracle phase-flips the evidence bits."""
        self.state_preparation = self.circ
        # NOTE(review): rebinds self.oracle (see class docstring).
        self.oracle = QuantumCircuit(self.ctrl, self.anc, self.tgt)
        for index, e in enumerate(evidence):
            if evidence[e]['state'] == '1':
                self.oracle.z([evidence[e]['n']])
        self.grover_op = Grover(self.oracle, state_preparation=self.state_preparation)
        self.grover_op.draw()
    def oracle(self):
        # Placeholder; shadowed by the self.oracle attribute when set.
        pass
    def u_gate(self):
        # Placeholder.
        pass
def gen_random_weights(n_parent, n_child):
    """Generate deterministic random network weights (seeded with 0).

    Returns:
        parents: flat array of length 2*n_parent holding (p, 1-p) pairs.
        child: array of shape (2**(n_parent+1), n_child) whose even/odd
            row pairs sum to 1 column-wise.
    """
    np.random.seed(0)
    raw = np.random.rand(n_parent)
    # Interleave p and 1-p into consecutive slots.
    parents = np.empty(2 * n_parent)
    parents[0::2] = raw
    parents[1::2] = 1 - raw
    child = np.random.rand(2 ** (n_parent + 1), n_child)
    # Each odd row is the complement of the even row above it.
    child[1::2, :] = 1 - child[0::2, :]
    return parents, child
if __name__=='__main__':
from qiskit import IBMQ
IBMQ.load_account()
#provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
provider = IBMQ.get_provider(hub='ibm-q-oxford', group='on-boarding', project='on-boarding-proj')
from qiskit import Aer #BasicAer
#backend = BasicAer.get_backend('unitary_simulator')
backend = Aer.get_backend('qasm_simulator')
n_parent = 3
n_child = 3
parents, children = gen_random_weights(n_parent, n_child)
b = byskit(backend, parents, children)
b.plot()
evidence = {
'one':{
'n':1,
'state':'1'
}
}
#b.rejection_sampling(evidence,amplitude_amplification=True)
sample_list = b.rejection_sampling(evidence)
observations = {
'three':{
'n':2,
'state':'0'
}
}
prob = b.evaluate(sample_list, observations)
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
import numpy as np
import matplotlib.pyplot as plt
from qiskit import *
from qiskit.aqua.algorithms import Grover
# First princinple for two parent nodes and one child
class byskit():
def __init__(self, backend,network, loaded_net, evd = None):
self.backend = backend
self.network = network
self.net_keys = [key for key in self.network]
self.loaded_net = loaded_net
self.reg = {}
self.create_circ()
self.root_init()
child_index = np.array([0,0])
parent_index = np.array([0, 0])
for index in range(len(self.net_keys)-1):
parent_key = self.net_keys[index]
child_key = self.net_keys[index+1]
if parent_key != 'root':
parent_index = np.array([parent_index[1], parent_index[1] + self.network[self.net_keys[index + 1]]])
child_index = np.array([child_index[1],child_index[1]+self.network[self.net_keys[index+1]]])
self.child_init(parent_key,parent_index,child_key,child_index)
def create_circ(self):
self.n_anc = 0
self.n_tgt = 0
for key in self.network:
if key == 'root':
n = self.network['root']
self.reg['cntrl'] = QuantumRegister(self.network['root'], 'cntrl')
else:
self.n_anc = max(n-1,self.n_anc)
self.n_tgt += self.network[key]
n = self.network[key]
self.reg['anc'] = QuantumRegister(self.n_anc,'anc')
self.reg['tgt'] = QuantumRegister(self.n_tgt, 'tgt')
self.circ = QuantumCircuit(self.reg['cntrl'],self.reg['anc'],self.reg['tgt'])
def root_init(self):
for i in range(self.network['root']):
theta = self.calc_theta(self.loaded_net['root'][2*i], self.loaded_net['root'][2*i+1])
self.circ.ry(theta, i)
self.circ.barrier()
def child_init(self,parent_key,parent_index,child_key,child_index):
parent_index = parent_index[0]
child_index = child_index[0]
self.a = np.arange(0, 2 ** self.network[parent_key])
self.gates = []
for i in self.a:
s = str(np.binary_repr(i, width=self.network[parent_key]))
self.gates.append(s)
for i in range(2**self.network[parent_key]):
self.xgate(self.gates[i],parent_index)
for j in range(self.network[child_key]):
theta = self.calc_theta(self.loaded_net[child_key][2 * i + 1,j], self.loaded_net[child_key][2 * i,j])
self.cn_ry(theta,j,parent_key,parent_index,child_key,child_index)
self.xgate(self.gates[i],parent_index)
self.circ.barrier()
def xgate(self,gate,parent_index):
for index, item in enumerate(gate):
if int(item) == 0:
self.circ.x(index+parent_index)
#RY gates
def cn_ry(self,theta,target,parent_key,parent_index,child_key,child_index):
# compute
if parent_key == 'root':
self.circ.ccx(self.reg['cntrl'][0], self.reg['cntrl'][1], self.reg['anc'][0])
for i in range(2, self.network[parent_key]):
self.circ.ccx(self.reg['cntrl'][i], self.reg['anc'][i - 2], self.reg['anc'][i - 1])
# copy
self.circ.cry(theta,self.reg['anc'][self.network[parent_key] - 2], self.reg['tgt'][target])
# uncompute
for i in range(self.network[parent_key] - 1, 1, -1):
self.circ.ccx(self.reg['cntrl'][i], self.reg['anc'][i - 2], self.reg['anc'][i - 1])
self.circ.ccx(self.reg['cntrl'][0], self.reg['cntrl'][1], self.reg['anc'][0])
else:
self.circ.ccx(self.reg['tgt'][parent_index+0], self.reg['tgt'][parent_index+1], self.reg['anc'][0])
for i in range(2, self.network[parent_key]):
self.circ.ccx(self.reg['tgt'][parent_index+i], self.reg['anc'][i - 2], self.reg['anc'][i - 1])
# copy
self.circ.cry(theta,self.reg['anc'][self.network[parent_key] - 2], self.reg['tgt'][child_index+target])
# uncompute
for i in range(self.network[parent_key] - 1, 1, -1):
self.circ.ccx(self.reg['tgt'][parent_index+i], self.reg['anc'][i - 2], self.reg['anc'][i - 1])
self.circ.ccx(self.reg['tgt'][parent_index+0], self.reg['tgt'][parent_index+1], self.reg['anc'][0])
def calc_theta(self,p1,p0):
return 2 * np.arctan(np.sqrt((p1)/(p0)))
def plot(self):
self.circ.draw(output='mpl')
plt.show()
def execute_circ(self):
self.circ.measure_all()
results = execute(self.circ, self.backend, shots=4321)
return results
def rejection_sampling(self, evidence, shots=5000, amplitude_amplification=False):
# Run job many times to get multiple samples
samples_list = []
self.n_samples = shots
if amplitude_amplification==True:
self.amplitude_amplification(evidence)
self.circ.measure_all()
for i in range(self.n_samples):
job = execute(self.circ, backend=self.backend, shots=1)
result = list(job.result().get_counts(self.circ).keys())[0]
accept = True
for e in evidence:
if result[evidence[e]['n']]==evidence[e]['state']:
pass
else:
accept=False
if accept == True:
#print('Accepted result ', result)
samples_list.append(result)
print()
print(self.n_samples, 'samples drawn:', len(samples_list), 'samples accepted,', self.n_samples - len(samples_list),
'samples rejected.')
print('Percentage of samples rejected: ', 100 * (1 - (len(samples_list) / self.n_samples)), '%')
return samples_list
def evaluate(self, samples_list, observations):
p_o = 0
for sample in samples_list:
accept = True
for o in observations:
if sample[observations[o]['n']] == observations[o]['state']:
pass
else:
accept = False
if accept == True:
#print('Observation true given evidence')
p_o += 1
p_o /= len(samples_list)
print('Probabilty of observations given evidence = ', p_o)
return p_o
def amplitude_amplification(self,evidence):
self.state_preparation = self.circ
self.oracle = QuantumCircuit(self.ctrl, self.anc, self.tgt)
for index, e in enumerate(evidence):
if evidence[e]['state'] == '1':
self.oracle.z([evidence[e]['n']])
self.grover_op = Grover(self.oracle, state_preparation=self.state_preparation)
self.grover_op.draw()
def oracle(self):
pass
def u_gate(self):
pass
def gen_random_net(network):
    """Generate deterministic random weights for a layered network spec.

    `network` maps layer names to node counts; the 'root' entry gets a flat
    (p, 1-p) pair array, every other layer gets a CPT of shape
    (2**(prev_layer_size + 1), layer_size) whose even/odd row pairs sum to 1.
    Seeded with 0, so output is reproducible.
    """
    np.random.seed(0)
    loaded_net = {}
    n_prev = None
    for key in network:
        if key == 'root':
            n_prev = network[key]
            raw = np.random.rand(n_prev)
            weights = np.empty(2 * n_prev)
            weights[0::2] = raw
            weights[1::2] = 1 - raw
            loaded_net[key] = weights
        else:
            n_cur = network[key]
            table = np.random.rand(2 ** (n_prev + 1), n_cur)
            # Odd rows are complements of the even rows above them.
            table[1::2, :] = 1 - table[0::2, :]
            loaded_net[key] = table
            # The current layer becomes the parent of the next one.
            n_prev = n_cur
    return loaded_net
if __name__=='__main__':
from qiskit import IBMQ
IBMQ.load_account()
#provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
provider = IBMQ.get_provider(hub='ibm-q-oxford', group='on-boarding', project='on-boarding-proj')
from qiskit import Aer #BasicAer
#backend = BasicAer.get_backend('unitary_simulator')
backend = Aer.get_backend('qasm_simulator')
#network = {'root':2,'child-1':3,'child-2':3,'child-3':2}
network = {'root':2,'child-1':3,'child-2':3}
loaded_net = gen_random_net(network)
b = byskit(backend, network, loaded_net)
b.plot()
evidence = {
'one':{
'n':1,
'state':'1'
},
'two':{
'n':5,
'state':'0'
}
}
#b.rejection_sampling(evidence,amplitude_amplification=True)
sample_list = b.rejection_sampling(evidence, shots=1000,amplitude_amplification=False)
observations = {
'one':{
'n':2,
'state':'0'
},
'two': {
'n': 4,
'state': '1'
}
}
prob = b.evaluate(sample_list, observations)
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
from qiskit import Aer
backend = Aer.get_backend('qasm_simulator')
network = {'root':2,'child-1':3,'child-2':3}
loaded_net = gen_random_net(network)
b = byskit(backend, network, loaded_net)
b.plot()
evidence = {
'one':{
'n':1,
'state':'1'
},
'two':{
'n':5,
'state':'0'
}
}
sample_list = b.rejection_sampling(evidence, shots=1000, amplitude_amplification=True)
observations = {
'one':{
'n':2,
'state':'0'
},
'two': {
'n': 4,
'state': '1'
}
}
prob = b.evaluate(sample_list, observations)
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
from qiskit import IBMQ
IBMQ.load_account()
# provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
#provider = IBMQ.get_provider(hub='ibm-q-oxford', group='on-boarding', project='on-boarding-proj')
from qiskit import BasicAer
backend = BasicAer.get_backend('qasm_simulator')
# Include matplot lib inline plotting for graphical output and plotting
%matplotlib inline
from byskit import byskit,gen_random_weights
n_parent = 2
n_child = 3
parents,children = gen_random_weights(n_parent,n_child)
b = byskit(backend,parents,children)
b.circ.draw(output='mpl')
# Trying to get results out
results = b.execute_circ().result()
# Import histogram visualisation tool and plot results
from qiskit.tools.visualization import plot_histogram
plot_histogram(results.get_counts(b.circ))
n_parent = 2
n_child = 4
parents,children = gen_random_weights(n_parent,n_child)
b = byskit(backend,parents,children)
b.circ.draw(output='mpl')
results = b.execute_circ().result()
plot_histogram(results.get_counts(b.circ))
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
#initialization
import matplotlib.pyplot as plt
import numpy as np
# importing Qiskit
from qiskit import IBMQ, Aer, assemble, transpile
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit.providers.ibmq import least_busy
# import basic plot tools
from qiskit.visualization import plot_histogram
n = 2
grover_circuit = QuantumCircuit(n)
def initialize_s(qc, qubits):
    """Apply a H-gate to 'qubits' in qc, putting them in |s>, and return qc."""
    for target in qubits:
        qc.h(target)
    return qc
def diffuser(nqubits):
    """Build the Grover diffuser (inversion about the mean) as a gate."""
    qc = QuantumCircuit(nqubits)
    qubits = range(nqubits)
    # |s> -> |00..0> (H-gates)
    for q in qubits:
        qc.h(q)
    # |00..0> -> |11..1> (X-gates)
    for q in qubits:
        qc.x(q)
    # Multi-controlled Z, built as H . MCT . H on the last qubit
    qc.h(nqubits - 1)
    qc.mct(list(range(nqubits - 1)), nqubits - 1)  # multi-controlled-toffoli
    qc.h(nqubits - 1)
    # Undo the X layer: |11..1> -> |00..0>
    for q in qubits:
        qc.x(q)
    # Undo the H layer: |00..0> -> |s>
    for q in qubits:
        qc.h(q)
    # Package the whole construction as a single named gate
    gate = qc.to_gate()
    gate.name = "U$_s$"
    return gate
grover_circuit = initialize_s(grover_circuit, [0,1])
grover_circuit.draw(output="mpl")
grover_circuit.cz(0,1) # Oracle
grover_circuit.draw(output="mpl")
# Diffusion operator (U_s)
grover_circuit.append(diffuser(n),[0,1])
grover_circuit.draw(output="mpl")
sim = Aer.get_backend('aer_simulator')
# we need to make a copy of the circuit with the 'save_statevector'
# instruction to run on the Aer simulator
grover_circuit_sim = grover_circuit.copy()
grover_circuit_sim.save_statevector()
qobj = assemble(grover_circuit_sim)
result = sim.run(qobj).result()
statevec = result.get_statevector()
from qiskit_textbook.tools import vector2latex
vector2latex(statevec, pretext="|\\psi\\rangle =")
grover_circuit.measure_all()
aer_sim = Aer.get_backend('aer_simulator')
qobj = assemble(grover_circuit)
result = aer_sim.run(qobj).result()
counts = result.get_counts()
plot_histogram(counts)
nqubits = 4
qc = QuantumCircuit(nqubits)
# Apply transformation |s> -> |00..0> (H-gates)
for qubit in range(nqubits):
qc.h(qubit)
# Apply transformation |00..0> -> |11..1> (X-gates)
for qubit in range(nqubits):
qc.x(qubit)
qc.barrier()
# Do multi-controlled-Z gate
qc.h(nqubits-1)
qc.mct(list(range(nqubits-1)), nqubits-1) # multi-controlled-toffoli
qc.h(nqubits-1)
qc.barrier()
# Apply transformation |11..1> -> |00..0>
for qubit in range(nqubits):
qc.x(qubit)
# Apply transformation |00..0> -> |s>
for qubit in range(nqubits):
qc.h(qubit)
qc.draw(output="mpl")
from qiskit.circuit import classical_function, Int1
# define a classical function f(x): this returns 1 for the solutions of the problem
# in this case, the solutions are 1010 and 1100
# Classical oracle synthesized into a reversible circuit by Qiskit's
# classical_function compiler; true exactly for the two solution
# assignments listed in the comment above (argument order x1..x4).
@classical_function
def f(x1: Int1, x2: Int1, x3: Int1, x4: Int1) -> Int1:
    return (x1 and not x2 and x3 and not x4) or (x1 and x2 and not x3 and not x4)
nqubits = 4
Uf = f.synth() # turn it into a circuit
oracle = QuantumCircuit(nqubits+1)
oracle.compose(Uf, inplace=True)
oracle.draw(output="mpl")
# We will return the diffuser as a gate
#U_f = oracle.to_gate()
# U_f.name = "U$_f$"
# return U_f
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
from qiskit import IBMQ
IBMQ.load_account()
# provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
provider = IBMQ.get_provider(hub='ibm-q-oxford', group='on-boarding', project='on-boarding-proj')
from qiskit import BasicAer
backend = BasicAer.get_backend('unitary_simulator')
%matplotlib inline
from byskit import byskit,gen_random_weights
n_parent = 3
n_child = 3
parents,children = gen_random_weights(n_parent,n_child)
b = byskit(backend,parents,children)
b.circ.draw(output='mpl')
results = b.execute_circ()
plot_histogram(results.get_counts(b.circ))
n_parent = 2
n_child = 4
parents,children = gen_random_weights(n_parent,n_child)
b = byskit(backend,parents,children)
b.circ.draw(output='mpl')
results = b.execute_circ()
plot_histogram(results.get_counts(b.circ))
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
from qiskit import IBMQ
IBMQ.load_account()
# provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
#provider = IBMQ.get_provider(hub='ibm-q-oxford', group='on-boarding', project='on-boarding-proj')
from qiskit import BasicAer
backend = BasicAer.get_backend('qasm_simulator')
# Include matplot lib inline plotting for graphical output and plotting
%matplotlib inline
from byskit import byskit,gen_random_weights
n_parent = 2
n_child = 3
parents,children = gen_random_weights(n_parent,n_child)
b = byskit(backend,parents,children)
b.circ.draw(output='mpl')
# Trying to get results out
results = b.execute_circ().result()
# Import histogram visualisation tool and plot results
from qiskit.tools.visualization import plot_histogram
plot_histogram(results.get_counts(b.circ))
n_parent = 2
n_child = 4
parents,children = gen_random_weights(n_parent,n_child)
b = byskit(backend,parents,children)
b.circ.draw(output='mpl')
results = b.execute_circ().result()
plot_histogram(results.get_counts(b.circ))
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
import numpy as np
import matplotlib.pyplot as plt
from qiskit import *
# First princinple for two parent nodes and one child
class byskit():
    """First-principles circuit for n parent nodes and a single child node.

    `parents` is a flat array of 2*n probabilities (pairs per parent);
    `child` holds 2**(n+1) conditional probabilities for the one target
    qubit. The circuit is drawn (and shown) as a side effect of
    construction — NOTE(review): consider moving plotting out of __init__.
    """
    def __init__(self, provider, backend, n, parents, child):
        self.provider = provider
        self.backend = backend
        self.parents = parents
        self.child = child
        self.n = n
        self.ctrl = QuantumRegister(self.n, 'ctrl')
        # n-1 ancillas support the Toffoli ladder in cn_ry.
        self.anc = QuantumRegister(self.n - 1, 'anc')
        self.tgt = QuantumRegister(1, 'tgt')
        self.circ = QuantumCircuit(self.ctrl, self.anc, self.tgt)
        self.parent_init()
        self.child_init()
        self.circ.draw(output='mpl')
        plt.show()
    def parent_init(self):
        # One RY per parent qubit, angle from its probability pair.
        for i in range(self.n):
            theta = self.calc_theta(self.parents[2*i], self.parents[2*i+1])
            self.circ.ry(theta, i)
        self.circ.barrier()
    def child_init(self):
        # For each parent bitstring, select it with X-gates and apply the
        # matching controlled rotation on the target, then undo the X-gates.
        self.a = np.arange(0, 2 ** self.n)
        self.gates = []
        for i in self.a:
            s = str(np.binary_repr(i, width=self.n))
            self.gates.append(s)
        for i in range(2**self.n):
            theta = self.calc_theta(self.child[2*i+1], self.child[2*i])
            self.xgate(self.gates[i])
            self.cn_ry(theta)
            self.xgate(self.gates[i])
            self.circ.barrier()
    def xgate(self,gate):
        # Flip every control qubit whose bit in `gate` is '0'.
        for index, item in enumerate(gate):
            if int(item) == 0:
                self.circ.x(index)
    #RY gates
    def cn_ry(self,theta):
        """n-controlled RY on the target via a Toffoli ladder."""
        # compute
        self.circ.ccx(self.ctrl[0], self.ctrl[1], self.anc[0])
        for i in range(2, self.n):
            self.circ.ccx(self.ctrl[i], self.anc[i - 2], self.anc[i - 1])
        # copy
        self.circ.cry(theta,self.anc[self.n - 2], self.tgt[0])
        # uncompute
        for i in range(self.n - 1, 1, -1):
            self.circ.ccx(self.ctrl[i], self.anc[i - 2], self.anc[i - 1])
        self.circ.ccx(self.ctrl[0], self.ctrl[1], self.anc[0])
    def calc_theta(self,p1,p0):
        # RY angle whose amplitudes reproduce the odds p1 : p0.
        return 2 * np.arctan(np.sqrt((p1)/(p0)))
#if __name__=='__main__':
from jupyterthemes import jtplot
jtplot.style(theme='monokai', context='notebook', ticks=True, grid=False)
from qiskit.tools.jupyter import *
from qiskit import IBMQ
IBMQ.load_account()
# provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
provider = IBMQ.get_provider(hub='ibm-q-oxford', group='on-boarding', project='on-boarding-proj')
from qiskit import BasicAer
backend = BasicAer.get_backend('unitary_simulator')
# Random priors for n parents (two entries each) and a CPT with 2**(n+1) entries.
n = 3
parents = np.random.rand(n*2)
child = np.random.rand(2**(n+1))
b = byskit(provider,backend,n,parents,child)
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
import numpy as np
import matplotlib.pyplot as plt
from qiskit import *
import matplotlib.pyplot as plt
# Build an n-controlled NOT from a CCX (Toffoli) ladder over n-1 ancillas:
# compute the AND of all controls into the last ancilla, copy it onto the
# target, then uncompute the ladder in reverse order.
n = 5 # must be >= 2
ctrl = QuantumRegister(n, 'ctrl')
anc = QuantumRegister(n-1, 'anc')
tgt = QuantumRegister(1, 'tgt')
circ = QuantumCircuit(ctrl, anc, tgt)
# compute
circ.ccx(ctrl[0], ctrl[1], anc[0])
for i in range(2, n):
    circ.ccx(ctrl[i], anc[i-2], anc[i-1])
# copy
circ.cx(anc[n-2], tgt[0])
# uncompute
for i in range(n-1, 1, -1):
    circ.ccx(ctrl[i], anc[i-2], anc[i-1])
circ.ccx(ctrl[0], ctrl[1], anc[0])
circ.draw(output='mpl')
plt.show()
# Alternative kept for reference: Qiskit's built-in multi-controlled Toffoli.
'''ctrl = QuantumRegister(n, 'ctrl')
anc = QuantumRegister(n-1, 'anc')
tgt = QuantumRegister(1, 'tgt')
circ = QuantumCircuit(ctrl, anc, tgt)
circ.mct(ctrl, tgt[0], anc, mode='basic-dirty-ancilla')
circ.draw(output='mpl')
plt.show()
'''
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
import numpy as np
import matplotlib.pyplot as plt
from qiskit import *
# First-principles construction for two parent nodes and one child
class byskit():
    """Encode a two-layer Bayesian network on a quantum circuit.

    Parent priors become single-qubit RY rotations on the control register;
    the child's conditional probability table becomes multi-controlled RY
    rotations built from a CCX (Toffoli) ladder over ancilla qubits.
    """
    def __init__(self, provider, backend, n, parents, child):
        # provider/backend are stored but not used in the code shown here.
        self.provider = provider
        self.backend = backend
        self.parents = parents  # flat priors, two entries per parent
        self.child = child      # CPT entries, read in pairs per parent configuration
        self.n = n              # number of parent (control) qubits
        self.ctrl = QuantumRegister(self.n, 'ctrl')
        self.anc = QuantumRegister(self.n - 1, 'anc')   # ancillas for the CCX ladder
        self.tgt = QuantumRegister(1, 'tgt')            # child-node qubit
        self.circ = QuantumCircuit(self.ctrl, self.anc, self.tgt)
        self.parent_init()
        self.child_init()
        self.circ.draw(output='mpl')
        plt.show()

    def parent_init(self):
        """Load each parent's prior as an RY rotation on its control qubit."""
        for i in range(self.n):
            theta = self.calc_theta(self.parents[2*i], self.parents[2*i+1])
            self.circ.ry(theta, i)
        self.circ.barrier()
        # NOTE(review): this variant flips every control qubit after loading the
        # priors (absent from the sibling implementation above) — confirm intent.
        self.circ.x(self.ctrl)

    def child_init(self):
        """Load the child CPT: for every parent bit pattern, sandwich a
        multi-controlled RY between X gates that select the pattern."""
        self.a = np.arange(0, 2 ** self.n)
        gates = []
        for i in self.a:
            s = str(np.binary_repr(i, width=self.n))
            gates.append(s)
        # BUG FIX: was `range(2*self.n)`, which equals the number of parent
        # configurations (2**n) only when n == 2; iterate all 2**n bit patterns
        # so every CPT row is encoded (matches the sibling implementation above).
        for i in range(2**self.n):
            theta = self.calc_theta(self.child[2*i+1], self.child[2*i])
            for index2,item2 in enumerate(gates[i]):
                print(item2)
                if int(item2) == 0:
                    self.circ.x(index2)
            self.cn_ry(theta)
            for index2,item2 in enumerate(gates[i]):
                print(item2)
                if int(item2) == 0:
                    self.circ.x(index2)
            self.circ.barrier()

    #RY gates
    def cn_ry(self,theta):
        """RY(theta) on the target controlled on all n control qubits, built as
        a compute / copy / uncompute CCX ladder over the ancillas."""
        # compute
        self.circ.ccx(self.ctrl[0], self.ctrl[1], self.anc[0])
        for i in range(2, self.n):
            self.circ.ccx(self.ctrl[i], self.anc[i - 2], self.anc[i - 1])
        # copy
        self.circ.cry(theta,self.anc[self.n - 2], self.tgt[0])
        # uncompute
        for i in range(self.n - 1, 1, -1):
            self.circ.ccx(self.ctrl[i], self.anc[i - 2], self.anc[i - 1])
        self.circ.ccx(self.ctrl[0], self.ctrl[1], self.anc[0])

    def calc_theta(self,p1,p0):
        """Rotation angle with tan(theta/2) = sqrt(p1/p0)."""
        return 2 * np.arctan(np.sqrt((p1)/(p0)))
if __name__=='__main__':
    from jupyterthemes import jtplot
    jtplot.style(theme='monokai', context='notebook', ticks=True, grid=False)
    from qiskit.tools.jupyter import *
    from qiskit import IBMQ
    IBMQ.load_account()
    # provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
    provider = IBMQ.get_provider(hub='ibm-q-oxford', group='on-boarding', project='on-boarding-proj')
    from qiskit import BasicAer
    backend = BasicAer.get_backend('unitary_simulator')
    # Hand-written example: priors for parents A, B and the child CPT
    # c<child bit><A bit><B bit> entries below.
    a0 = 0.2
    a1 = 0.8
    b0 = 0.3
    b1 = 0.7
    c000 = 0.15
    c001 = 0.3
    c010 = 0.4
    c011 = 0.1
    c100 = 0.85
    c101 = 0.7
    c110 = 0.6
    c111 = 0.9
    parents = np.array([a0,a1,b0,b1])
    # CPT entries interleaved as (child=0, child=1) pairs per parent configuration.
    child = np.array([c000,c100,c001,c101,c010,c110,c011,c111])
    n = 2
    b = byskit(provider,backend,n,parents,child)
    print(np.shape(b.parents))
    print(np.shape(b.child))
|
https://github.com/mlvqc/Byskit
|
mlvqc
|
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
from qiskit import BasicAer
from qiskit.tools.visualization import plot_histogram
backend = BasicAer.get_backend('qasm_simulator')
%matplotlib inline
from byskit import byskit,gen_random_weights
n_parent = 3
n_child = 3
parents,children = gen_random_weights(n_parent,n_child)
b = byskit(backend,parents,children)
b.circ.draw(output='mpl')
results = b.execute_circ()
plot_histogram(results.results().get_counts(b.circ))
n_parent = 2
n_child = 4
parents,children = gen_random_weights(n_parent,n_child)
b = byskit(backend,parents,children)
b.circ.draw(output='mpl')
results = b.execute_circ()
plot_histogram(results.results().get_counts(b.circ))
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
# !pip install --upgrade pip
# !pip uninstall tensorflow --y
# !pip install tensorflow
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# load csv file
import pandas as pd
# numpy to the seed
import numpy as np
# TensorFlow: framework for the neural network
import tensorflow as tf
# Methods for the neural network
from keras.regularizers import l2
from keras.models import Sequential
from keras.layers import Dense, Dropout
#save as image the model summary
from keras.utils.vis_utils import plot_model
# librariesto plot
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# ---- Data: balanced ("fair") training set, imbalanced ("classic") test set ----
data_train = pd.read_csv("fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)

# Fix numpy's and TensorFlow's RNGs for reproducible training runs.
np.random.seed(123)
tf.random.set_seed(123)

# Standardise features using statistics of the training split only.
scale = StandardScaler()
scale.fit(X_train)
X_train_std = scale.transform(X_train)
X_test_std = scale.transform(X_test)
X_train_std[1], y_train[1]

# ---- Model: 16 -> 25 -> 8 -> 1 MLP with dropout and light L2 regularisation ----
model = Sequential()
model.add(Dense(25, input_dim=16, activation='relu', kernel_regularizer=l2(1e-6),kernel_initializer="glorot_normal"))
model.add(Dropout(0.5))
model.add(Dense(8, activation='relu',kernel_regularizer=l2(1e-6), kernel_initializer="glorot_normal"))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid',kernel_regularizer=l2(1e-6), kernel_initializer="glorot_normal"))
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)

# Compile model
# Renamed from `auc`: that name shadowed sklearn.metrics.auc imported above.
auc_metric = tf.keras.metrics.AUC()
model.compile(loss='binary_crossentropy', optimizer="Adam", metrics=['accuracy',auc_metric])
model_history = model.fit(X_train_std, y_train, epochs=100,
                          batch_size=32,
                          validation_split=0.2, shuffle=True)

train_pred = model.predict(X_train_std)
test_pred = model.predict(X_test_std)
# Threshold the sigmoid outputs at 0.5 to obtain hard class labels.
y_train_pred = (model.predict(X_train_std) > 0.5).astype("int32")
y_test_pred = (model.predict(X_test_std) > 0.5).astype("int32")


def _report_metrics(y_true, y_pred):
    """Print standard binary-classification metrics for (y_true, y_pred) and
    show the confusion matrix as an annotated heatmap.

    Extracted helper: the original cell duplicated this code verbatim for the
    train and test splits.
    """
    print('Accuracy: %f' % accuracy_score(y_true, y_pred))
    # precision tp / (tp + fp)
    print('Precision: %f' % precision_score(y_true, y_pred))
    # recall: tp / (tp + fn)
    print('Recall: %f' % recall_score(y_true, y_pred))
    # f1: 2 tp / (2 tp + fp + fn)
    print('F1 score: %f' % f1_score(y_true, y_pred))
    # kappa
    print('Cohens kappa: %f' % cohen_kappa_score(y_true, y_pred))
    # ROC AUC
    print('ROC AUC: %f' % roc_auc_score(y_true, y_pred))
    # confusion matrix
    matrix = confusion_matrix(y_true, y_pred)
    print(matrix)
    ax = sns.heatmap(matrix, annot=True, cmap='Blues', fmt='g')
    ax.set_title('Seaborn Confusion Matrix with labels\n\n')
    ax.set_xlabel('\nPredicted Values')
    ax.set_ylabel('Actual Values ')
    ax.xaxis.set_ticklabels(['0','1'])
    ax.yaxis.set_ticklabels(['0','1'])
    ## Display the visualization of the Confusion Matrix.
    plt.show()


# Same report for the (balanced) training split and the (imbalanced) test split.
_report_metrics(y_train, y_train_pred)
_report_metrics(y_test, y_test_pred)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
# to use dataframe and load csv file
import pandas as pd
# to use for mathematical operations
import numpy as np
# split the set in 2 set, common train and test
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# plot different designs
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(123)
data = pd.read_csv("dataset/BackOrders.csv",header=0)
data.shape
data.head()
for col in ['potential_issue',
'deck_risk',
'oe_constraint',
'ppap_risk',
'stop_auto_buy',
'rev_stop',
'went_on_backorder']:
data[col]=pd.factorize(data[col])[0]
data.describe(include='all')
data['perf_6_month_avg']=data['perf_6_month_avg'].replace(-99, np.NaN)
data['perf_12_month_avg']=data['perf_12_month_avg'].replace(-99, np.NaN)
varnames=list(data)[1:]
correlations = data[varnames].corr()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0,22,1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(varnames,rotation=90)
ax.set_yticklabels(varnames)
plt.show()
data.drop('rev_stop', axis=1, inplace=True)
data.drop('oe_constraint', axis=1, inplace=True)
data.drop('potential_issue', axis=1, inplace=True)
data.drop('stop_auto_buy', axis=1, inplace=True)
data.drop('deck_risk', axis=1, inplace=True)
def check_missing(data):
    """Summarise missing values per column of *data*.

    Returns a DataFrame with columns 'Missing' (null count) and 'Percent'
    (null percentage, one decimal), sorted descending, truncated to the
    top three columns.
    """
    null_counts = data.isnull().sum().sort_values(ascending=False)
    null_percent = (100 * data.isnull().sum() / data.isnull().count()).round(1).sort_values(ascending=False)
    summary = pd.concat([null_counts, null_percent], axis=1, keys=['Missing', 'Percent'])
    return summary[:3]
check_missing(data)
data.fillna(data.median(), inplace=True)
data
check_missing(data)
data.drop('sku', axis=1, inplace=True)
data
# Feature matrix X (all columns except the target) and label vector y.
X, y = data.loc[:,data.columns!='went_on_backorder'].values, data.loc[:,'went_on_backorder'].values
X,y
# Stratified 90/10 split so the class ratio is preserved in both subsets.
X_train_1, X_test, y_train_1, y_test = train_test_split(X, y, test_size=0.1, random_state=123, stratify = data['went_on_backorder'])
print(X_train_1.shape)
print(X_test.shape)
# BUG FIX: the split binds the training labels to y_train_1; `y_train` was
# never defined in this script and raised a NameError here.
print(pd.value_counts(y_train_1)/y_train_1.size * 100)
print(pd.value_counts(y_test)/y_test.size * 100)
def balance_split(X_train_1, y_train_1, flag=""):
    """Return a class-capped subset of (X_train_1, y_train_1) as numpy arrays.

    flag == "fair": at most 1000 samples of each class (balanced set).
    otherwise:      at most 10000 class-0 samples plus ALL class-1 samples.
    Class-0 samples come first in the returned arrays, preserving input order.
    """
    negatives = [(x, lbl) for x, lbl in zip(X_train_1, y_train_1) if lbl == 0]
    positives = [(x, lbl) for x, lbl in zip(X_train_1, y_train_1) if lbl != 0]
    if flag == "fair":
        kept = negatives[:1000] + positives[:1000]
    else:
        kept = negatives[:10000] + positives
    X_out = [pair[0] for pair in kept]
    y_out = [pair[1] for pair in kept]
    return np.asarray(X_out), np.asarray(y_out)
def save_data(X_train_1, y_train_1, flag="", name=""):
    """Balance (X_train_1, y_train_1) via balance_split and write the result
    to dataset/fair_<name>.csv or dataset/classic_<name>.csv (no index column).
    """
    if flag == "fair":
        X_out, y_out = balance_split(X_train_1, y_train_1, flag)
        prefix = "fair_"
    else:
        X_out, y_out = balance_split(X_train_1, y_train_1)
        prefix = "classic_"
    frame = pd.concat([pd.DataFrame(X_out), pd.DataFrame(y_out)], axis=1)
    frame.to_csv("dataset/" + prefix + name + ".csv", index=False)
#classic data
save_data(X_train_1,y_train_1,flag="classic",name="train")
#fair data
save_data(X_train_1,y_train_1,flag="fair",name="train")
#classic data
save_data(X_test,y_test,flag="classic",name="test")
data = pd.read_csv("dataset/classic_train.csv")
X,y = data[data.columns[:16]].values, data[data.columns[16]].values
X.shape, y.shape
data = pd.read_csv("dataset/fair_train.csv")
X,y = data[data.columns[:16]].values, data[data.columns[16]].values
X.shape, y.shape
data = pd.read_csv("dataset/classic_test.csv")
X,y = data[data.columns[:16]].values, data[data.columns[16]].values
X.shape, y.shape
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#pip install pennylane
#import pennylane dependencies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
n_wires = 4
dev = qml.device("default.qubit", wires=n_wires)
def block(weights, wires):
    """Two-qubit MERA building block: CNOT followed by an RY on each wire."""
    qml.CNOT(wires=[wires[0],wires[1]])
    qml.RY(weights[0], wires=wires[0])
    qml.RY(weights[1], wires=wires[1])
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MERA.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
for w in weights:
qml.MERA(range(n_wires),n_block_wires,block, n_params_block, w)
#print(w)
#print(x)
return qml.expval(qml.PauliZ(1))
def variational_classifier(weights, bias, x):
    """Circuit expectation value for input x, shifted by a trainable bias."""
    return circuit(weights, x) + bias
def square_loss(labels, predictions):
    """Mean squared error between paired labels and predictions."""
    total = sum((target - guess) ** 2 for target, guess in zip(labels, predictions))
    return total / len(labels)
def accuracy(labels, predictions):
    """Fraction of predictions within 1e-5 of their labels."""
    hits = sum(1 for target, guess in zip(labels, predictions) if abs(target - guess) < 1e-5)
    return hits / len(labels)
def cost(weights, bias, X, Y):
    """Mean squared error of the variational classifier over batch X with targets Y."""
    outputs = [variational_classifier(weights, bias, sample) for sample in X]
    return square_loss(Y, outputs)
np.random.seed(0)
num_layers = 1
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
print(i[0])
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
# Update the weights by one optimizer step
batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
feats_train_batch = X_train_70[batch_index]
Y_train_batch = y_train_70[batch_index]
weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
# Compute predictions on train and validation set
predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
# Compute accuracy on train and validation set
acc_tra = accuracy(y_train_70, predictions_train)
acc_val = accuracy(y_test_30, predictions_val)
cost_train = cost(weights, bias,X_train, y_train)
cost_g.append(cost_train)
acc_train.append(acc_tra)
acc_test.append(acc_val)
clear_output(wait=True)
plt.plot(cost_g,label='cost')
plt.plot(acc_train,label='acc_train')
plt.plot(acc_test,label='acc_test')
plt.legend(['cost','acc_train','acc_test'])
plt.show()
print(
"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost_train, acc_tra, acc_val)
)
print(weights)
x_test = []
for x in X_test.tolist():
if sum(x) == 0:
x[0]=1
x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_test
# BUG FIX: the first line previously assigned y_test_pred (immediately cleared
# below) and the loop then read an undefined y_pred, raising a NameError.
# Mirrors the working duplicate of this cell later in the file.
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
# Map the sign of each raw score to a hard label in {-1, +1}.
y_test_pred = []
for i in y_pred:
    if i < 0:
        y_test_pred.append(-1)
    else:
        y_test_pred.append(1)
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
y_pred_1 = [int(i) for i in y_test_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mera_1_layers.csv', 'w') as fp:
fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#pip install pennylane
#import pennylane dependencies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
n_wires = 4
dev = qml.device("default.mixed", wires=n_wires)
def block(weights, wires):
    """Two-qubit MERA building block: CNOT followed by an RY on each wire."""
    qml.CNOT(wires=[wires[0],wires[1]])
    qml.RY(weights[0], wires=wires[0])
    qml.RY(weights[1], wires=wires[1])
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MERA.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
@qml.transforms.insert(qml.AmplitudeDamping, 0.2, position="end")
def circuit(weights, x):
qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
for w in weights:
qml.MERA(range(n_wires),n_block_wires,block, n_params_block, w)
#print(w)
#print(x)
return qml.expval(qml.PauliZ(1))
def variational_classifier(weights, bias, x):
    """Circuit expectation value for input x, shifted by a trainable bias."""
    return circuit(weights, x) + bias
def square_loss(labels, predictions):
    """Mean squared error between paired labels and predictions."""
    total = sum((target - guess) ** 2 for target, guess in zip(labels, predictions))
    return total / len(labels)
def accuracy(labels, predictions):
    """Fraction of predictions within 1e-5 of their labels."""
    hits = sum(1 for target, guess in zip(labels, predictions) if abs(target - guess) < 1e-5)
    return hits / len(labels)
def cost(weights, bias, X, Y):
    """Mean squared error of the variational classifier over batch X with targets Y."""
    outputs = [variational_classifier(weights, bias, sample) for sample in X]
    return square_loss(Y, outputs)
np.random.seed(0)
num_layers = 1
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
print(i[0])
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
# Update the weights by one optimizer step
batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
feats_train_batch = X_train_70[batch_index]
Y_train_batch = y_train_70[batch_index]
weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
# Compute predictions on train and validation set
predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
# Compute accuracy on train and validation set
acc_tra = accuracy(y_train_70, predictions_train)
acc_val = accuracy(y_test_30, predictions_val)
cost_train = cost(weights, bias,X_train, y_train)
cost_g.append(cost_train)
acc_train.append(acc_tra)
acc_test.append(acc_val)
clear_output(wait=True)
plt.plot(cost_g,label='cost')
plt.plot(acc_train,label='acc_train')
plt.plot(acc_test,label='acc_test')
plt.legend(['cost','acc_train','acc_test'])
plt.show()
print(
"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost_train, acc_tra, acc_val)
)
print(weights)
x_test = []
for x in X_test.tolist():
if sum(x) == 0:
x[0]=1
x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_test
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
y_test_pred = []
for i in y_pred:
if i < 0:
y_test_pred.append(-1)
else:
y_test_pred.append(1)
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
y_pred_1 = [int(i) for i in y_test_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mera_1_layers_noise.csv', 'w') as fp:
fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
from sklearn.metrics import ConfusionMatrixDisplay
import numpy as np
import matplotlib.pyplot as plt
def _show_confusion(cm):
    """Render a 2x2 confusion matrix as an annotated Blues heatmap.

    Extracted helper: the original cell repeated this plotting code verbatim
    for each of the three matrices below.
    """
    plt.figure()
    plt.imshow(cm, cmap="Blues")
    for i in range(2):
        for j in range(2):
            # White text on the dark (high-count) cells for readability.
            plt.text(j, i, format(cm[i, j]),
                     ha="center", va="center",
                     color="white" if cm[i, j] > 2500 else "black", fontsize=18)
    plt.xticks([0, 1], [0, 1], fontsize=14)
    plt.yticks([0, 1], [0, 1], fontsize=14)
    plt.xlabel("Predicted Label", fontsize=14)
    plt.ylabel("True Label", fontsize=14)
    plt.colorbar()


confusion_matrix = np.array([[4684, 346], [829, 300]])
# BUG FIX: this call originally appeared BEFORE confusion_matrix was defined
# and raised a NameError; it must follow the definition.
ConfusionMatrixDisplay(confusion_matrix).plot()
_show_confusion(confusion_matrix)

confusion_matrix = np.array([[3690, 1070], [182, 947]])
_show_confusion(confusion_matrix)

confusion_matrix = np.array([[3835, 1195], [237, 892]])
_show_confusion(confusion_matrix)
https://arxiv.org/pdf/2010.07335.pdf
https://arxiv.org/pdf/2105.10162.pdf
https://arxiv.org/pdf/2203.01340.pdf maria
https://arxiv.org/pdf/1810.03787.pdf MERA
https://arxiv.org/pdf/1905.01426.pdf MERA 2
https://pennylane.ai/qml/demos/tutorial_tn_circuits.html
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
# %pip install qiskit
# %pip install docplex
# %pip install qiskit_optimization
# %pip install networkx
# %pip install geopandas
# %pip install folium
import networkx as nx
import geopandas as gpd
import folium
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools
import time
import random
from IPython.display import display
from qiskit import IBMQ
from qiskit.algorithms import QAOA, VQE
from qiskit.circuit.library import TwoLocal
from qiskit.algorithms.optimizers import COBYLA, SPSA # Classical simulator
from qiskit.providers.aer import AerSimulator
from docplex.mp.model import Model
from qiskit_optimization.runtime import QAOAClient, VQEClient
from qiskit_optimization.converters import QuadraticProgramToQubo
from qiskit_optimization.translators import from_docplex_mp
from qiskit_optimization.algorithms import CplexOptimizer, MinimumEigenOptimizer
path = gpd.datasets.get_path('nybb')
df = gpd.read_file(path)
m = folium.Map(location=[40.70, -73.94], zoom_start=10, max_zoom=12, tiles='CartoDB positron')
# Project to NAD83 projected crs
df = df.to_crs(epsg=2263)
# Access the centroid attribute of each polygon
df['centroid'] = df.centroid
# Project to WGS84 geographic crs
# geometry (active) column
df = df.to_crs(epsg=4326)
# Centroid column
df['centroid'] = df['centroid'].to_crs(epsg=4326)
np.random.seed(7)
locations = []
ids = 0
for _, r in df.iterrows():
lat, lon = r['centroid'].y, r['centroid'].x
for i in range(2):
lat_rand, lon_rand = lat + 0.2 * np.random.rand(), lon +0.1 * np.random.rand()
locations.append((lon_rand, lat_rand))
folium.Marker(location=[lat_rand, lon_rand], popup=f'Id: {ids}').add_to(m)
ids += 1
center = np.array(locations).mean(axis=0)
locations = [(center[0], center[1])] + locations
folium.CircleMarker(location=[center[1], center[0]], radius=10, popup="<stong>Warehouse</stong>",
color="red",fill=True, fillOpacity=1, fillColor="tab:red").add_to(m)
m
# Normalizing the results to produce a graph in Graphx
companies = np.array(locations)
companies -= companies[0]
companies /= (np.max(np.abs(companies), axis=0))
r = list(np.sqrt(np.sum(companies ** 2, axis=1)))
threshold = 1 # Limit for to not consider the route from company i to j if the distance is larger than a threshold
n_companies = len(companies)
G = nx.Graph(name="VRP")
G.add_nodes_from(range(n_companies))
# G.add_weighted_edges_from([0, i, w] for i, w in zip(range(1, n_companies), r[1:]))
np.random.seed(2)
count = 0
for i in range(n_companies):
for j in range(n_companies):
if i != j:
rij = np.sqrt(np.sum((companies[i] - companies[j])**2))
if (rij < threshold) or (0 in [i, j]):
count +=1
G.add_weighted_edges_from([[i, j, rij]])
r.append(rij)
colors = [plt.cm.get_cmap("coolwarm")(x) for x in r[1:]]
nx.draw(G, pos=companies, with_labels=True, node_size=500,
edge_color=colors, width=1, font_color="white",font_size=14,
node_color = ["tab:red"] + (n_companies-1)*["darkblue"])
print(f"The number of edges of this problem is: {len(G.edges)}")
mdl = Model(name="VRP")
n_trucks = 3 # number of K trucks
x = {}
for i, j in G.edges():
x[(i, j)] = mdl.binary_var(name=f"x_{i}_{j}") # Adding route from company i to company j as a binary variable
x[(j, i)] = mdl.binary_var(name=f"x_{j}_{i}") # Adding route from company j to company i as a binary variable
print(f"The number of qubits needed to solve the problem is: {mdl.number_of_binary_variables}")
cost_func = mdl.sum(w["weight"] * x[(i, j)] for i, j, w in G.edges(data=True)) + mdl.sum(w["weight"] * x[(j, i)] for i, j, w in G.edges(data=True))
mdl.minimize(cost_func)
# Constraint 1a(yellow Fig. above): Only one truck goes out from company i
for i in range(1, n_companies):
mdl.add_constraint(mdl.sum(x[i, j] for j in range(n_companies) if (i, j) in x.keys()) == 1)
# Constraint 1b (yellow Fig. above): Only one truck comes into company j
for j in range(1, n_companies):
mdl.add_constraint(mdl.sum(x[i, j] for i in range(n_companies) if (i, j) in x.keys()) == 1)
# Constraint 2: (orange Fig. above) For the warehouse
mdl.add_constraint(mdl.sum(x[i, 0] for i in range(1, n_companies)) == n_trucks)
mdl.add_constraint(mdl.sum(x[0, j] for j in range(1, n_companies)) == n_trucks)
# Constraint 3: (blue Fig. above) To eliminate sub-routes
companies_list = list(range(1, n_companies))
subroute_set = []
for i in range(2, len(companies_list) + 1):
for comb in itertools.combinations(companies_list, i):
subroute_set.append(list(comb)) #subset points
for subroute in subroute_set:
constraint_3 = []
for i, j in itertools.permutations(subroute, 2): #iterating over all the subset points
if (i, j) in x.keys():
constraint_3.append(x[(i,j)])
elif i == j:
pass
else:
constraint_3 = []
break
if len(constraint_3) != 0:
mdl.add_constraint(mdl.sum(constraint_3) <= len(subroute) - 1)
quadratic_program = from_docplex_mp(mdl)
print(quadratic_program.export_as_lp_string())
sol = CplexOptimizer().solve(quadratic_program)
print(sol.prettyprint())
solution_cplex = sol.raw_results.as_name_dict()
G_sol = nx.Graph()
G_sol.add_nodes_from(range(n_companies))
for i in solution_cplex:
nodes = i[2:].split("_")
G_sol.add_edge(int(nodes[0]), int(nodes[1]))
nx.draw(G_sol, pos=companies, with_labels=True, node_size=500,
edge_color=colors, width=1, font_color="white",font_size=14,
node_color = ["tab:red"] + (n_companies-1)*["darkblue"])
qubo = QuadraticProgramToQubo(penalty=15).convert(quadratic_program)
num_vars = qubo.get_num_binary_vars()
print(f"To represent the inital problem with {mdl.number_of_binary_variables} variables, the QUBO representation needs {num_vars} variables")
new_qubo = qubo.substitute_variables(sol.variables_dict)
start = time.time()
sol_slack = CplexOptimizer().solve(new_qubo)
end = time.time() - start
print(f"The solver needs {np.round(end, 3)} seconds to finish.")
qubo_no_slack = qubo.substitute_variables(sol_slack.variables_dict)
sol_no_slack = CplexOptimizer().solve(qubo_no_slack)
solution_slack = sol_no_slack.raw_results.as_name_dict()
G_sol_slack = nx.Graph()
G_sol_slack.add_nodes_from(range(n_companies))
for i in solution_slack:
nodes = i[2:].split("_")
G_sol.add_edge(int(nodes[0]), int(nodes[1]))
nx.draw(G_sol, pos=companies, with_labels=True, node_size=500,
edge_color=colors, width=1, font_color="white",font_size=14,
node_color = ["tab:red"] + (n_companies-1)*["darkblue"])
index_1s = [k for k, v in sol_no_slack.variables_dict.items() if v == 1]
index_0s = [k for k, v in sol_no_slack.variables_dict.items() if v == 0]
def Optimization_QAOA(qubo, reps=1, optimizer=COBYLA(maxiter=1000), backend=None,
                      shots=1024, alpha=1.0, provider=None, local=False, error_mitigation=False):
    """Solve a QUBO with QAOA and record the optimizer's convergence history.

    Runs either a local Aer-simulator QAOA (local=True) or the Qiskit runtime
    QAOAClient on the given provider/backend. Returns a tuple
    (optimization result, history dict with 'nfev'/'parameters'/'stddev'/'mean').
    """
    history = {'nfev': [],
               'parameters': [],
               'stddev': [],
               'mean': []
               }

    def callback(nfev, parameters, mean, stddev):
        # Store every optimizer evaluation so convergence can be plotted later.
        history['nfev'].append(nfev)
        history['parameters'].append(parameters)
        history['mean'].append(mean)
        history['stddev'].append(stddev)

    if local:
        solver = QAOA(optimizer=optimizer, reps=reps, quantum_instance=AerSimulator(),
                      callback=callback)
    else:
        solver = QAOAClient(provider=provider, backend=backend, reps=reps, alpha=alpha,
                            shots=shots, callback=callback, optimizer=optimizer,
                            optimization_level=3, measurement_error_mitigation=error_mitigation)
    result = MinimumEigenOptimizer(solver).solve(qubo)
    return result, history
def Optimization_VQE(qubo, ansatz, optimizer=SPSA(maxiter=50), backend=None,
                     shots=1024, provider=None, local=False, error_mitigation=False):
    """Solve a QUBO with VQE (given ansatz) and record the convergence history.

    Runs either a local Aer-simulator VQE (local=True) or the Qiskit runtime
    VQEClient on the given provider/backend. Returns a tuple
    (optimization result, history dict with 'nfev'/'parameters'/'stddev'/'mean').
    """
    history = {'nfev': [],
               'parameters': [],
               'stddev': [],
               'mean': []
               }

    def callback(nfev, parameters, mean, stddev):
        # Store every optimizer evaluation so convergence can be plotted later.
        history['nfev'].append(nfev)
        history['parameters'].append(parameters)
        history['mean'].append(mean)
        history['stddev'].append(stddev)

    if local:
        solver = VQE(ansatz=ansatz, quantum_instance=AerSimulator(),
                     callback=callback, optimizer=optimizer)
    else:
        solver = VQEClient(ansatz=ansatz, provider=provider, backend=backend, shots=shots,
                           callback=callback, optimizer=optimizer,
                           measurement_error_mitigation=error_mitigation)
    result = MinimumEigenOptimizer(solver).solve(qubo)
    return result, history
def graph(solution, solution_not_kept):
    """Build the route graph from two variable assignments.

    Both arguments map variable names of the form "x_i_j" to 0/1; every
    variable set to 1 becomes an edge (i, j). Nodes 0..n_companies-1 are
    always present. Returns the networkx Graph.
    """
    route = nx.Graph()
    route.add_nodes_from(range(n_companies))
    # The optimized subset and the pre-fixed assignment contribute edges alike.
    for assignment in (solution, solution_not_kept):
        for name, value in assignment.items():
            if value == 1:
                endpoints = name[2:].split("_")
                route.add_edge(int(endpoints[0]), int(endpoints[1]))
    return route
# Fix all but 7 variables (keep 3 ones and 4 zeros free) to shrink the QUBO.
num_1s = 3
num_0s = 4
random.seed(1)
keep_1s = random.sample(index_1s, num_1s)
keep_0s = random.sample(index_0s, num_0s)
sol_qubo = sol.variables_dict
solution_not_kept7 = {i:sol_qubo[i] for i in sol_qubo.keys() if i not in keep_1s + keep_0s}
qubo_7vars = qubo_no_slack.substitute_variables(solution_not_kept7)
print(qubo_7vars.export_as_lp_string())
# Local-simulator baselines with two classical optimizers.
sol7_local_qaoa_cobyla = Optimization_QAOA(qubo_7vars, reps=2, optimizer=COBYLA(maxiter=100), local=True)
sol7_local_qaoa_spsa = Optimization_QAOA(qubo_7vars, reps=2, optimizer=SPSA(maxiter=100), local=True)
# Real-hardware runs through two IBMQ providers.
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q-community', group='cdl-hackathon', project='main')
provider2 = IBMQ.get_provider(hub='ibm-q-research', group='guanajuato-1', project='main')
sol7_oslo_qaoa_cobyla = Optimization_QAOA(qubo_7vars, reps=2, local=False, backend=provider2.backend.ibm_oslo,
                                         provider=provider2, optimizer=COBYLA(maxiter=50))
sol7_oslo_qaoa_spsa = Optimization_QAOA(qubo_7vars, reps=2, local=False, backend=provider2.backend.ibm_oslo,
                                       provider=provider2, optimizer=SPSA(maxiter=50))
sol7_oslo_qaoa_spsa_mitig = Optimization_QAOA(qubo_7vars, reps=2, local=False, backend=provider2.backend.ibm_oslo,
                                       provider=provider2, optimizer=SPSA(maxiter=50), error_mitigation=True)
# VQE with a hardware-efficient RY/CZ ansatz on the same 7-variable QUBO.
ansatz = TwoLocal(qubo_7vars.get_num_vars(), rotation_blocks='ry', entanglement_blocks='cz')
sol7_local_vqe_spsa = Optimization_VQE(qubo_7vars, ansatz, local=True, optimizer=SPSA(maxiter=200))
sol7_local_vqe_cobyla = Optimization_VQE(qubo_7vars, ansatz, local=True, optimizer=COBYLA(maxiter=200))
sol7_lagos_vqe_cobyla = Optimization_VQE(qubo_7vars, ansatz, local=False, optimizer=COBYLA(maxiter=100),
                                  backend=provider2.backend.ibm_lagos, provider=provider2)
sol7_lagos_vqe_spsa = Optimization_VQE(qubo_7vars, ansatz, local=False, optimizer=SPSA(maxiter=25),
                                  backend=provider2.backend.ibm_lagos, provider=provider2)
sol7_lagos_vqe_spsa2 = Optimization_VQE(qubo_7vars, ansatz, local=False, optimizer=SPSA(maxiter=25),
                                  backend=provider2.backend.ibm_lagos, provider=provider2, error_mitigation=True)
# Convergence insets for the hardware runs (index [1] is the history dict).
plt.figure()
plt.plot(sol7_lagos_vqe_spsa[1]["mean"], linestyle=":", color="seagreen", label="VQE-Lagos-SPSA")
plt.plot(sol7_lagos_vqe_spsa2[1]["mean"], linestyle="-.", color="indigo", label="VQE-Lagos-SPSA-Mitig")
plt.plot(sol7_lagos_vqe_cobyla[1]["mean"], color="olive", label="VQE-Lagos-COBYLA-Mitig")
plt.grid()
plt.savefig("./Images/inset.png")
plt.figure()
plt.plot(sol7_oslo_qaoa_spsa[1]["mean"], linestyle=":", color="seagreen", label="QAOA-Oslo-SPSA")
plt.plot(sol7_oslo_qaoa_cobyla[1]["mean"], color="olive", label="QAOA-Oslo-COBYLA")
plt.grid()
plt.savefig("./Images/inset2.png")
# Persist all 7-variable experiments.
sol7 = {"local_qaoa_cobyla":sol7_local_qaoa_cobyla, "local_vqe_cobyla":sol7_local_vqe_cobyla,
       "local_qaoa_spsa":sol7_local_qaoa_spsa, "local_vqe_spsa":sol7_local_vqe_spsa,
       "oslo_qaoa_cobyla":sol7_oslo_qaoa_cobyla, "oslo_qaoa_spsa":sol7_oslo_qaoa_spsa,
       "lagos_vqe_spsa":sol7_lagos_vqe_spsa, "lagos_vqe_cobyla_mitig":sol7_lagos_vqe_cobyla}
np.save("./Data/sol7.npy", sol7)
# NOTE: rebinds sol7 (previously the saved dict) to a list ordered for plotting.
sol7 = [sol7_local_qaoa_cobyla, sol7_local_vqe_cobyla, sol7_local_qaoa_spsa, sol7_local_vqe_spsa]
q_alg = ["QAOA", "VQE"]
# 3x2 summary figure: convergence (row 0), COBYLA routes (row 1), SPSA routes (row 2).
fig, ax = plt.subplots(3,2, figsize=(15,18))
ax[0,0].plot(sol7_local_qaoa_cobyla[1]["mean"], color="darkblue", label="QAOA-COBYLA")
ax[0,0].plot(sol7_local_qaoa_spsa[1]["mean"], linestyle="--", color="lightcoral", label="QAOA-SPSA")
ax[0,0].plot(sol7_oslo_qaoa_spsa[1]["mean"], linestyle=":", color="seagreen", label="QAOA-Oslo-SPSA")
ax[0,0].plot(sol7_oslo_qaoa_cobyla[1]["mean"], color="olive", label="QAOA-Oslo-COBYLA")
ax[0,1].plot(sol7_local_vqe_cobyla[1]["mean"], color="darkblue", label="VQE-COBYLA")
ax[0,1].plot(sol7_local_vqe_spsa[1]["mean"], linestyle="--", color="lightcoral", label="VQE-SPSA")
ax[0,1].plot(sol7_lagos_vqe_spsa[1]["mean"], linestyle=":", color="seagreen", label="VQE-Lagos-SPSA")
ax[0,1].plot(sol7_lagos_vqe_spsa2[1]["mean"], linestyle="-.", color="indigo", label="VQE-Lagos-SPSA-Mitig")
ax[0,1].plot(sol7_lagos_vqe_cobyla[1]["mean"], color="olive", label="VQE-Lagos-COBYLA-Mitig")
for i in range(2):
    ax[0,i].set_xlabel("Iterations", fontsize=14)
    ax[0,i].set_ylabel("Cost", fontsize=14)
    ax[0,i].legend()
    ax[0,i].grid()
    # graph() merges the optimized subset with the pre-fixed assignment.
    plt.sca(ax[1,i])
    nx.draw(graph(sol7[i][0].variables_dict,solution_not_kept7), pos=companies, with_labels=True, node_size=500,
        edge_color=colors, width=1, font_color="white",font_size=14,
        node_color = ["tab:red"] + (n_companies-1)*["darkblue"])
    ax[1,i].set_title(f"{q_alg[i]}-COBYLA", fontsize=14)
    plt.sca(ax[2,i])
    nx.draw(graph(sol7[i+2][0].variables_dict,solution_not_kept7), pos=companies, with_labels=True, node_size=500,
        edge_color=colors, width=1, font_color="white",font_size=14,
        node_color = ["tab:red"] + (n_companies-1)*["darkblue"])
    ax[2,i].set_title(f"{q_alg[i]}-SPSA", fontsize=14)
plt.savefig("./Images/Sol7Q.png")
# Larger instance: keep 15 variables free (5 ones + 10 zeros), fix the rest.
num_1s = 5
num_0s = 10
random.seed(1)
keep_1s = random.sample(index_1s, num_1s)
keep_0s = random.sample(index_0s, num_0s)
sol_qubo = sol.variables_dict
solution_not_kept = {i:sol_qubo[i] for i in sol_qubo.keys() if i not in keep_1s + keep_0s}
qubo_15vars = qubo_no_slack.substitute_variables(solution_not_kept)
# 15 variables exceed the small devices, so run on the hosted QASM simulator.
sol15_qasm_qaoa_cobyla = Optimization_QAOA(qubo_15vars, reps=3, local=False, backend=provider2.backend.ibmq_qasm_simulator,
                                         provider=provider2, optimizer=COBYLA(maxiter=100))
# Route graph of the QAOA solution merged with the fixed variables ("x_i_j" names).
solution15_qaoa = sol15_qasm_qaoa_cobyla[0].variables_dict
G_sol15_qaoa = nx.Graph()
G_sol15_qaoa.add_nodes_from(range(n_companies))
for k, v in solution15_qaoa.items():
    if v == 1:
        nodes = k[2:].split("_")
        G_sol15_qaoa.add_edge(int(nodes[0]), int(nodes[1]))
for k, v in solution_not_kept.items():
    if v == 1:
        nodes = k[2:].split("_")
        G_sol15_qaoa.add_edge(int(nodes[0]), int(nodes[1]))
nx.draw(G_sol15_qaoa, pos=companies, with_labels=True, node_size=500,
        edge_color=colors, width=1, font_color="white",font_size=14,
        node_color = ["tab:red"] + (n_companies-1)*["darkblue"])
sol15_qasm_qaoa_spsa = Optimization_QAOA(qubo_15vars, reps=3, local=False, backend=provider2.backend.ibmq_qasm_simulator,
                                        provider=provider2, optimizer=SPSA(maxiter=25))
# VQE on the same 15-variable QUBO with an RY/CZ ansatz.
ansatz15 = TwoLocal(qubo_15vars.get_num_vars(), rotation_blocks='ry', entanglement_blocks='cz')
sol15_qasm_vqe_cobyla = Optimization_VQE(qubo_15vars,ansatz=ansatz15, local=False, backend=provider2.backend.ibmq_qasm_simulator,
                                        provider=provider2, optimizer=COBYLA(maxiter=100))
solution15_vqe = sol15_qasm_vqe_cobyla[0].variables_dict
G_sol15_vqe = nx.Graph()
G_sol15_vqe.add_nodes_from(range(n_companies))
for k, v in solution15_vqe.items():
    if v == 1:
        nodes = k[2:].split("_")
        G_sol15_vqe.add_edge(int(nodes[0]), int(nodes[1]))
for k, v in solution_not_kept.items():
    if v == 1:
        nodes = k[2:].split("_")
        G_sol15_vqe.add_edge(int(nodes[0]), int(nodes[1]))
nx.draw(G_sol15_vqe, pos=companies, with_labels=True, node_size=500,
        edge_color=colors, width=1, font_color="white",font_size=14,
        node_color = ["tab:red"] + (n_companies-1)*["darkblue"])
sol15_qasm = {"qaoa_cobyla":sol15_qasm_qaoa_cobyla, "vqe_cobyla":sol15_qasm_vqe_cobyla}
np.save("./Data/sol15_qasm.npy", sol15_qasm)
# Summary figure: convergence on top, resulting routes below.
solution15 = [G_sol15_qaoa, G_sol15_vqe]
fig, ax = plt.subplots(2,2, figsize=(15,10))
ax[0,0].plot(sol15_qasm_qaoa_cobyla[1]["mean"], color="darkblue", label="QAOA-COBYLA")
ax[0,1].plot(sol15_qasm_vqe_cobyla[1]["mean"], color="slateblue", label="VQE-COBYLA")
for i in range(2):
    ax[0,i].grid()
    ax[0,i].set_xlabel("Iterations", fontsize=14)
    ax[0,i].set_ylabel("Cost", fontsize=14)
    ax[0,i].legend()
    plt.sca(ax[1,i])
    nx.draw(solution15[i], pos=companies, with_labels=True, node_size=500,
        edge_color=colors, width=1, font_color="white",font_size=14,
        node_color = ["tab:red"] + (n_companies-1)*["darkblue"])
fig.savefig("./Images/Sol15Q.png")
# Largest instance tried: 20 free variables (8 ones + 12 zeros).
num_1s = 8
num_0s = 12
random.seed(1)
keep_1s = random.sample(index_1s, num_1s)
keep_0s = random.sample(index_0s, num_0s)
sol_qubo = sol.variables_dict
solution_not_kept = {i:sol_qubo[i] for i in sol_qubo.keys() if i not in keep_1s + keep_0s}
qubo_20vars = qubo_no_slack.substitute_variables(solution_not_kept)
sol20_qasm_qaoa_cobyla_r1 = Optimization_QAOA(qubo_20vars, reps=1, local=False, backend=provider2.backend.ibmq_qasm_simulator,
                                        provider=provider2, optimizer=COBYLA(maxiter=10))
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
# !pip install --upgrade pip
# !pip uninstall tensorflow --y
# !pip install tensorflow
import os
# Silence TensorFlow C++ logging and force CPU-only execution.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# load csv file
import pandas as pd
# numpy to the seed
import numpy as np
# load csv fileframework to neural networks
import tensorflow as tf
#Method forthe neural network
from keras.regularizers import l2
from keras.models import Sequential
from keras.layers import Dense, Dropout
#save as image the model summary
from keras.utils.vis_utils import plot_model
# librariesto plot
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Balanced training set, imbalanced (realistic) test set; 16 feature columns,
# the 17th column is the label.
data_train = pd.read_csv("fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
np.random.seed(123)
tf.random.set_seed(123)
# Standardize features with statistics fitted on the training set only.
scale = StandardScaler()
scale.fit(X_train)
X_train_std = scale.transform(X_train)
X_test_std = scale.transform(X_test)
X_train_std[1], y_train[1]
# Small fully-connected binary classifier: 16 -> 25 -> 8 -> 1 (sigmoid),
# with dropout and mild L2 regularization.
model = Sequential()
model.add(Dense(25, input_dim=16, activation='relu', kernel_regularizer=l2(1e-6),kernel_initializer="glorot_normal"))
model.add(Dropout(0.5))
model.add(Dense(8, activation='relu',kernel_regularizer=l2(1e-6), kernel_initializer="glorot_normal"))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid',kernel_regularizer=l2(1e-6), kernel_initializer="glorot_normal"))
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
# Compile model
auc = tf.keras.metrics.AUC()
model.compile(loss='binary_crossentropy', optimizer="Adam", metrics=['accuracy',auc])
model_history = model.fit(X_train_std, y_train, epochs=100,
          batch_size=32,
          validation_split=0.2, shuffle=True)
train_pred = model.predict(X_train_std)
test_pred = model.predict(X_test_std)
# Threshold the sigmoid outputs at 0.5 to obtain hard class labels.
y_train_pred = (model.predict(X_train_std) > 0.5).astype("int32")
y_test_pred = (model.predict(X_test_std) > 0.5).astype("int32")
# --- Training-set metrics. ---
accuracy = accuracy_score(y_train, y_train_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_train, y_train_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_train, y_train_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_train, y_train_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_train, y_train_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_train, y_train_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
train_matrix = confusion_matrix(y_train, y_train_pred)
print(train_matrix)
ax = sns.heatmap(train_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# --- Test-set metrics (same battery as above). ---
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
# to use dataframe and load csv file
import pandas as pd
# to use for mathematical operations
import numpy as np
# split the set in 2 set, common train and test
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# plot different designs
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(123)
data = pd.read_csv("dataset/BackOrders.csv",header=0)
data.shape
data.head()
# Encode the yes/no categorical flags as integer codes.
for col in ['potential_issue',
            'deck_risk',
            'oe_constraint',
            'ppap_risk',
            'stop_auto_buy',
            'rev_stop',
            'went_on_backorder']:
    data[col]=pd.factorize(data[col])[0]
data.describe(include='all')
# -99 is this dataset's missing-value sentinel for the performance averages.
data['perf_6_month_avg']=data['perf_6_month_avg'].replace(-99, np.NaN)
data['perf_12_month_avg']=data['perf_12_month_avg'].replace(-99, np.NaN)
# Correlation heatmap over all columns except the first (the SKU id).
varnames=list(data)[1:]
correlations = data[varnames].corr()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0,22,1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(varnames,rotation=90)
ax.set_yticklabels(varnames)
plt.show()
# Drop low-information flag columns.
data.drop('rev_stop', axis=1, inplace=True)
data.drop('oe_constraint', axis=1, inplace=True)
data.drop('potential_issue', axis=1, inplace=True)
data.drop('stop_auto_buy', axis=1, inplace=True)
data.drop('deck_risk', axis=1, inplace=True)
def check_missing(data):
    """Summarize missing values per column.

    Returns a DataFrame (top 3 rows, columns 'Missing' and 'Percent') with
    the absolute null count and the percentage of nulls, both sorted in
    descending order of null count.
    """
    null_counts = data.isnull().sum()
    totals = null_counts.sort_values(ascending=False)
    percentages = round(100 * null_counts / data.isnull().count(), 1).sort_values(ascending=False)
    summary = pd.concat([totals, percentages], axis=1, keys=['Missing', 'Percent'])
    return summary[:3]
check_missing(data)
# Impute the remaining missing values with each column's median.
data.fillna(data.median(), inplace=True)
data
check_missing(data)
# The SKU identifier carries no predictive information — drop it.
data.drop('sku', axis=1, inplace=True)
data
# Features = every column except the label; label = went_on_backorder.
X, y = data.loc[:,data.columns!='went_on_backorder'].values, data.loc[:,'went_on_backorder'].values
X,y
# Stratified 90/10 split keeps the class imbalance identical in both sets.
X_train_1, X_test, y_train_1, y_test = train_test_split(X, y, test_size=0.1, random_state=123, stratify = data['went_on_backorder'])
print(X_train_1.shape)
print(X_test.shape)
# BUGFIX: these prints referenced an undefined name `y_train` (the split above
# binds `y_train_1`), which raised a NameError.
print(pd.value_counts(y_train_1)/y_train_1.size * 100)
print(pd.value_counts(y_test)/y_test.size * 100)
def balance_split(X_train_1,y_train_1,flag=""):
    """Build a (partially) class-balanced training set.

    Samples are separated by label. With flag == "fair" the result holds at
    most 1000 samples of each class; otherwise it holds up to 10000 samples
    of class 0 plus ALL samples of class 1. Returns (X, y) as numpy arrays,
    class-0 samples first, original order preserved within each class.
    """
    class0, class1 = [], []
    labels0, labels1 = [], []
    for features, label in zip(X_train_1, y_train_1):
        if label == 0:
            class0.append(features)
            labels0.append(label)
        else:
            class1.append(features)
            labels1.append(label)
    if flag == "fair":
        # Fully balanced subset: equal caps on both classes.
        combined_X = class0[:1000] + class1[:1000]
        combined_y = labels0[:1000] + labels1[:1000]
    else:
        # Keep every minority (class-1) sample, cap the majority class.
        combined_X = class0[:10000] + class1
        combined_y = labels0[:10000] + labels1
    return np.asarray(combined_X), np.asarray(combined_y)
def save_data(X_train_1,y_train_1,flag="",name=""):
    """Balance the given samples and write them to dataset/<prefix>_<name>.csv.

    flag == "fair" uses the balanced split and the "fair_" prefix; any other
    flag uses the default split and the "classic_" prefix. Features and labels
    are concatenated column-wise; no index column is written.
    """
    if flag == "fair":
        features, labels = balance_split(X_train_1,y_train_1,flag)
        path = "dataset/fair_"+name+".csv"
    else:
        features, labels = balance_split(X_train_1,y_train_1)
        path = "dataset/classic_"+name+".csv"
    frame = pd.concat([pd.DataFrame(features), pd.DataFrame(labels)], axis=1)
    frame.to_csv(path, index=False)
#classic data
save_data(X_train_1,y_train_1,flag="classic",name="train")
#fair data
save_data(X_train_1,y_train_1,flag="fair",name="train")
#classic data
save_data(X_test,y_test,flag="classic",name="test")
# Reload each exported CSV to sanity-check its shape.
data = pd.read_csv("dataset/classic_train.csv")
X,y = data[data.columns[:16]].values, data[data.columns[16]].values
X.shape, y.shape
data = pd.read_csv("dataset/fair_train.csv")
X,y = data[data.columns[:16]].values, data[data.columns[16]].values
X.shape, y.shape
data = pd.read_csv("dataset/classic_test.csv")
X,y = data[data.columns[:16]].values, data[data.columns[16]].values
X.shape, y.shape
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#pip install pennyane
#improt pennylane dependnecies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
# Balanced training data, realistic (imbalanced) test data; 16 features + label.
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# 4 qubits: 16 features fit exactly into a 2^4 amplitude embedding.
n_wires = 4
dev = qml.device("default.qubit", wires=n_wires)
def block(weights, wires):
    """Two-qubit MERA building block: a CNOT entangler followed by one RY
    rotation per qubit, using weights[0] and weights[1] as the angles."""
    qml.CNOT(wires=[wires[0], wires[1]])
    for angle, wire in zip(weights[:2], wires[:2]):
        qml.RY(angle, wires=wire)
# Each MERA block acts on 2 wires and takes 2 parameters.
n_block_wires = 2
n_params_block = 2
# Number of blocks the MERA template needs for this wire count.
n_blocks = qml.MERA.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
    """MERA classifier circuit.

    Amplitude-encodes x on all four wires (normalized, zero-padded), applies
    one MERA layer per weight tensor, and returns <Z> on wire 1 as the score.
    """
    qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
    for w in weights:
        qml.MERA(range(n_wires),n_block_wires,block, n_params_block, w)
        #print(w)
        #print(x)
    return qml.expval(qml.PauliZ(1))
def variational_classifier(weights, bias, x):
    """Circuit expectation value shifted by a trainable bias."""
    expectation = circuit(weights, x)
    return expectation + bias
def square_loss(labels, predictions):
    """Mean squared error between labels and predictions."""
    return sum((target - predicted) ** 2
               for target, predicted in zip(labels, predictions)) / len(labels)
def accuracy(labels, predictions):
    """Fraction of predictions within 1e-5 of their label."""
    matches = sum(1 for target, predicted in zip(labels, predictions)
                  if abs(target - predicted) < 1e-5)
    return matches / len(labels)
def cost(weights, bias, X, Y):
    """Mean squared error of the classifier over the batch (X, Y)."""
    #print(1)
    outputs = [variational_classifier(weights, bias, sample) for sample in X]
    return square_loss(Y, outputs)
np.random.seed(0)
num_layers = 1
# Normally-distributed initial angles scaled by 2*pi; trainable bias starts at 0.
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
    print(i[0])
# Map the {0, 1} labels to {-1, +1} to match the PauliZ expectation range.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
    # Fresh 90/10 split each iteration; a random mini-batch drives the update.
    X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
    feats_train_batch = X_train_70[batch_index]
    Y_train_batch = y_train_70[batch_index]
    weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
    # Compute predictions on train and validation set
    predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
    predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
    # Compute accuracy on train and validation set
    acc_tra = accuracy(y_train_70, predictions_train)
    acc_val = accuracy(y_test_30, predictions_val)
    cost_train = cost(weights, bias,X_train, y_train)
    cost_g.append(cost_train)
    acc_train.append(acc_tra)
    acc_test.append(acc_val)
    # Live progress plot: cost plus train/validation accuracy.
    clear_output(wait=True)
    plt.plot(cost_g,label='cost')
    plt.plot(acc_train,label='acc_train')
    plt.plot(acc_test,label='acc_test')
    plt.legend(['cost','acc_train','acc_test'])
    plt.show()
    print(
        "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
        "".format(it + 1, cost_train, acc_tra, acc_val)
    )
print(weights)
# Normalize each test sample; an all-zero row cannot be amplitude-encoded,
# so set its first component to 1 before normalizing.
x_test = []
for x in X_test.tolist():
    if sum(x) == 0:
        x[0]=1
    x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_test
# BUGFIX: the raw scores were assigned to `y_test_pred`, but the thresholding
# loop below reads `y_pred`, which was never defined -> NameError. (The noisy
# mirror of this notebook assigns `y_pred` here, confirming the intent.)
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
y_test_pred = []
for i in y_pred:
    # np.sign can return 0; fold that case into the +1 class.
    if i < 0:
        y_test_pred.append(-1)
    else:
        y_test_pred.append(1)
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# Test-set metrics for the quantum classifier (labels are -1/+1).
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# Persist the predictions, one label per line.
y_pred_1 = [int(i) for i in y_test_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mera_1_layers.csv', 'w') as fp:
    fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#pip install pennyane
#improt pennylane dependnecies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
# Same data as the noiseless MERA notebook: balanced train, realistic test.
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# Mixed-state simulator: required to model the amplitude-damping noise below.
n_wires = 4
dev = qml.device("default.mixed", wires=n_wires)
def block(weights, wires):
    """Two-qubit MERA building block: a CNOT entangler followed by one RY
    rotation per qubit, using weights[0] and weights[1] as the angles."""
    qml.CNOT(wires=[wires[0], wires[1]])
    for angle, wire in zip(weights[:2], wires[:2]):
        qml.RY(angle, wires=wire)
# Each MERA block acts on 2 wires and takes 2 parameters.
n_block_wires = 2
n_params_block = 2
# Number of blocks the MERA template needs for this wire count.
n_blocks = qml.MERA.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
# Inject amplitude-damping noise (gamma = 0.2) after the circuit to emulate
# decoherence on hardware; hence the "default.mixed" device above.
@qml.transforms.insert(qml.AmplitudeDamping, 0.2, position="end")
def circuit(weights, x):
    """Noisy MERA classifier circuit.

    Amplitude-encodes x on all four wires (normalized, zero-padded), applies
    one MERA layer per weight tensor, and returns <Z> on wire 1 as the score.
    """
    qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
    for w in weights:
        qml.MERA(range(n_wires),n_block_wires,block, n_params_block, w)
        #print(w)
        #print(x)
    return qml.expval(qml.PauliZ(1))
def variational_classifier(weights, bias, x):
    """Circuit expectation value shifted by a trainable bias."""
    expectation = circuit(weights, x)
    return expectation + bias
def square_loss(labels, predictions):
    """Mean squared error between labels and predictions."""
    return sum((target - predicted) ** 2
               for target, predicted in zip(labels, predictions)) / len(labels)
def accuracy(labels, predictions):
    """Fraction of predictions within 1e-5 of their label."""
    matches = sum(1 for target, predicted in zip(labels, predictions)
                  if abs(target - predicted) < 1e-5)
    return matches / len(labels)
def cost(weights, bias, X, Y):
    """Mean squared error of the classifier over the batch (X, Y)."""
    #print(1)
    outputs = [variational_classifier(weights, bias, sample) for sample in X]
    return square_loss(Y, outputs)
np.random.seed(0)
num_layers = 1
# Normally-distributed initial angles scaled by 2*pi; trainable bias starts at 0.
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
    print(i[0])
# Map the {0, 1} labels to {-1, +1} to match the PauliZ expectation range.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
    # Fresh 90/10 split each iteration; a random mini-batch drives the update.
    X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
    feats_train_batch = X_train_70[batch_index]
    Y_train_batch = y_train_70[batch_index]
    weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
    # Compute predictions on train and validation set
    predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
    predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
    # Compute accuracy on train and validation set
    acc_tra = accuracy(y_train_70, predictions_train)
    acc_val = accuracy(y_test_30, predictions_val)
    cost_train = cost(weights, bias,X_train, y_train)
    cost_g.append(cost_train)
    acc_train.append(acc_tra)
    acc_test.append(acc_val)
    # Live progress plot: cost plus train/validation accuracy.
    clear_output(wait=True)
    plt.plot(cost_g,label='cost')
    plt.plot(acc_train,label='acc_train')
    plt.plot(acc_test,label='acc_test')
    plt.legend(['cost','acc_train','acc_test'])
    plt.show()
    print(
        "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
        "".format(it + 1, cost_train, acc_tra, acc_val)
    )
print(weights)
# Normalize each test sample; an all-zero row cannot be amplitude-encoded,
# so set its first component to 1 before normalizing.
x_test = []
for x in X_test.tolist():
    if sum(x) == 0:
        x[0]=1
    x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_test
# Hard-threshold the signed scores into -1/+1 labels (np.sign may return 0,
# which falls into the +1 class here).
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
y_test_pred = []
for i in y_pred:
    if i < 0:
        y_test_pred.append(-1)
    else:
        y_test_pred.append(1)
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# Test-set metrics for the noisy quantum classifier (labels are -1/+1).
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# Persist the predictions, one label per line.
y_pred_1 = [int(i) for i in y_test_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mera_1_layers_noise.csv', 'w') as fp:
    fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
https://arxiv.org/pdf/2010.07335.pdf
https://arxiv.org/pdf/2105.10162.pdf
https://arxiv.org/pdf/2203.01340.pdf maria
https://arxiv.org/pdf/1810.03787.pdf MERA
https://arxiv.org/pdf/1905.01426.pdf MERA 2
https://pennylane.ai/qml/demos/tutorial_tn_circuits.html
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
# !pip install --upgrade pip
# !pip uninstall tensorflow --y
# !pip install tensorflow
import os
# Silence TensorFlow C++ logging and force CPU-only execution.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# load csv file
import pandas as pd
# numpy to the seed
import numpy as np
# load csv fileframework to neural networks
import tensorflow as tf
#Method forthe neural network
from keras.regularizers import l2
from keras.models import Sequential
from keras.layers import Dense, Dropout
#save as image the model summary
from keras.utils.vis_utils import plot_model
# librariesto plot
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# --- Classical baseline: dense network trained on the "fair" training split ---
# The first 16 columns are features; column 16 is the binary label.
data_train = pd.read_csv("fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# Fix both numpy and tensorflow seeds for reproducibility.
np.random.seed(123)
tf.random.set_seed(123)
# Standardise features using training-set statistics only.
scale = StandardScaler()
scale.fit(X_train)
X_train_std = scale.transform(X_train)
X_test_std = scale.transform(X_test)
X_train_std[1], y_train[1]
# 16 -> 25 -> 8 -> 1(sigmoid) classifier with dropout and light L2 regularisation.
model = Sequential()
model.add(Dense(25, input_dim=16, activation='relu', kernel_regularizer=l2(1e-6),kernel_initializer="glorot_normal"))
model.add(Dropout(0.5))
model.add(Dense(8, activation='relu',kernel_regularizer=l2(1e-6), kernel_initializer="glorot_normal"))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid',kernel_regularizer=l2(1e-6), kernel_initializer="glorot_normal"))
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
# Compile model
auc = tf.keras.metrics.AUC()
model.compile(loss='binary_crossentropy', optimizer="Adam", metrics=['accuracy',auc])
model_history = model.fit(X_train_std, y_train, epochs=100,
                          batch_size=32,
                          validation_split=0.2, shuffle=True)
train_pred = model.predict(X_train_std)
test_pred = model.predict(X_test_std)
# Threshold the sigmoid outputs at 0.5 to get hard 0/1 labels.
y_train_pred = (model.predict(X_train_std) > 0.5).astype("int32")
y_test_pred = (model.predict(X_test_std) > 0.5).astype("int32")
# --- Training-set metrics ---
accuracy = accuracy_score(y_train, y_train_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_train, y_train_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_train, y_train_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_train, y_train_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_train, y_train_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
# NOTE(review): this rebinds `auc` (previously the keras metric object) and also
# shadows sklearn.metrics.auc imported above — harmless here, but fragile.
auc = roc_auc_score(y_train, y_train_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
train_matrix = confusion_matrix(y_train, y_train_pred)
print(train_matrix)
ax = sns.heatmap(train_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# --- Test-set metrics (same battery as above) ---
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
# !pip install --upgrade pip
# !pip uninstall tensorflow --y
# !pip install tensorflow
#import os
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# load csv file
import pandas as pd
# numpy to the seed
import numpy as np
# load csv fileframework to neural networks
import tensorflow as tf
#Method forthe neural network
from keras.regularizers import l2
from keras.models import Sequential
from keras.layers import Dense, Dropout
#save as image the model summary
from keras.utils.vis_utils import plot_model
# librariesto plot
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# --- Same dense-network baseline, trained on the "classic" training split ---
data_train = pd.read_csv("classic_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# Fixed seeds for reproducibility.
np.random.seed(123)
tf.random.set_seed(123)
# Standardise features using training-set statistics only.
scale = StandardScaler()
scale.fit(X_train)
X_train_std = scale.transform(X_train)
X_test_std = scale.transform(X_test)
X_train_std[1], y_train[1]
# Identical architecture to the fair-split run: 16 -> 25 -> 8 -> 1(sigmoid).
model = Sequential()
model.add(Dense(25, input_dim=16, activation='relu', kernel_regularizer=l2(1e-6),kernel_initializer="glorot_normal"))
model.add(Dropout(0.5))
model.add(Dense(8, activation='relu',kernel_regularizer=l2(1e-6), kernel_initializer="glorot_normal"))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid',kernel_regularizer=l2(1e-6), kernel_initializer="glorot_normal"))
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
# Compile model
auc = tf.keras.metrics.AUC()
model.compile(loss='binary_crossentropy', optimizer="Adam", metrics=['accuracy',auc])
model_history = model.fit(X_train_std, y_train, epochs=100,
                          batch_size=32,
                          validation_split=0.2, shuffle=True)
train_pred = model.predict(X_train_std)
test_pred = model.predict(X_test_std)
# Threshold the sigmoid outputs at 0.5 to get hard 0/1 labels.
y_train_pred = (model.predict(X_train_std) > 0.5).astype("int32")
y_test_pred = (model.predict(X_test_std) > 0.5).astype("int32")
# --- Training-set metrics ---
accuracy = accuracy_score(y_train, y_train_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_train, y_train_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_train, y_train_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_train, y_train_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_train, y_train_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
# NOTE(review): rebinds `auc` (the keras metric above) and shadows
# sklearn.metrics.auc — harmless in this cell order, but fragile.
auc = roc_auc_score(y_train, y_train_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
train_matrix = confusion_matrix(y_train, y_train_pred)
print(train_matrix)
ax = sns.heatmap(train_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# --- Test-set metrics ---
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#pip install pennylane
# import pennylane dependencies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
# Load the pre-split datasets: 16 feature columns, binary label in column 16.
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# 4-qubit statevector simulator: 2**4 = 16 amplitudes, one per feature.
n_wires = 4
dev = qml.device("default.qubit", wires=n_wires)
def block(weights, wires):
    """Two-qubit MERA building block: CNOT entangler, then one RY per wire."""
    ctrl, tgt = wires[0], wires[1]
    qml.CNOT(wires=[ctrl, tgt])
    qml.RY(weights[0], wires=ctrl)
    qml.RY(weights[1], wires=tgt)
# MERA template geometry: two-wire blocks with two RY parameters each.
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MERA.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
    """Amplitude-embed the 16 features, apply one MERA layer per weight set,
    and return the expectation of PauliZ on wire 1.

    NOTE(review): pad_with=True pads short inputs with the value True (i.e. 1)
    — presumably intentional, but worth confirming against the data width.
    """
    qml.AmplitudeEmbedding(x, wires=[0, 1, 2, 3], normalize=True, pad_with=True)
    for layer_weights in weights:
        qml.MERA(range(n_wires), n_block_wires, block, n_params_block, layer_weights)
    return qml.expval(qml.PauliZ(1))
def variational_classifier(weights, bias, x):
    """Quantum model output: circuit expectation shifted by a trainable bias."""
    raw = circuit(weights, x)
    return raw + bias
def square_loss(labels, predictions):
    """Mean squared error between labels and predictions (paired by position)."""
    total = sum((lbl - pred) ** 2 for lbl, pred in zip(labels, predictions))
    return total / len(labels)
def accuracy(labels, predictions):
    """Fraction of predictions within 1e-5 of their label."""
    hits = sum(1 for lbl, pred in zip(labels, predictions) if abs(lbl - pred) < 1e-5)
    return hits / len(labels)
def cost(weights, bias, X, Y):
    """Mean squared error of the variational classifier over batch X vs labels Y."""
    preds = [variational_classifier(weights, bias, sample) for sample in X]
    return square_loss(Y, preds)
# Reproducible initialisation: one MERA layer, n_blocks x n_params_block angles.
np.random.seed(0)
num_layers = 1
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
    print(i[0])
# Remap labels {0,1} -> {-1,+1} to match the PauliZ expectation range.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
    # Fresh 90/10 split each iteration, then a random mini-batch of 32 samples.
    X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
    feats_train_batch = X_train_70[batch_index]
    Y_train_batch = y_train_70[batch_index]
    weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
    # Compute predictions on train and validation set
    predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
    predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
    # Compute accuracy on train and validation set
    acc_tra = accuracy(y_train_70, predictions_train)
    acc_val = accuracy(y_test_30, predictions_val)
    # NOTE(review): cost over the FULL training set every iteration is the slow step.
    cost_train = cost(weights, bias,X_train, y_train)
    cost_g.append(cost_train)
    acc_train.append(acc_tra)
    acc_test.append(acc_val)
    # Live-updating plot of cost and accuracy histories.
    clear_output(wait=True)
    plt.plot(cost_g,label='cost')
    plt.plot(acc_train,label='acc_train')
    plt.plot(acc_test,label='acc_test')
    plt.legend(['cost','acc_train','acc_test'])
    plt.show()
    print(
        "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
        "".format(it + 1, cost_train, acc_tra, acc_val)
    )
print(weights)
# Unit-normalise each test row for amplitude embedding; all-zero rows get a 1
# in the first slot so the norm is non-zero.
x_test = []
for x in X_test.tolist():
    if sum(x) == 0:
        x[0]=1
    x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_test
# Predict on the normalised test set: sign of the classifier output.
# BUG FIX: the original assigned this list to y_test_pred and then immediately
# clobbered it with [] before looping over an undefined name `y_pred`
# (NameError). Bind the raw predictions to y_pred, as the later duplicate of
# this cell does.
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
# Collapse raw signs onto the {-1, +1} label set (np.sign can also yield 0).
y_test_pred = []
for i in y_pred:
    if i < 0:
        y_test_pred.append(-1)
    else:
        y_test_pred.append(1)
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# --- Test-set metrics for the 1-layer MERA classifier ---
# NOTE(review): the next line rebinds the accuracy() helper defined above;
# fine after training is done, but fragile if cells are re-run out of order.
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# Export predictions, one per line.
y_pred_1 = [int(i) for i in y_test_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mera_1_layers.csv', 'w') as fp:
    fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#pip install pennylane
# import pennylane dependencies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
from sklearn.preprocessing import StandardScaler
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
# --- 1-layer MERA classifier, this time on standardised features ---
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# Standardise with training-set statistics; X_train / X_test are overwritten in place.
scale = StandardScaler()
scale.fit(X_train)
X_train = scale.transform(X_train)
X_test = scale.transform(X_test)
# 4-qubit statevector simulator: 2**4 = 16 amplitudes, one per feature.
n_wires = 4
dev = qml.device("default.qubit", wires=n_wires)
def block(weights, wires):
    """Two-qubit MERA block: CNOT entangler followed by one RY per wire."""
    qml.CNOT(wires=[wires[0],wires[1]])
    qml.RY(weights[0], wires=wires[0])
    qml.RY(weights[1], wires=wires[1])
# MERA template geometry: two-wire blocks with two RY parameters each.
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MERA.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
    """Embed x as amplitudes, apply one MERA layer per weight set, return <Z1>."""
    qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
    for w in weights:
        qml.MERA(range(n_wires),n_block_wires,block, n_params_block, w)
    #print(w)
    #print(x)
    return qml.expval(qml.PauliZ(1))
def variational_classifier(weights, bias, x):
    """Circuit expectation shifted by a trainable bias."""
    return circuit(weights, x) + bias
def square_loss(labels, predictions):
    """Mean squared error between labels and predictions."""
    loss = 0
    for l, p in zip(labels, predictions):
        loss = loss + (l - p) ** 2
    loss = loss / len(labels)
    return loss
def accuracy(labels, predictions):
    """Fraction of predictions within 1e-5 of the label."""
    loss = 0
    for l, p in zip(labels, predictions):
        if abs(l - p) < 1e-5:
            loss = loss + 1
    loss = loss / len(labels)
    return loss
def cost(weights, bias, X, Y):
    """Mean squared error of the classifier over batch X vs labels Y."""
    #print(1)
    predictions = [variational_classifier(weights, bias, x) for x in X]
    return square_loss(Y, predictions)
# Reproducible initialisation: one MERA layer of random angles.
np.random.seed(0)
num_layers = 1
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
    print(i[0])
# Remap labels {0,1} -> {-1,+1} to match the PauliZ expectation range.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
    # Fresh 90/10 split each iteration, then a random mini-batch.
    X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
    feats_train_batch = X_train_70[batch_index]
    Y_train_batch = y_train_70[batch_index]
    weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
    # Compute predictions on train and validation set
    predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
    predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
    # Compute accuracy on train and validation set
    acc_tra = accuracy(y_train_70, predictions_train)
    acc_val = accuracy(y_test_30, predictions_val)
    cost_train = cost(weights, bias,X_train, y_train)
    cost_g.append(cost_train)
    acc_train.append(acc_tra)
    acc_test.append(acc_val)
    # Live-updating plot of cost and accuracy histories.
    clear_output(wait=True)
    plt.plot(cost_g,label='cost')
    plt.plot(acc_train,label='acc_train')
    plt.plot(acc_test,label='acc_test')
    plt.legend(['cost','acc_train','acc_test'])
    plt.show()
    print(
        "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
        "".format(it + 1, cost_train, acc_tra, acc_val)
    )
print(weights)
# Unit-normalise test rows; all-zero rows get a 1 so the norm is non-zero.
x_test = []
for x in X_test.tolist():
    if sum(x) == 0:
        x[0]=1
    x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_test
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
# Collapse raw signs onto the {-1,+1} label set.
y_test_pred = []
for i in y_pred:
    if i < 0:
        y_test_pred.append(-1)
    else:
        y_test_pred.append(1)
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# NOTE(review): the next line rebinds the accuracy() helper defined above.
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# Export predictions, one per line.
y_pred_1 = [int(i) for i in y_test_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mera_1_standard_layers.csv', 'w') as fp:
    fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#pip install pennylane
# import pennylane dependencies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
# --- 2-layer MERA classifier (raw, un-standardised features) ---
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# 4-qubit statevector simulator: 2**4 = 16 amplitudes, one per feature.
n_wires = 4
dev = qml.device("default.qubit", wires=n_wires)
def block(weights, wires):
    """Two-qubit MERA block: CNOT entangler followed by one RY per wire."""
    qml.CNOT(wires=[wires[0],wires[1]])
    qml.RY(weights[0], wires=wires[0])
    qml.RY(weights[1], wires=wires[1])
# MERA template geometry: two-wire blocks with two RY parameters each.
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MERA.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
    """Embed x as amplitudes, apply one MERA layer per weight set, return <Z1>."""
    qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
    for w in weights:
        qml.MERA(range(n_wires),n_block_wires,block, n_params_block, w)
    #print(w)
    #print(x)
    return qml.expval(qml.PauliZ(1))
def variational_classifier(weights, bias, x):
    """Circuit expectation shifted by a trainable bias."""
    return circuit(weights, x) + bias
def square_loss(labels, predictions):
    """Mean squared error between labels and predictions."""
    loss = 0
    for l, p in zip(labels, predictions):
        loss = loss + (l - p) ** 2
    loss = loss / len(labels)
    return loss
def accuracy(labels, predictions):
    """Fraction of predictions within 1e-5 of the label."""
    loss = 0
    for l, p in zip(labels, predictions):
        if abs(l - p) < 1e-5:
            loss = loss + 1
    loss = loss / len(labels)
    return loss
def cost(weights, bias, X, Y):
    """Mean squared error of the classifier over batch X vs labels Y."""
    #print(1)
    predictions = [variational_classifier(weights, bias, x) for x in X]
    return square_loss(Y, predictions)
# Reproducible initialisation: TWO MERA layers of random angles this time.
np.random.seed(0)
num_layers = 2
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
    print(i[0])
# Remap labels {0,1} -> {-1,+1} to match the PauliZ expectation range.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
    # Fresh 90/10 split each iteration, then a random mini-batch.
    X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
    feats_train_batch = X_train_70[batch_index]
    Y_train_batch = y_train_70[batch_index]
    weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
    # Compute predictions on train and validation set
    predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
    predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
    # Compute accuracy on train and validation set
    acc_tra = accuracy(y_train_70, predictions_train)
    acc_val = accuracy(y_test_30, predictions_val)
    cost_train = cost(weights, bias,X_train, y_train)
    cost_g.append(cost_train)
    acc_train.append(acc_tra)
    acc_test.append(acc_val)
    # Live-updating plot of cost and accuracy histories.
    clear_output(wait=True)
    plt.plot(cost_g,label='cost')
    plt.plot(acc_train,label='acc_train')
    plt.plot(acc_test,label='acc_test')
    plt.legend(['cost','acc_train','acc_test'])
    plt.show()
    print(
        "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
        "".format(it + 1, cost_train, acc_tra, acc_val)
    )
print(weights)
# Unit-normalise test rows; all-zero rows get a 1 so the norm is non-zero.
x_test = []
for x in X_test.tolist():
    if sum(x) == 0:
        x[0]=1
    x_test.append( x/ np.linalg.norm(x))
x_test[0]
# Hard predictions via the sign of the classifier output.
y_test_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
y_test_pred
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# NOTE(review): the next line rebinds the accuracy() helper defined above.
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# Persist the test-set predictions, one per line.
# BUG FIX: the original iterated over `y_pred`, which is never defined in this
# script (NameError); the predictions are stored in `y_test_pred` above.
y_pred_1 = [int(i) for i in y_test_pred]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mera_2_layers.csv', 'w') as fp:
    fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#pip install pennylane
# import pennylane dependencies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
# --- 4-layer MERA classifier (raw, un-standardised features) ---
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# 4-qubit statevector simulator: 2**4 = 16 amplitudes, one per feature.
n_wires = 4
dev = qml.device("default.qubit", wires=n_wires)
def block(weights, wires):
    """Two-qubit MERA block: CNOT entangler followed by one RY per wire."""
    qml.CNOT(wires=[wires[0],wires[1]])
    qml.RY(weights[0], wires=wires[0])
    qml.RY(weights[1], wires=wires[1])
# MERA template geometry: two-wire blocks with two RY parameters each.
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MERA.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
    """Embed x as amplitudes, apply one MERA layer per weight set, return <Z1>."""
    qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
    for w in weights:
        qml.MERA(range(n_wires),n_block_wires,block, n_params_block, w)
    #print(w)
    #print(x)
    return qml.expval(qml.PauliZ(1))
def variational_classifier(weights, bias, x):
    """Circuit expectation shifted by a trainable bias."""
    return circuit(weights, x) + bias
def square_loss(labels, predictions):
    """Mean squared error between labels and predictions."""
    loss = 0
    for l, p in zip(labels, predictions):
        loss = loss + (l - p) ** 2
    loss = loss / len(labels)
    return loss
def accuracy(labels, predictions):
    """Fraction of predictions within 1e-5 of the label."""
    loss = 0
    for l, p in zip(labels, predictions):
        if abs(l - p) < 1e-5:
            loss = loss + 1
    loss = loss / len(labels)
    return loss
def cost(weights, bias, X, Y):
    """Mean squared error of the classifier over batch X vs labels Y."""
    #print(1)
    predictions = [variational_classifier(weights, bias, x) for x in X]
    return square_loss(Y, predictions)
# Reproducible initialisation: FOUR MERA layers of random angles this time.
np.random.seed(0)
num_layers = 4
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
    print(i[0])
# Remap labels {0,1} -> {-1,+1} to match the PauliZ expectation range.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
    # Fresh 90/10 split each iteration, then a random mini-batch.
    X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
    feats_train_batch = X_train_70[batch_index]
    Y_train_batch = y_train_70[batch_index]
    weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
    # Compute predictions on train and validation set
    predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
    predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
    # Compute accuracy on train and validation set
    acc_tra = accuracy(y_train_70, predictions_train)
    acc_val = accuracy(y_test_30, predictions_val)
    cost_train = cost(weights, bias,X_train, y_train)
    cost_g.append(cost_train)
    acc_train.append(acc_tra)
    acc_test.append(acc_val)
    # Live-updating plot of cost and accuracy histories.
    clear_output(wait=True)
    plt.plot(cost_g,label='cost')
    plt.plot(acc_train,label='acc_train')
    plt.plot(acc_test,label='acc_test')
    plt.legend(['cost','acc_train','acc_test'])
    plt.show()
    print(
        "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
        "".format(it + 1, cost_train, acc_tra, acc_val)
    )
print(weights)
# Unit-normalise test rows; all-zero rows get a 1 so the norm is non-zero.
x_test = []
for x in X_test.tolist():
    if sum(x) == 0:
        x[0]=1
    x_test.append( x/ np.linalg.norm(x))
x_test[0]
# Hard predictions via the sign of the classifier output.
y_test_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
y_test_pred
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# NOTE(review): the next line rebinds the accuracy() helper defined above.
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# Export predictions, one per line.
y_pred_1 = [int(i) for i in y_test_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mera_4_layers.csv', 'w') as fp:
    fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from IPython.display import clear_output
clear_output(wait=False)
import os
import tensorflow as tf
# Load the pre-split supply-chain dataset: columns 0-15 are the features,
# column 16 is the binary class label.
data_train = pd.read_csv("dataset/classic_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
# Notebook echo: sanity-check the array shapes.
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# Pure-state simulator; 4 qubits hold the 16 amplitudes of one sample.
dev = qml.device("default.qubit", wires=4)
def block(weights, wires):
    """Two-qubit tensor-network building block.

    Applies an entangling CNOT across the pair, then an independent
    RY rotation on each wire.

    Args:
        weights: two rotation angles, one per wire.
        wires: pair of wire labels (control, target).
    """
    ctrl, tgt = wires[0], wires[1]
    qml.CNOT(wires=[ctrl, tgt])
    qml.RY(weights[0], wires=ctrl)
    qml.RY(weights[1], wires=tgt)
n_wires = 4          # qubits in the circuit
n_block_wires = 2    # wires consumed by each MERA block
n_params_block = 2   # trainable parameters per block
# Number of blocks the MERA template will place for this wiring.
n_blocks = qml.MERA.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
    """Variational MERA classifier circuit.

    Encodes the feature vector ``x`` into the amplitudes of 4 qubits,
    applies one MERA layer per entry of ``weights``, and measures the
    Pauli-Z expectation of qubit 1.

    Args:
        weights: iterable of per-layer parameter tensors, each of shape
            (n_blocks, n_params_block), consumed by ``qml.MERA``.
        x: feature vector; normalized (and padded) by AmplitudeEmbedding.

    Returns:
        Expectation value of PauliZ on wire 1, in [-1, 1].
    """
    # NOTE(review): ``pad_with=True`` passes a bool where a numeric pad
    # value is expected (treated as the number 1) -- confirm intended.
    qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
    for w in weights:
        # One multi-scale entanglement renormalization ansatz layer built
        # from the two-qubit ``block`` template defined above.
        qml.MERA(range(n_wires),n_block_wires,block, n_params_block, w)
    return qml.expval(qml.PauliZ(1))
def variational_classifier(weights, bias, x):
    """Model output: circuit expectation value shifted by a trainable bias."""
    expval = circuit(weights, x)
    return expval + bias
def square_loss(labels, predictions):
    """Mean squared error between ``labels`` and ``predictions``.

    Raises ZeroDivisionError on empty input, like the original loop form.
    """
    return sum((lab - pred) ** 2 for lab, pred in zip(labels, predictions)) / len(labels)
def accuracy(labels, predictions):
    """Fraction of predictions matching their labels to within 1e-5."""
    hits = sum(1 for lab, pred in zip(labels, predictions) if abs(lab - pred) < 1e-5)
    return hits / len(labels)
def cost(weights, bias, X, Y):
    """Mean-squared-error cost of the classifier over a batch (X, Y)."""
    predicted = [variational_classifier(weights, bias, sample) for sample in X]
    return square_loss(Y, predicted)
np.random.seed(0)    # reproducible initial weights
num_layers = 1       # number of stacked MERA layers
# Gaussian init scaled by 2*pi (values can exceed one rotation period).
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
# Text and matplotlib renderings of the compiled circuit on the first sample.
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
fig, ax = qml.draw_mpl(circuit)(weights_init,np.asarray(X_train[0]))
fig.show()
for i in weights_init:
    print(i[0])
# Remap labels from {0, 1} to {-1, +1} to match the range of the <Z> readout.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
# Nesterov momentum with a fairly aggressive step size.
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9      # fraction used for training in each per-iteration re-split
# train the variational classifier
weights = weights_init
bias = bias_init
print()
# Histories for live plotting of the learning curves.
cost_g = []
acc_train = []
acc_test = []
plt.show()
# Training loop: 50 iterations of Nesterov-momentum updates on mini-batches,
# with a fresh 90/10 train/validation split drawn every iteration.
for it in range(50):
    # NOTE(review): re-splitting each iteration means the "validation" set
    # changes every pass and overlaps previous training data.
    X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
    # Update the weights by one optimizer step on a random mini-batch.
    batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
    feats_train_batch = X_train_70[batch_index]
    Y_train_batch = y_train_70[batch_index]
    weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
    # Compute predictions on train and validation set
    predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
    predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
    # Compute accuracy on train and validation set
    acc_tra = accuracy(y_train_70, predictions_train)
    acc_val = accuracy(y_test_30, predictions_val)
    # NOTE(review): cost is evaluated over the FULL training set every
    # iteration -- the dominant expense of this loop.
    cost_train = cost(weights, bias,X_train, y_train)
    cost_g.append(cost_train)
    acc_train.append(acc_tra)
    acc_test.append(acc_val)
    # Live plot: clear the cell output and redraw the learning curves.
    clear_output(wait=True)
    plt.plot(cost_g,label='cost')
    plt.plot(acc_train,label='acc_train')
    plt.plot(acc_test,label='acc_test')
    plt.legend(['cost','acc_train','acc_test'])
    plt.show()
    print(
        "Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
        "".format(it + 1, cost_train, acc_tra, acc_val)
    )
print(weights)
# Normalize each test vector to unit L2 norm (AmplitudeEmbedding expects
# normalizable input).  Guard against the all-zero vector, which has no
# direction: we check `not any(x)` rather than the original `sum(x) == 0`,
# because a vector with cancelling entries (e.g. [1, -1]) sums to zero but
# has a nonzero norm and must not be altered.
x_test = []
for x in X_test.tolist():
    if not any(x):
        x[0] = 1
    x_test.append(np.asarray(x) / np.linalg.norm(x))
x_test[0]
# Raw model outputs -> signs -> hard class labels in {-1, +1}.
# np.sign can yield 0.0 for an exactly-zero output; that case is binned
# with the positive class, matching the original threshold.
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
y_test_pred = [-1 if sign_val < 0 else 1 for sign_val in y_pred]
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# accuracy: (tp + tn) / total
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
# NOTE(review): this rebinds the name ``auc`` imported above, shadowing
# sklearn's auc() function for the rest of the script.
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
# NOTE(review): tick labels show '0'/'1' but the classes are -1/+1 -- confirm.
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# Persist the hard predictions, one per line, for later comparison.
y_pred_1 = [int(i) for i in y_test_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mera_1_layers_classic_dataset.csv', 'w') as fp:
    fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#!pip install pennylane
#improt pennylane dependnecies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
dev = qml.device("default.qubit", wires=4)
def layer_1(W):
    """Hand-crafted 13-parameter variational layer on 4 qubits.

    Structure: per-qubit RY rotations, a Hadamard on every wire, per-qubit
    RZ rotations, then a ladder of CNOTs (each controlled on a higher wire,
    targeting a lower one) interleaved with further RY rotations.

    Args:
        W: sequence of at least 13 rotation angles, consumed in order.

    NOTE(review): defined here but not used by the MPS circuit below --
    presumably kept from an earlier ansatz experiment.
    """
    qml.RY(W[0], wires=0)
    qml.RY(W[1], wires=1)
    qml.RY(W[2], wires=2)
    qml.RY(W[3], wires=3)
    qml.Hadamard(wires=0)
    qml.Hadamard(wires=1)
    qml.Hadamard(wires=2)
    qml.Hadamard(wires=3)
    qml.RZ(W[4], wires=0)
    qml.RZ(W[5], wires=1)
    qml.RZ(W[6], wires=2)
    qml.RZ(W[7], wires=3)
    qml.CNOT(wires=[1,0])
    qml.RY(W[8], wires=1)
    qml.CNOT(wires=[2,0])
    qml.RY(W[9], wires=2)
    qml.CNOT(wires=[3,0])
    qml.RY(W[10], wires=3)
    qml.CNOT(wires=[2,1])
    qml.RY(W[11], wires=2)
    qml.CNOT(wires=[3,1])
    qml.RY(W[12], wires=3)
def block(weights, wires):
qml.CNOT(wires=[wires[0],wires[1]])
qml.RY(weights[0], wires=wires[0])
qml.RY(weights[1], wires=wires[1])
n_wires = 4
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MPS.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
#statepreparation(x)
qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
#for i in range(4):
#qml.Hadamard(wires=i)
#qml.RY(x[i], wires=i)
#qml.Hadamard(wires=i)
#qml.RX(x[i+4], wires=i)
#qml.RX(x[i+8], wires=i)
#qml.CNOT(wires=[0,1])
#qml.CNOT(wires=[1,2])
#qml.CNOT(wires=[2,3])
#qml.CNOT(wires=[3,0])
#qml.CNOT(wires=[4,0])
#qml.CNOT(wires=[0,1])
#qml.CNOT(wires=[1,2])
#qml.CNOT(wires=[2,3])
#qml.CNOT(wires=[3,0])
for w in weights:
#layer_1(w[:13])
#layer_2(w[12:21])
#layer_2(w[16:24])
#layer_3(w[21:])
qml.MPS(range(n_wires),n_block_wires,block, n_params_block, w)
#print(w)
#print(x)
return qml.expval(qml.PauliZ(3))
def variational_classifier(weights, bias, x):
return circuit(weights, x) + bias
def square_loss(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
loss = loss + (l - p) ** 2
loss = loss / len(labels)
return loss
def accuracy(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
if abs(l - p) < 1e-5:
loss = loss + 1
loss = loss / len(labels)
return loss
def cost(weights, bias, X, Y):
#print(1)
predictions = [variational_classifier(weights, bias, x) for x in X]
return square_loss(Y, predictions)
np.random.seed(0)
num_layers = 1
weights_init = 2*np.pi * np.random.randn(num_layers,3, 2, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3,4,5,6,7])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
print(i[0])
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
# Update the weights by one optimizer step
batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
feats_train_batch = X_train_70[batch_index]
Y_train_batch = y_train_70[batch_index]
weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
# Compute predictions on train and validation set
predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
# Compute accuracy on train and validation set
acc_tra = accuracy(y_train_70, predictions_train)
acc_val = accuracy(y_test_30, predictions_val)
cost_train = cost(weights, bias,X_train, y_train)
cost_g.append(cost_train)
acc_train.append(acc_tra)
acc_test.append(acc_val)
clear_output(wait=True)
plt.plot(cost_g,label='cost')
plt.plot(acc_train,label='acc_train')
plt.plot(acc_test,label='acc_test')
plt.legend(['cost','acc_train','acc_test'])
plt.show()
print(
"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost_train, acc_tra, acc_val)
)
print(weights)
# Normalize each test vector to unit L2 norm.  Guard against the all-zero
# vector: we check `not any(x)` rather than the original `sum(x) == 0`,
# because a vector with cancelling entries (e.g. [1, -1]) sums to zero but
# has a nonzero norm and must not be altered.
x_test = []
for x in X_test.tolist():
    if not any(x):
        x[0] = 1
    x_test.append(np.asarray(x) / np.linalg.norm(x))
x_test[0]
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
y_test_pred = []
for i in y_pred:
if i < 0:
y_test_pred.append(-1)
else:
y_test_pred.append(1)
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
y_pred_1 = [int(i) for i in y_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mps_1_layers.csv', 'w') as fp:
fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#!pip install pennylane
#improt pennylane dependnecies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
from sklearn.preprocessing import StandardScaler
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
scale = StandardScaler()
scale.fit(X_train)
X_train = scale.transform(X_train)
X_test = scale.transform(X_test)
dev = qml.device("default.qubit", wires=4)
def layer_1(W):
qml.RY(W[0], wires=0)
qml.RY(W[1], wires=1)
qml.RY(W[2], wires=2)
qml.RY(W[3], wires=3)
qml.Hadamard(wires=0)
qml.Hadamard(wires=1)
qml.Hadamard(wires=2)
qml.Hadamard(wires=3)
qml.RZ(W[4], wires=0)
qml.RZ(W[5], wires=1)
qml.RZ(W[6], wires=2)
qml.RZ(W[7], wires=3)
qml.CNOT(wires=[1,0])
qml.RY(W[8], wires=1)
qml.CNOT(wires=[2,0])
qml.RY(W[9], wires=2)
qml.CNOT(wires=[3,0])
qml.RY(W[10], wires=3)
qml.CNOT(wires=[2,1])
qml.RY(W[11], wires=2)
qml.CNOT(wires=[3,1])
qml.RY(W[12], wires=3)
# qml.Hadamard(wires=0)
# qml.Hadamard(wires=1)
# qml.Hadamard(wires=2)
# qml.Hadamard(wires=3)
#qml.CNOT(wires=[1,0])
#qml.CNOT(wires=[2,0])
#qml.CNOT(wires=[3,0])
def block(weights, wires):
    """Two-qubit tensor-network building block: entangling CNOT followed by
    an independent RY rotation on each wire."""
    first, second = wires[0], wires[1]
    qml.CNOT(wires=[first, second])
    qml.RY(weights[0], wires=first)
    qml.RY(weights[1], wires=second)
n_wires = 4
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MPS.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
#statepreparation(x)
qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
#for i in range(4):
#qml.Hadamard(wires=i)
#qml.RY(x[i], wires=i)
#qml.Hadamard(wires=i)
#qml.RX(x[i+4], wires=i)
#qml.RX(x[i+8], wires=i)
#qml.CNOT(wires=[0,1])
#qml.CNOT(wires=[1,2])
#qml.CNOT(wires=[2,3])
#qml.CNOT(wires=[3,0])
#qml.CNOT(wires=[4,0])
#qml.CNOT(wires=[0,1])
#qml.CNOT(wires=[1,2])
#qml.CNOT(wires=[2,3])
#qml.CNOT(wires=[3,0])
for w in weights:
#layer_1(w[:13])
#layer_2(w[12:21])
#layer_2(w[16:24])
#layer_3(w[21:])
qml.MPS(range(n_wires),n_block_wires,block, n_params_block, w)
#print(w)
#print(x)
return qml.expval(qml.PauliZ(3))
def variational_classifier(weights, bias, x):
return circuit(weights, x) + bias
def square_loss(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
loss = loss + (l - p) ** 2
loss = loss / len(labels)
return loss
def accuracy(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
if abs(l - p) < 1e-5:
loss = loss + 1
loss = loss / len(labels)
return loss
def cost(weights, bias, X, Y):
#print(1)
predictions = [variational_classifier(weights, bias, x) for x in X]
return square_loss(Y, predictions)
np.random.seed(0)
num_layers = 1
weights_init = 2*np.pi * np.random.randn(num_layers,3, 2, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3,4,5,6,7])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
print(i[0])
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
# Update the weights by one optimizer step
batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
feats_train_batch = X_train_70[batch_index]
Y_train_batch = y_train_70[batch_index]
weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
# Compute predictions on train and validation set
predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
# Compute accuracy on train and validation set
acc_tra = accuracy(y_train_70, predictions_train)
acc_val = accuracy(y_test_30, predictions_val)
cost_train = cost(weights, bias,X_train, y_train)
cost_g.append(cost_train)
acc_train.append(acc_tra)
acc_test.append(acc_val)
clear_output(wait=True)
plt.plot(cost_g,label='cost')
plt.plot(acc_train,label='acc_train')
plt.plot(acc_test,label='acc_test')
plt.legend(['cost','acc_train','acc_test'])
plt.show()
print(
"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost_train, acc_tra, acc_val)
)
print(weights)
x_test = []
for x in X_test.tolist():
if sum(x) == 0:
x[0]=1
x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
y_test_pred = []
for i in y_pred:
if i < 0:
y_test_pred.append(-1)
else:
y_test_pred.append(1)
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
y_pred_1 = [int(i) for i in y_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mps_1_layers_std.csv', 'w') as fp:
fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#!pip install pennylane
#improt pennylane dependnecies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
dev = qml.device("default.qubit", wires=4)
def layer_1(W):
qml.RY(W[0], wires=0)
qml.RY(W[1], wires=1)
qml.RY(W[2], wires=2)
qml.RY(W[3], wires=3)
qml.Hadamard(wires=0)
qml.Hadamard(wires=1)
qml.Hadamard(wires=2)
qml.Hadamard(wires=3)
qml.RZ(W[4], wires=0)
qml.RZ(W[5], wires=1)
qml.RZ(W[6], wires=2)
qml.RZ(W[7], wires=3)
qml.CNOT(wires=[1,0])
qml.RY(W[8], wires=1)
qml.CNOT(wires=[2,0])
qml.RY(W[9], wires=2)
qml.CNOT(wires=[3,0])
qml.RY(W[10], wires=3)
qml.CNOT(wires=[2,1])
qml.RY(W[11], wires=2)
qml.CNOT(wires=[3,1])
qml.RY(W[12], wires=3)
# qml.Hadamard(wires=0)
# qml.Hadamard(wires=1)
# qml.Hadamard(wires=2)
# qml.Hadamard(wires=3)
#qml.CNOT(wires=[1,0])
#qml.CNOT(wires=[2,0])
#qml.CNOT(wires=[3,0])
def block(weights, wires):
qml.CNOT(wires=[wires[0],wires[1]])
qml.RY(weights[0], wires=wires[0])
qml.RY(weights[1], wires=wires[1])
n_wires = 4
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MPS.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
#statepreparation(x)
qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
#for i in range(4):
#qml.Hadamard(wires=i)
#qml.RY(x[i], wires=i)
#qml.Hadamard(wires=i)
#qml.RX(x[i+4], wires=i)
#qml.RX(x[i+8], wires=i)
#qml.CNOT(wires=[0,1])
#qml.CNOT(wires=[1,2])
#qml.CNOT(wires=[2,3])
#qml.CNOT(wires=[3,0])
#qml.CNOT(wires=[4,0])
#qml.CNOT(wires=[0,1])
#qml.CNOT(wires=[1,2])
#qml.CNOT(wires=[2,3])
#qml.CNOT(wires=[3,0])
for w in weights:
#layer_1(w[:13])
#layer_2(w[12:21])
#layer_2(w[16:24])
#layer_3(w[21:])
qml.MPS(range(n_wires),n_block_wires,block, n_params_block, w)
#print(w)
#print(x)
return qml.expval(qml.PauliZ(3))
def variational_classifier(weights, bias, x):
return circuit(weights, x) + bias
def square_loss(labels, predictions):
    """Mean squared error between ``labels`` and ``predictions``."""
    total = 0
    for target, guess in zip(labels, predictions):
        total += (target - guess) ** 2
    return total / len(labels)
def accuracy(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
if abs(l - p) < 1e-5:
loss = loss + 1
loss = loss / len(labels)
return loss
def cost(weights, bias, X, Y):
#print(1)
predictions = [variational_classifier(weights, bias, x) for x in X]
return square_loss(Y, predictions)
np.random.seed(0)
num_layers = 2
weights_init = 2*np.pi * np.random.randn(num_layers,3, 2, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3,4,5,6,7])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
print(i[0])
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
# Update the weights by one optimizer step
batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
feats_train_batch = X_train_70[batch_index]
Y_train_batch = y_train_70[batch_index]
weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
# Compute predictions on train and validation set
predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
# Compute accuracy on train and validation set
acc_tra = accuracy(y_train_70, predictions_train)
acc_val = accuracy(y_test_30, predictions_val)
cost_train = cost(weights, bias,X_train, y_train)
cost_g.append(cost_train)
acc_train.append(acc_tra)
acc_test.append(acc_val)
clear_output(wait=True)
plt.plot(cost_g,label='cost')
plt.plot(acc_train,label='acc_train')
plt.plot(acc_test,label='acc_test')
plt.legend(['cost','acc_train','acc_test'])
plt.show()
print(
"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost_train, acc_tra, acc_val)
)
print(weights)
x_test = []
for x in X_test.tolist():
if sum(x) == 0:
x[0]=1
x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
y_test_pred = []
for i in y_pred:
if i < 0:
y_test_pred.append(-1)
else:
y_test_pred.append(1)
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
y_pred_1 = [int(i) for i in y_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mps_2_layers.csv', 'w') as fp:
fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
%load_ext autoreload
%autoreload 2
#!pip install pennylane
#improt pennylane dependnecies
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# load the csv files
import pandas as pd
# plot the historical acc and cost
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
dev = qml.device("default.qubit", wires=4)
def layer_1(W):
qml.RY(W[0], wires=0)
qml.RY(W[1], wires=1)
qml.RY(W[2], wires=2)
qml.RY(W[3], wires=3)
qml.Hadamard(wires=0)
qml.Hadamard(wires=1)
qml.Hadamard(wires=2)
qml.Hadamard(wires=3)
qml.RZ(W[4], wires=0)
qml.RZ(W[5], wires=1)
qml.RZ(W[6], wires=2)
qml.RZ(W[7], wires=3)
qml.CNOT(wires=[1,0])
qml.RY(W[8], wires=1)
qml.CNOT(wires=[2,0])
qml.RY(W[9], wires=2)
qml.CNOT(wires=[3,0])
qml.RY(W[10], wires=3)
qml.CNOT(wires=[2,1])
qml.RY(W[11], wires=2)
qml.CNOT(wires=[3,1])
qml.RY(W[12], wires=3)
# qml.Hadamard(wires=0)
# qml.Hadamard(wires=1)
# qml.Hadamard(wires=2)
# qml.Hadamard(wires=3)
#qml.CNOT(wires=[1,0])
#qml.CNOT(wires=[2,0])
#qml.CNOT(wires=[3,0])
def block(weights, wires):
qml.CNOT(wires=[wires[0],wires[1]])
qml.RY(weights[0], wires=wires[0])
qml.RY(weights[1], wires=wires[1])
n_wires = 4
n_block_wires = 2
n_params_block = 2
n_blocks = qml.MPS.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
#statepreparation(x)
qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
#for i in range(4):
#qml.Hadamard(wires=i)
#qml.RY(x[i], wires=i)
#qml.Hadamard(wires=i)
#qml.RX(x[i+4], wires=i)
#qml.RX(x[i+8], wires=i)
#qml.CNOT(wires=[0,1])
#qml.CNOT(wires=[1,2])
#qml.CNOT(wires=[2,3])
#qml.CNOT(wires=[3,0])
#qml.CNOT(wires=[4,0])
#qml.CNOT(wires=[0,1])
#qml.CNOT(wires=[1,2])
#qml.CNOT(wires=[2,3])
#qml.CNOT(wires=[3,0])
for w in weights:
#layer_1(w[:13])
#layer_2(w[12:21])
#layer_2(w[16:24])
#layer_3(w[21:])
qml.MPS(range(n_wires),n_block_wires,block, n_params_block, w)
#print(w)
#print(x)
return qml.expval(qml.PauliZ(3))
def variational_classifier(weights, bias, x):
return circuit(weights, x) + bias
def square_loss(labels, predictions):
loss = 0
for l, p in zip(labels, predictions):
loss = loss + (l - p) ** 2
loss = loss / len(labels)
return loss
def accuracy(labels, predictions):
    """Fraction of predictions matching their labels to within 1e-5."""
    correct = sum(1 for target, guess in zip(labels, predictions) if abs(target - guess) < 1e-5)
    return correct / len(labels)
def cost(weights, bias, X, Y):
#print(1)
predictions = [variational_classifier(weights, bias, x) for x in X]
return square_loss(Y, predictions)
np.random.seed(0)
num_layers = 4
weights_init = 2*np.pi * np.random.randn(num_layers,3, 2, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3,4,5,6,7])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
print(i[0])
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
# Update the weights by one optimizer step
batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
feats_train_batch = X_train_70[batch_index]
Y_train_batch = y_train_70[batch_index]
weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
# Compute predictions on train and validation set
predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
# Compute accuracy on train and validation set
acc_tra = accuracy(y_train_70, predictions_train)
acc_val = accuracy(y_test_30, predictions_val)
cost_train = cost(weights, bias,X_train, y_train)
cost_g.append(cost_train)
acc_train.append(acc_tra)
acc_test.append(acc_val)
clear_output(wait=True)
plt.plot(cost_g,label='cost')
plt.plot(acc_train,label='acc_train')
plt.plot(acc_test,label='acc_test')
plt.legend(['cost','acc_train','acc_test'])
plt.show()
print(
"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost_train, acc_tra, acc_val)
)
print(weights)
# Normalize each test vector to unit L2 norm.  Guard against the all-zero
# vector: we check `not any(x)` rather than the original `sum(x) == 0`,
# because a vector with cancelling entries (e.g. [1, -1]) sums to zero but
# has a nonzero norm and must not be altered.
x_test = []
for x in X_test.tolist():
    if not any(x):
        x[0] = 1
    x_test.append(np.asarray(x) / np.linalg.norm(x))
x_test[0]
y_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
y_test_pred = []
for i in y_pred:
if i < 0:
y_test_pred.append(-1)
else:
y_test_pred.append(1)
# Evaluate the test-set predictions with the standard sklearn metrics,
# draw a confusion-matrix heatmap, and save the labels to CSV.
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# NOTE(review): rebinding `accuracy` shadows the accuracy() helper used
# by the training loop above; `auc` below shadows the imported function.
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# Persist the predictions, one label per line.
y_pred_1 = [int(i) for i in y_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'mps_4_layers.csv', 'w') as fp:
fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
# Notebook setup for the 1-layer TTN classifier: PennyLane + plotting
# imports and the supply-chain train/test CSVs (16 feature columns,
# label in column index 16).
%load_ext autoreload
%autoreload 2
# pip install pennylane
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
import tensorflow as tf
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# 4-qubit tree-tensor-network (TTN) ansatz over amplitude-embedded features.
dev = qml.device("default.qubit", wires=4)
def block(weights, wires):
# Two-qubit TTN building block: entangle, then rotate each wire.
qml.CNOT(wires=[wires[0],wires[1]])
qml.RY(weights[0], wires=wires[0])
qml.RY(weights[1], wires=wires[1])
n_wires = 4
n_block_wires = 2
n_params_block = 2
n_blocks = qml.TTN.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
# NOTE(review): pad_with=True is passed where a pad *value* is expected;
# confirm a padding value of 1 (coerced from True) is intentional.
qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
for w in weights:
qml.TTN(range(n_wires),n_block_wires,block, n_params_block, w)
#print(w)
#print(x)
return qml.expval(qml.PauliZ(3))
def variational_classifier(weights, bias, x):
    """Classifier score: TTN circuit expectation plus a trainable bias."""
    score = circuit(weights, x)
    return score + bias
def square_loss(labels, predictions):
    """Mean squared error between paired labels and predictions."""
    total = sum((lab - pred) ** 2 for lab, pred in zip(labels, predictions))
    return total / len(labels)
def accuracy(labels, predictions):
    """Fraction of predictions within 1e-5 of the matching label."""
    hits = sum(1 for lab, pred in zip(labels, predictions) if abs(lab - pred) < 1e-5)
    return hits / len(labels)
def cost(weights, bias, X, Y):
    """Mean squared error of the variational classifier over (X, Y)."""
    scores = [variational_classifier(weights, bias, sample) for sample in X]
    return square_loss(Y, scores)
# Initialise trainable parameters: one layer of TTN weights drawn from
# 2π·N(0,1) plus a scalar bias, then draw the circuit once for inspection.
np.random.seed(0)
num_layers = 1
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
print(i[0])
# Relabel classes to ±1 (to match the sign of the PauliZ expectation),
# shuffle the training data, and set up the optimizer and history lists.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
# Training loop: 50 iterations; each draws a fresh 90/10 train/validation
# split and takes one mini-batch optimizer step, then redraws live curves.
for it in range(50):
X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
# Update the weights by one optimizer step
batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
feats_train_batch = X_train_70[batch_index]
Y_train_batch = y_train_70[batch_index]
weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
# Compute predictions on train and validation set
predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
# Compute accuracy on train and validation set
acc_tra = accuracy(y_train_70, predictions_train)
acc_val = accuracy(y_test_30, predictions_val)
# NOTE(review): cost is re-evaluated on the full training set every
# iteration — this is the slow step of the loop.
cost_train = cost(weights, bias,X_train, y_train)
cost_g.append(cost_train)
acc_train.append(acc_tra)
acc_test.append(acc_val)
clear_output(wait=True)
plt.plot(cost_g,label='cost')
plt.plot(acc_train,label='acc_train')
plt.plot(acc_test,label='acc_test')
plt.legend(['cost','acc_train','acc_test'])
plt.show()
print(
"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost_train, acc_tra, acc_val)
)
print(weights)
# Normalise each test sample to a unit vector for amplitude embedding;
# all-zero rows get their first entry set to 1 to avoid a zero norm.
x_test = []
for sample in X_test.tolist():
    if sum(sample) == 0:
        sample[0] = 1
    x_test.append(sample / np.linalg.norm(sample))
x_test[0]
y_test_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
# Evaluate test-set predictions with the standard sklearn metrics and
# draw a confusion-matrix heatmap.
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# NOTE(review): rebinding `accuracy` shadows the accuracy() helper used
# by the training loop above; `auc` below shadows the imported function.
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
#precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# Persist the test-set predictions, one ±1 label per line.
# Fix: this section stores its predictions in `y_test_pred`; `y_pred`
# is never defined here and would raise NameError at runtime.
y_pred_1 = [int(i) for i in y_test_pred]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'ttn_1_layers.csv', 'w') as fp:
    fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
# Notebook setup for the TTN classifier with standardised features:
# PennyLane + plotting imports, CSV load (16 features, label col 16),
# then a StandardScaler fit on train and applied to both splits.
%load_ext autoreload
%autoreload 2
# pip install pennylane
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pandas as pd
import seaborn as sns
from IPython.display import clear_output
clear_output(wait=False)
import os
import tensorflow as tf
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
# Standardise features; the scaler is fit on the training split only.
scale = StandardScaler()
scale.fit(X_train)
X_train = scale.transform(X_train)
X_test = scale.transform(X_test)
# 4-qubit TTN ansatz, classifier helpers, and parameter initialisation
# (duplicated across the notebook's experiment sections; one TTN layer).
dev = qml.device("default.qubit", wires=4)
def block(weights, wires):
# Two-qubit TTN building block: entangle, then rotate each wire.
qml.CNOT(wires=[wires[0],wires[1]])
qml.RY(weights[0], wires=wires[0])
qml.RY(weights[1], wires=wires[1])
n_wires = 4
n_block_wires = 2
n_params_block = 2
n_blocks = qml.TTN.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
# NOTE(review): pad_with=True is passed where a pad *value* is expected;
# confirm a padding value of 1 (coerced from True) is intentional.
qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
for w in weights:
qml.TTN(range(n_wires),n_block_wires,block, n_params_block, w)
#print(w)
#print(x)
return qml.expval(qml.PauliZ(3))
def variational_classifier(weights, bias, x):
# Circuit expectation shifted by a trainable bias.
return circuit(weights, x) + bias
def square_loss(labels, predictions):
# Mean squared error.
loss = 0
for l, p in zip(labels, predictions):
loss = loss + (l - p) ** 2
loss = loss / len(labels)
return loss
def accuracy(labels, predictions):
# Fraction of predictions within 1e-5 of the label.
loss = 0
for l, p in zip(labels, predictions):
if abs(l - p) < 1e-5:
loss = loss + 1
loss = loss / len(labels)
return loss
def cost(weights, bias, X, Y):
#print(1)
predictions = [variational_classifier(weights, bias, x) for x in X]
return square_loss(Y, predictions)
# One TTN layer; weights ~ 2π·N(0,1), scalar trainable bias.
np.random.seed(0)
num_layers = 1
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
print(i[0])
# Relabel to ±1, shuffle, then run 50 training iterations with fresh
# 90/10 splits and one mini-batch Nesterov step per iteration.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
# Update the weights by one optimizer step
batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
feats_train_batch = X_train_70[batch_index]
Y_train_batch = y_train_70[batch_index]
weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
# Compute predictions on train and validation set
predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
# Compute accuracy on train and validation set
acc_tra = accuracy(y_train_70, predictions_train)
acc_val = accuracy(y_test_30, predictions_val)
cost_train = cost(weights, bias,X_train, y_train)
cost_g.append(cost_train)
acc_train.append(acc_tra)
acc_test.append(acc_val)
clear_output(wait=True)
plt.plot(cost_g,label='cost')
plt.plot(acc_train,label='acc_train')
plt.plot(acc_test,label='acc_test')
plt.legend(['cost','acc_train','acc_test'])
plt.show()
print(
"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost_train, acc_tra, acc_val)
)
print(weights)
# Normalise test samples (all-zero rows get x[0]=1 to avoid a zero norm),
# predict ±1 labels, report sklearn metrics, and save predictions to CSV.
x_test = []
for x in X_test.tolist():
if sum(x) == 0:
x[0]=1
x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_test_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# NOTE(review): rebinding `accuracy` shadows the accuracy() helper above.
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
#precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
y_pred_1 = [int(i) for i in y_test_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'ttn_1_layers_standard.csv', 'w') as fp:
fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
# TTN classifier with 2 layers: setup, data load, 4-qubit TTN ansatz,
# classifier helpers, and parameter initialisation.
%load_ext autoreload
%autoreload 2
# pip install pennylane
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from IPython.display import clear_output
clear_output(wait=False)
import os
import tensorflow as tf
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
dev = qml.device("default.qubit", wires=4)
def block(weights, wires):
# Two-qubit TTN building block: entangle, then rotate each wire.
qml.CNOT(wires=[wires[0],wires[1]])
qml.RY(weights[0], wires=wires[0])
qml.RY(weights[1], wires=wires[1])
n_wires = 4
n_block_wires = 2
n_params_block = 2
n_blocks = qml.TTN.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
# NOTE(review): pad_with=True is passed where a pad *value* is expected;
# confirm a padding value of 1 (coerced from True) is intentional.
qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
for w in weights:
qml.TTN(range(n_wires),n_block_wires,block, n_params_block, w)
#print(w)
#print(x)
return qml.expval(qml.PauliZ(3))
def variational_classifier(weights, bias, x):
# Circuit expectation shifted by a trainable bias.
return circuit(weights, x) + bias
def square_loss(labels, predictions):
# Mean squared error.
loss = 0
for l, p in zip(labels, predictions):
loss = loss + (l - p) ** 2
loss = loss / len(labels)
return loss
def accuracy(labels, predictions):
# Fraction of predictions within 1e-5 of the label.
loss = 0
for l, p in zip(labels, predictions):
if abs(l - p) < 1e-5:
loss = loss + 1
loss = loss / len(labels)
return loss
def cost(weights, bias, X, Y):
#print(1)
predictions = [variational_classifier(weights, bias, x) for x in X]
return square_loss(Y, predictions)
# Two TTN layers; weights ~ 2π·N(0,1), scalar trainable bias.
np.random.seed(0)
num_layers = 2
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
print(i[0])
# Relabel to ±1, shuffle, run 50 training iterations (fresh 90/10 split
# and one mini-batch step each), then predict on the test set.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
# Update the weights by one optimizer step
batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
feats_train_batch = X_train_70[batch_index]
Y_train_batch = y_train_70[batch_index]
weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
# Compute predictions on train and validation set
predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
# Compute accuracy on train and validation set
acc_tra = accuracy(y_train_70, predictions_train)
acc_val = accuracy(y_test_30, predictions_val)
cost_train = cost(weights, bias,X_train, y_train)
cost_g.append(cost_train)
acc_train.append(acc_tra)
acc_test.append(acc_val)
clear_output(wait=True)
plt.plot(cost_g,label='cost')
plt.plot(acc_train,label='acc_train')
plt.plot(acc_test,label='acc_test')
plt.legend(['cost','acc_train','acc_test'])
plt.show()
print(
"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost_train, acc_tra, acc_val)
)
print(weights)
x_test = []
for x in X_test.tolist():
# NOTE(review): unlike the other sections (which only fix all-zero rows),
# this adds 1 to the first feature of EVERY sample before normalising —
# confirm this is intentional and not a lost `if sum(x) == 0:` guard.
x[0]+=1
x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_test_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
# Evaluate test-set predictions with the standard sklearn metrics and
# draw a confusion-matrix heatmap.
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# NOTE(review): rebinding `accuracy` shadows the accuracy() helper above;
# `auc` below shadows the imported function.
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
# Persist the test-set predictions, one ±1 label per line.
# Fix: this section stores its predictions in `y_test_pred`; `y_pred`
# is never defined here and would raise NameError at runtime.
y_pred_1 = [int(i) for i in y_test_pred]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'ttn_2_layers.csv', 'w') as fp:
    fp.writelines(y_pred_1)
|
https://github.com/alejomonbar/Quantum-Supply-Chain-Manager
|
alejomonbar
|
# TTN classifier with 4 layers: setup, data load, 4-qubit TTN ansatz,
# classifier helpers, and parameter initialisation.
%load_ext autoreload
%autoreload 2
# pip install pennylane
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from IPython.display import clear_output
clear_output(wait=False)
import os
import tensorflow as tf
data_train = pd.read_csv("dataset/fair_train.csv")
X_train,y_train = data_train[data_train.columns[:16]].values, data_train[data_train.columns[16]].values
data_test = pd.read_csv("dataset/classic_test.csv")
X_test,y_test = data_test[data_test.columns[:16]].values, data_test[data_test.columns[16]].values
(X_train.shape, y_train.shape),(X_test.shape, y_test.shape)
dev = qml.device("default.qubit", wires=4)
def block(weights, wires):
# Two-qubit TTN building block: entangle, then rotate each wire.
qml.CNOT(wires=[wires[0],wires[1]])
qml.RY(weights[0], wires=wires[0])
qml.RY(weights[1], wires=wires[1])
n_wires = 4
n_block_wires = 2
n_params_block = 2
n_blocks = qml.TTN.get_n_blocks(range(n_wires),n_block_wires)
n_blocks
@qml.qnode(dev)
def circuit(weights, x):
# NOTE(review): pad_with=True is passed where a pad *value* is expected;
# confirm a padding value of 1 (coerced from True) is intentional.
qml.AmplitudeEmbedding(x, wires=[0,1,2,3],normalize=True,pad_with=True)
for w in weights:
qml.TTN(range(n_wires),n_block_wires,block, n_params_block, w)
#print(w)
#print(x)
return qml.expval(qml.PauliZ(3))
def variational_classifier(weights, bias, x):
# Circuit expectation shifted by a trainable bias.
return circuit(weights, x) + bias
def square_loss(labels, predictions):
# Mean squared error.
loss = 0
for l, p in zip(labels, predictions):
loss = loss + (l - p) ** 2
loss = loss / len(labels)
return loss
def accuracy(labels, predictions):
# Fraction of predictions within 1e-5 of the label.
loss = 0
for l, p in zip(labels, predictions):
if abs(l - p) < 1e-5:
loss = loss + 1
loss = loss / len(labels)
return loss
def cost(weights, bias, X, Y):
#print(1)
predictions = [variational_classifier(weights, bias, x) for x in X]
return square_loss(Y, predictions)
# Four TTN layers; weights ~ 2π·N(0,1), scalar trainable bias.
np.random.seed(0)
num_layers = 4
weights_init = 2*np.pi * np.random.randn(num_layers,n_blocks, n_params_block, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print(weights_init, bias_init)
print(qml.draw(circuit,expansion_strategy='device',wire_order=[0,1,2,3])(weights_init,np.asarray(X_train[0])))
for i in weights_init:
print(i[0])
# Relabel to ±1, shuffle, run 50 training iterations (fresh 90/10 split
# and one mini-batch step each), then predict on the test set.
y_train = np.where(y_train < 1, -1, y_train)
y_test = np.where(y_test < 1, -1, y_test)
from sklearn.utils import shuffle
X,y = shuffle(X_train, y_train, random_state=0)
from sklearn.model_selection import train_test_split
opt = NesterovMomentumOptimizer(0.4)
batch_size = 32
num_data = len(y_train)
num_train = 0.9
# train the variational classifier
weights = weights_init
bias = bias_init
print()
cost_g = []
acc_train = []
acc_test = []
plt.show()
for it in range(50):
X_train_70, X_test_30, y_train_70, y_test_30 =train_test_split(np.asarray(X), np.asarray(y), train_size=num_train, test_size=1.0-num_train, shuffle=True)
# Update the weights by one optimizer step
batch_index = np.random.randint(0, len(X_train_70), (batch_size,))
feats_train_batch = X_train_70[batch_index]
Y_train_batch = y_train_70[batch_index]
weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)
# Compute predictions on train and validation set
predictions_train = [np.sign(variational_classifier(weights, bias, f)) for f in X_train_70]
predictions_val = [np.sign(variational_classifier(weights, bias, f)) for f in X_test_30]
# Compute accuracy on train and validation set
acc_tra = accuracy(y_train_70, predictions_train)
acc_val = accuracy(y_test_30, predictions_val)
cost_train = cost(weights, bias,X_train, y_train)
cost_g.append(cost_train)
acc_train.append(acc_tra)
acc_test.append(acc_val)
clear_output(wait=True)
plt.plot(cost_g,label='cost')
plt.plot(acc_train,label='acc_train')
plt.plot(acc_test,label='acc_test')
plt.legend(['cost','acc_train','acc_test'])
plt.show()
print(
"Iter: {:5d} | Cost: {:0.7f} | Acc train: {:0.7f} | Acc validation: {:0.7f} "
"".format(it + 1, cost_train, acc_tra, acc_val)
)
print(weights)
x_test = []
for x in X_test.tolist():
# NOTE(review): unlike the earlier sections (which only fix all-zero rows),
# this adds 1 to the first feature of EVERY sample before normalising —
# confirm this is intentional and not a lost `if sum(x) == 0:` guard.
x[0]+=1
x_test.append( x/ np.linalg.norm(x))
x_test[0]
y_test_pred = [np.sign(variational_classifier(weights, bias, f)) for f in x_test]
# Evaluate test-set predictions with the standard sklearn metrics, draw a
# confusion-matrix heatmap, and save the ±1 labels to CSV.
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.preprocessing import StandardScaler
# demonstration of calculating metrics for a neural network model using sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
# NOTE(review): rebinding `accuracy` shadows the accuracy() helper above;
# `auc` below shadows the imported function.
accuracy = accuracy_score(y_test, y_test_pred)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, y_test_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, y_test_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, y_test_pred)
print('F1 score: %f' % f1)
# kappa
kappa = cohen_kappa_score(y_test, y_test_pred)
print('Cohens kappa: %f' % kappa)
# ROC AUC
auc = roc_auc_score(y_test, y_test_pred)
print('ROC AUC: %f' % auc)
# confusion matrix
test_matrix = confusion_matrix(y_test, y_test_pred)
print(test_matrix)
ax = sns.heatmap(test_matrix, annot=True, cmap='Blues', fmt='g')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
ax.xaxis.set_ticklabels(['0','1'])
ax.yaxis.set_ticklabels(['0','1'])
## Display the visualization of the Confusion Matrix.
plt.show()
y_pred_1 = [int(i) for i in y_test_pred ]
y_pred_1 = ["{}\n".format(i) for i in y_pred_1]
with open(r'ttn_4_layers.csv', 'w') as fp:
fp.writelines(y_pred_1)
|
https://github.com/adarshisme/QiskitBiskitGlobal
|
adarshisme
|
!pip install qiskit
!pip install matplotlib
!pip install sympy
from tqdm import tqdm
import numpy as np
import math
import qiskit
from qiskit import circuit
from qiskit.circuit.random import random_circuit
import copy
import matplotlib as mpl
import matplotlib.pyplot as plt
from qiskit.quantum_info import PTM, Chi, Statevector, DensityMatrix, partial_trace
from qiskit import transpile, QuantumCircuit, QuantumRegister
import qiskit.quantum_info as qi
from qiskit.providers.aer import AerSimulator
from qiskit.providers.aer.noise import NoiseModel, amplitude_damping_error
from qiskit.tools.visualization import plot_histogram
import sympy as sp
from sympy import linsolve, sympify, var, Eq, solve, solve_linear_system, Matrix, symbols
import random
def twirl_qubit(circ, dist=None, qubit=0, twirling_gate=None, r=None):
    """Apply one Pauli twirling gate to `qubit` of `circ`.

    If `twirling_gate` is None, an index is drawn from the RandomState
    `r` (with probabilities `dist` when given); otherwise the supplied
    index is applied, which lets a caller undo a previous twirl.
    Returns the applied gate index: 0=I, 1=X, 2=Y, 3=Z.
    """
    if twirling_gate is None:
        # Bug fix: the previous `if not twirling_gate` also re-randomised
        # when the caller explicitly passed 0 (identity), breaking the
        # twirl/undo pairing used in sim().
        indices = [i for i in range(4)]
        twirling_gate = (r.choice(indices, size=1, p=dist) if dist else r.choice(indices, size=1))[0]
    if twirling_gate == 1:
        circ.x(qubit)
    elif twirling_gate == 2:
        circ.y(qubit)
    elif twirling_gate == 3:
        circ.z(qubit)
    return twirling_gate
def sim(num_gates=100, noise=False, twirl=False, twirl_dist=[1, 0, 0, 0], circ=None, noise_AD=math.pi/9, noise_dephasing=math.pi/9, circ_seed=1, twirl_seed=1):
    """Append `num_gates` random single-qubit rotations to `circ` (built
    fresh if None), optionally interleaving simulated amplitude-damping
    and dephasing noise (via ancilla qubits) and Pauli twirling.

    Returns the circuit that was built/extended.
    (`twirl_dist` is only read, never mutated, so the mutable default is safe.)
    """
    # NOTE(review): these seeds clobber the `circ_seed` argument — every
    # call effectively uses circ_seed=7654. Kept to preserve existing
    # results; confirm whether the override is intentional.
    np.random.seed(0)
    circ_seed = 7654
    rand_circ = np.random.RandomState()
    rand_twirl = np.random.RandomState()
    rand_circ.seed(circ_seed)
    rand_twirl.seed(twirl_seed)
    if not circ:
        circ = QuantumCircuit(2)
        circ.initialize([1, 0], 0)
        circ.initialize([1, 0], 1)
    random_gate_set = [i for i in range(3)]  # rx/ry/rz choices for the base circuit
    twirl_set = [i for i in range(3)]  # candidate twirling gates
    special_reg_AD = 1         # next ancilla used for amplitude damping
    special_reg_dephasing = 2  # next ancilla used for dephasing
    for i in range(num_gates):
        random_gate = rand_circ.choice(random_gate_set)
        random_theta = rand_circ.uniform(low=0, high=np.pi)
        if random_gate == 0:
            circ.rx(random_theta, 0)
        elif random_gate == 1:
            circ.ry(random_theta, 0)
        elif random_gate == 2:
            circ.rz(random_theta, 0)
        if noise:
            if twirl:
                twirling_gate = twirl_qubit(circ, dist=twirl_dist, r=rand_twirl)
            # Amplitude-damping noise: controlled-RY onto a fresh ancilla,
            # then a CNOT back onto the data qubit.
            circ.cry(noise_AD, 0, special_reg_AD)
            circ.cnot(special_reg_AD, 0)
            special_reg_AD += 2
            if twirl:
                # Undo the previous twirl, then start a new one around the
                # dephasing channel.
                twirl_qubit(circ, dist=twirl_dist, twirling_gate=twirling_gate, r=rand_twirl)
                twirling_gate = twirl_qubit(circ, dist=twirl_dist, r=rand_twirl)
            # Dephasing noise: rotate another ancilla and apply controlled-Z.
            circ.ry(noise_dephasing, special_reg_dephasing)
            circ.cz(special_reg_dephasing, 0)
            special_reg_dephasing += 2
            if twirl:
                twirl_qubit(circ, dist=twirl_dist, twirling_gate=twirling_gate, r=rand_twirl)
    # Bug fix: return the circuit that was built (`circ`), not the
    # unrelated module-level qnode named `circuit`.
    return circ
def state_tomography(p):
    """Return the Pauli components [Tr(I·ρ), Tr(X·ρ), Tr(Y·ρ), Tr(Z·ρ)]
    of a 2x2 density matrix `p`."""
    rho = np.array(p)
    paulis = (
        np.array([[1, 0], [0, 1]]),
        np.array([[0, 1], [1, 0]]),
        np.array([[0, 0 - 1j], [1j, 0]]),
        np.array([[1, 0], [0, -1]]),
    )
    return np.array([np.trace(np.matmul(pauli, rho)) for pauli in paulis])
# Build one sympy equation relating an input Pauli vector (I, X, Y, Z) to
# an output Pauli vector (I_o, X_o, Y_o, Z_o). Coefficients are the real
# parts of the tomography components, rounded to 4 decimal places.
# NOTE(review): `eq = sp.Function('eq')` is dead — `eq` is rebound to the
# Eq(...) below before it is ever used.
def equation_PTM(input_list, output_list):
I, X, Y, Z = var('I X Y Z')
I_o, X_o, Y_o, Z_o = var('I_o X_o Y_o Z_o')
eq = sp.Function('eq')
first = float(np.real(input_list[0])).__round__(4)*I
second = float(np.real(input_list[1])).__round__(4)*X
third = float(np.real(input_list[2])).__round__(4)*Y
fourth = float(np.real(input_list[3])).__round__(4)*Z
fifth = float(np.real(output_list[0])).__round__(4)*I_o
sixth = float(np.real(output_list[1])).__round__(4)*X_o
seventh = float(np.real(output_list[2])).__round__(4)*Y_o
eighth = float(np.real(output_list[3])).__round__(4)*Z_o
eq = Eq(first + second + third + fourth, fifth + sixth + seventh + eighth)
return eq
import random
# Function returns a list of 4 random distributions as a list - [i_dist, x_dist, y_dist, z_dist]
def gen_dist():
    """Return a random probability distribution over 4 outcomes (sums to 1)."""
    raw = [random.random() for _ in range(4)]
    total = sum(raw)
    return [value / total for value in raw]
def conjug_dists():
    """Return the four deterministic (one-hot) distributions over I/X/Y/Z."""
    return [[1 if col == row else 0 for col in range(4)] for row in range(4)]
# Global experiment configuration: one simulated run per PTM estimate and
# small noise angles (π/100) for both noise channels.
conjug_dists()
runs = 1
noise_AD = math.pi/100
noise_dephasing = math.pi/100
# Difference between the noisy and noise-free channel representations.
# NOTE(review): PTM's first positional parameter is `noisy`; PTM's return
# shape is defined later in the file — confirm the subtraction lines up.
def error():
t = np.array(PTM(True))
f = np.array(PTM(False))
ret = t - f
return ret
# Average twirled-vs-ideal fidelity using the single-qubit average-gate-
# fidelity formula (Tr[R_ideal^T R_twirled] + 2) / 6, averaged over `runs`.
def fid(twirl_dist):
twirled, ideal = (PTM(noisy=True, twirl=True, twirl_dist=twirl_dist, noise_AD=noise_AD, noise_dephasing=noise_dephasing, ideal=False))
# print(t[0]) # t is a list of R matrices, of length [runs]
fid_sum = 0
for R_twirled, R_ideal in zip(twirled, ideal):
fid_sum += (np.trace(np.matmul(np.transpose(R_ideal), R_twirled)) + 2) / 6
fid_sum /= runs
# print(fid_sum)
return fid_sum
def ket(k):
    """Return the 2-component statevector for a named single-qubit state
    ('0', '1', '+', '-', 'i', '-i'); `k` is coerced to str first."""
    inv_sqrt2 = 1 / math.sqrt(2)
    lookup = {
        '0': [1, 0],
        '1': [0, 1],
        '+': [inv_sqrt2, inv_sqrt2],
        '-': [inv_sqrt2, -inv_sqrt2],
        'i': [inv_sqrt2, 1j * inv_sqrt2],
        '-i': [inv_sqrt2, -1j * inv_sqrt2],
    }
    return lookup[str(k)]
def init_circ(num_qubits=13, qubit_0_init='0'):
    """Build a `num_qubits` circuit with qubit 0 prepared in the named
    state ('0', '1', '+', '-', 'i', '-i') and all others in |0>."""
    qc = QuantumCircuit(num_qubits)
    qc.initialize(ket(qubit_0_init), 0)
    for wire in range(1, num_qubits):
        qc.initialize(ket('0'), wire)
    return qc
def run(noisy, state='0', num_qubits=13, twirl=False, twirl_dist=None, noise_AD=0, noise_dephasing=0, circ_seed=1):
    """Simulate `runs` random circuits starting from `state` on qubit 0
    and return the list of 2x2 reduced density matrices of qubit 0.

    Fix: the original pre-assigned `partials = np.zeros((2, 2), ...)` and
    immediately rebound it to a list; the dead array (and the manual
    while-loop counter) are removed.
    """
    partials = []
    for n in range(runs):
        circ0 = init_circ(num_qubits, qubit_0_init=state)
        # sim() mutates circ0 in place; its return value is not needed.
        sim(num_gates=6, noise=noisy, circ=circ0, noise_AD=noise_AD, twirl=twirl, noise_dephasing=noise_dephasing, twirl_dist=twirl_dist, circ_seed=1, twirl_seed=10000 + n)
        # NOTE(review): the 2**13 dimension assumes num_qubits == 13 —
        # confirm before calling with a different size.
        state_in0 = Statevector.from_int(0, 2 ** 13)
        state_out0 = state_in0.evolve(circ0)
        rho_out0 = DensityMatrix(state_out0)
        partials.append(np.array(partial_trace(rho_out0, [i for i in range(1, 13)])))
    return partials
def PTM(noisy=False, twirl=False, twirl_dist=None, noise_AD=noise_AD, noise_dephasing=noise_dephasing, ideal=False, circ_seed=1):
    """Reconstruct the Pauli Transfer Matrix of the qubit-0 channel via state
    tomography on four input states (|0>, |1>, |+>, |i>).

    Returns a list of 4x4 R matrices (one per run) when ideal=True; when
    ideal=False it returns a pair (that list, a zero-noise reference list
    computed by a recursive PTM call with noise set to 0).

    NOTE(review): the noise_AD / noise_dephasing defaults are bound to the
    module-level globals at *definition* time, not at call time.
    NOTE(review): num_qubits is hard-coded to 13 in the run() calls below.
    """
    #print(locals())
    # ket 0
    partial_rho_out0s = run(noisy=noisy, state='0', num_qubits=13, twirl=twirl, twirl_dist=twirl_dist, noise_AD=noise_AD, noise_dephasing=noise_dephasing, circ_seed=circ_seed)
    partial_rho_in0 = np.array([[1, 0], [0, 0]])
    # ket 1
    partial_rho_out1s = run(noisy=noisy, state='1', num_qubits=13, twirl=twirl, twirl_dist=twirl_dist, noise_AD=noise_AD, noise_dephasing=noise_dephasing, circ_seed=circ_seed)
    partial_rho_in1 = np.array([[0, 0], [0, 1]])
    # ket +
    partial_rho_outXs = run(noisy=noisy, state='+', num_qubits=13, twirl=twirl, twirl_dist=twirl_dist, noise_AD=noise_AD, noise_dephasing=noise_dephasing, circ_seed=circ_seed)
    partial_rho_inX = np.array([[0.5, 0.5], [0.5, 0.5]])
    # ket i
    partial_rho_outYs = run(noisy=noisy, state='i', num_qubits=13, twirl=twirl, twirl_dist=twirl_dist, noise_AD=noise_AD, noise_dephasing=noise_dephasing, circ_seed=circ_seed)
    partial_rho_inY = np.array([[0.5, -0.5j], [0.5j, 0.5]])
    # One linear system (four equations) per Monte-Carlo repetition.
    eqs = []
    for partial_rho_out0, partial_rho_out1, partial_rho_outX, partial_rho_outY in zip(partial_rho_out0s, partial_rho_out1s, partial_rho_outXs, partial_rho_outYs):
        Eq1 = equation_PTM(state_tomography(partial_rho_in0), state_tomography(partial_rho_out0))
        # print("1", Eq1)
        Eq2 = equation_PTM(state_tomography(partial_rho_in1), state_tomography(partial_rho_out1))
        # print("2", Eq2)
        Eq3 = equation_PTM(state_tomography(partial_rho_inX), state_tomography(partial_rho_outX))
        # print("3", Eq3)
        Eq4 = equation_PTM(state_tomography(partial_rho_inY), state_tomography(partial_rho_outY))
        # print("4", Eq4)
        eqs.append((Eq1, Eq2, Eq3, Eq4))
    # sympy symbols for the channel's images of I, X, Y, Z.
    I, X, Y, Z = var('I X Y Z')
    I_o, X_o, Y_o, Z_o = var('I_o X_o Y_o Z_o')
    solution_set = [linsolve([Eq1, Eq2, Eq3, Eq4], (I, X, Y, Z)) for Eq1, Eq2, Eq3, Eq4 in eqs]
    # Rebind the *_o names to the numeric Pauli matrices; the eval() below
    # resolves the sympy solution expressions against these.
    I_o = np.array([[1, 0],[0, 1]])
    X_o = np.array([[0, 1],[1, 0]])
    Y_o = np.array([[0, 0- 1j],[1j, 0]])
    Z_o = np.array([[1, 0],[0, -1]])
    R_list = []
    for solutions in solution_set:
        solutions_list = list(solutions.args[0])
        #convert sympy to numpy
        # NOTE(review): eval() on stringified sympy output is fragile — it
        # relies on the local I_o/X_o/Y_o/Z_o bindings above; safe only
        # because the input never comes from outside this program.
        solutions_list = eval(str(solutions_list))
        # print(solutions_list)
        reference_list = [I_o, X_o, Y_o, Z_o]
        R = [[0 for i in range(4)] for j in range(4)]
        for i in range(4):
            for j in range(4):
                # NOTE(review): sol_j / ref_i are assigned but unused below.
                sol_j = solutions_list[j]
                ref_i = reference_list[i]
                # R_ij = 1/2 Tr[P_i * E(P_j)] — the PTM entry.
                R[i][j] = 0.5*np.real(np.matmul(solutions_list[j], reference_list[i]).trace())
        # print(R)
        R_list.append(R)
        # print('\nR:')
        # print(R)
        # print()
    if not ideal:
        # Pair the result with a zero-noise reference PTM (recursive call).
        return (R_list, PTM(noisy=True, twirl=True, twirl_dist=twirl_dist, noise_AD=0, noise_dephasing=0, ideal=True))
    else:
        return R_list
# Scale up the noise angles and repetition count for the experiments below.
noise_AD *= 5
noise_dephasing *= 5
runs *= 2
data = {}  # maps tuple(twirl distribution) -> fidelity, filled further down
# runs = 1
# ideal_matrix = PTM(noisy=False, twirl=True, noise_AD=0, noise_dephasing=0, ideal=True)
# ideal_matrix
# dists = [gen_dist() for _ in range(5)]
# dists = conjug_dists() + [[0.25 for _ in range(4)]]
# dists = [[0.25 for i in range(4)]] + conjug_dists()
# for dist in (dists):
# data[tuple(dist)] = fid(dist)
# print(dist, data[tuple(dist)])
# sorted_data = list(map(lambda x: str(x) + ': ' + str(data[x]), reversed(sorted(data.keys(), key=lambda x: data[x]))))
# print(data)
def print_mat(mat):
    """Print a matrix to stdout, one row (its repr) per line."""
    for row_values in mat:
        print(row_values)
runs = 1
#seed = 1
matrices = []  # noisy PTMs collected across the sweep
storage = []   # NOTE(review): duplicates matrices entry-for-entry (see append below)
# Sweep circuit seeds (currently just 234) and a 5x5 grid of noise angles,
# collecting the resulting twirled noisy PTMs.
for circ_seed in range(234, 235):
    runs=1
    ideal_matrix = PTM(noisy=False, twirl=True, noise_AD=0, noise_dephasing=0, ideal=True)[0]
    print('ideal matrix for circ_seed', circ_seed, ':')
    print_mat(ideal_matrix)
    print()
    runs=1
    for noise_mult in range(5, 26, 5):
        noise_AD = noise_mult * math.pi/100
        #print("AD error angle =", noise_mult/100)
        for noise_dephase_mult in range(5, 26, 5):
            #print("Dephasing error angle=", noise_dephase_mult/100)
            noise_dephasing = noise_dephase_mult * math.pi/100
            noisy_mat = PTM(noisy=True, twirl=True, noise_AD=noise_AD, noise_dephasing=noise_dephasing, ideal=True)[0]
            # just_AD_mat = PTM(noisy=True, twirl=True, noise_AD=noise_AD, noise_dephasing=0, ideal=True)[0]
            matrices.append(noisy_mat)
            storage.append((matrices[-1]))
            #print(matrices[-1])
print(storage)
#returns a data set of matrices and takes in a numpy array a
# x = [1, 2, 3, 4, 5, 6, 7, 8]
# y = [0.6282, 0.5894, 0.485, 0.3457, 0.2083, 0.1015, 0.0366, 0.0079]
# plt.plot(x, y)
# plt.show()
# Evaluate the fidelity of 5 random twirl distributions and rank them.
dists = [gen_dist() for _ in range(5)]
for dist in (dists):
    data[tuple(dist)] = fid(dist)
    # print(dist, data[tuple(dist)])
# Human-readable "dist: fidelity" strings, best fidelity first.
sorted_data = list(map(lambda x: str(x) + ': ' + str(data[x]), reversed(sorted(data.keys(), key=lambda x: data[x]))))
#print(data)
# training = []
# dists = conjug_dists()
# for seed in range(10):
# for dist in dists:
# training.append(fid(twirl_dist=dist))
# training
|
https://github.com/adarshisme/QiskitBiskitGlobal
|
adarshisme
|
import pandas as pd
import numpy as np
import torch
from torch.autograd import Variable
import sklearn
from sklearn.model_selection import train_test_split
# Enumerate the amplitude-damping / dephasing feature values used to label the
# dataset: every (ad, dephasing) pair on the [0.05, 0.25] grid with 0.05 step.
def calc_ad_dephasing_params():
    """Return the 25 (amplitude_damping, dephasing) grid points as tuples,
    amplitude damping varying slowest."""
    step = 0.05
    return [(step * ad_i, step * deph_i)
            for ad_i in range(1, 6)
            for deph_i in range(1, 6)]
param_values = calc_ad_dephasing_params()
# Map the matrix values to new columns for use in Neural Network
def map_matrix_to_cols(noisy_matrix):
    """Return the entries of `noisy_matrix` as a flat row-major list.

    Generalized from the original hard-coded 4x4 double index loop: any
    rectangular list-of-rows (or numpy array) works, and a 4x4 input yields
    the same 16-element list as before.
    """
    return [value for row in noisy_matrix for value in row]
# Function that takes in the ideal_matrix, data (noisy matrices for various
# amplitude damping and dephasing values, and these ad/dephasing param values
# and generates a pandas dataframe
def generate_dataframe(data, param_values):
    """Build the training dataframe from PTM data.

    data[0] is treated as the ideal matrix; its 16 entries form the first 16
    feature columns of *every* row (they are constant across the dataset —
    only the noise parameters vary). Each row also carries that row's
    amplitude-damping and dephasing values and, as 'Label', the index of the
    corresponding noisy matrix in `data`.
    """
    columns = ['00','01','02','03','10','11','12','13','20','21','22','23','30','31','32','33','Amplitude Damping','Dephasing','Label']
    ideal_matrix_vals = map_matrix_to_cols(data[0])
    dataset = []
    for data_idx in range(len(data)):
        ad, dephasing = param_values[data_idx]
        # Features: 16 ideal-matrix entries + the two noise parameters;
        # label: the noisy-matrix index. (Removed the original's unused
        # `noisy_matrix = data[data_idx]` dead read.)
        dataset.append(ideal_matrix_vals + [ad, dephasing, data_idx])
    return pd.DataFrame(dataset, columns=columns)
# Function to help preprocess the data for the model
def preprocess_df(df):
    """Split `df` into standardized train/test torch tensors.

    Returns (X, Y, train_x, test_x, train_y, test_y, n_samples, n_features)
    where X/Y are the raw pandas feature/label objects, the *_x tensors are
    standardized float32 features and the *_y tensors are float32 column
    vectors of labels.
    """
    X = df.drop(columns=['Label'])
    Y = df['Label']
    train_x, test_x, train_y, test_y = train_test_split(X, Y, test_size=0.2, random_state=137)
    scaler = sklearn.preprocessing.StandardScaler()
    train_x = scaler.fit_transform(train_x)
    # BUG FIX: scale the test split with the statistics fitted on the training
    # split. The original called fit_transform here, re-fitting the scaler on
    # the test set (test-set leakage / inconsistent scaling).
    test_x = scaler.transform(test_x)
    train_x = torch.from_numpy(train_x.astype(np.float32))
    test_x = torch.from_numpy(test_x.astype(np.float32))
    # Labels: pandas Series -> list -> float32 column tensors (N, 1).
    train_y = torch.as_tensor(list(train_y), dtype=torch.float32).view(-1, 1)
    test_y = torch.as_tensor(list(test_y), dtype=torch.float32).view(-1, 1)
    n_samples, n_features = train_x.shape
    return X, Y, train_x, test_x, train_y, test_y, n_samples, n_features
# Deep Classifier Non-Linear Neural Network
class DeepClassifier(torch.nn.Module):
    """Feed-forward classifier: Linear+ReLU hidden stack, dropout, then a
    25-way output head (one class per possible noisy matrix)."""

    def __init__(self, num_input_features=18, hidden_layers=[32, 64, 128, 256, 128, 64, 32]):
        super(DeepClassifier, self).__init__()
        widths = [num_input_features] + list(hidden_layers)
        modules = []
        for fan_in, fan_out in zip(widths, widths[1:]):
            modules.extend([torch.nn.Linear(fan_in, fan_out), torch.nn.ReLU()])
        # Dropout before the head to limit overfitting.
        modules.append(torch.nn.Dropout(0.25))
        # 25 possible output noisy matrices.
        modules.append(torch.nn.Linear(widths[-1], 25))
        self.network = torch.nn.Sequential(*modules)
        # Unused in forward(); kept so the module structure (and any saved
        # state dicts) match the original exactly.
        self.activation = torch.nn.Sigmoid()

    def forward(self, x):
        return self.network(x)
# Function to calculate the fidelity for loss calculation
def calc_fidelity(predicted, expected):
    """Return (Tr[predicted^T @ expected] + 2) / 6 for two PTM matrices."""
    overlap = np.trace(np.transpose(predicted) @ expected)
    return (overlap + 2) / 6
# Function to take the matrix in list form of points to a numpy matrix
def matrix_vals_list_to_numpy_matrix(input_matrix):
    """Return the 4x4 numpy array whose row-major entries are the 16 values
    of `input_matrix`."""
    rows = [list(input_matrix[r * 4:(r + 1) * 4]) for r in range(4)]
    return np.array(rows)
# Customized loss function using fidelity for the neural net training
def quantum_loss(x, y, pred, data, base_loss):
    """Fidelity-based loss for one sample.

    x: feature tensor — first 16 entries are the (scaled) ideal-matrix values,
    x[16]/x[17] the noise parameters. pred: predicted noisy-matrix index.
    base_loss: the MSE loss tensor for this sample.

    NOTE(review): the returned value is base_loss + (1 - fidelity) minus
    int(base_loss); the base_loss tensor keeps the expression differentiable
    while the int subtraction removes (most of) its numeric contribution —
    fragile, since only the integer part is cancelled, and gradients still
    flow only through base_loss (fidelity is computed in numpy).
    """
    # Pred is the predicted index of the noisy matrix
    noisy_matrix = data[pred]
    ideal_matrix_vals = x[:16]
    # NOTE(review): the two noise parameters are read but never used here.
    amplitude_damping = x[16]
    dephasing = x[17]
    numpy_ideal_matrix = matrix_vals_list_to_numpy_matrix(ideal_matrix_vals)
    numpy_noisy_matrix = np.array(noisy_matrix)
    fidelity = calc_fidelity(numpy_noisy_matrix, numpy_ideal_matrix)
    result = 1 - fidelity
    base_loss_int = int(base_loss)
    return base_loss + result - base_loss_int
# Training function for the Neural Network
def train(model, train_x, train_y, data, learning_rate=0.001, num_epochs=5):
    """Train `model` sample-by-sample with SGD and the custom quantum_loss.

    Returns (model, mean of the per-epoch average accuracies).
    NOTE(review): batch size is effectively 1 (one optimizer step per sample),
    and base_loss compares the 25-dim prediction against a 1-element label
    tensor via MSE broadcasting — confirm this is intended.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # Use GPU if available
    model.to(device)
    # Setup the optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    calc_loss = torch.nn.MSELoss()
    accs = []
    # Train for num_epochs
    for epoch in range(num_epochs):
        # Set the model to training mode
        model.train()
        loss_vals, acc_vals = [], []
        # Loop through training data
        for train_idx in range(len(train_x)):
            x = train_x[train_idx]
            y = train_y[train_idx]
            x = x.to(device)
            y = y.to(device)
            # Forward pass through the network to get prediction
            pred = model(x)
            base_loss = calc_loss(pred, y)
            # Find the index of the prediction (highest probability)
            top_preds, top_pred_indices = torch.topk(pred, 1)
            pred_idx = top_pred_indices[0]
            # Compute the loss using custom loss function
            loss = quantum_loss(x, y, pred_idx, data, base_loss)
            acc = (pred_idx == y)
            # NOTE(review): keeping the loss tensors (with their graphs) in a
            # list holds memory for the whole epoch.
            loss_vals.append(loss)
            acc_vals.append(acc)
            # Update the model's weights step
            optimizer.zero_grad()
            # Backward Propagation
            loss.backward()
            # Gradient Descent
            optimizer.step()
        avg_loss = sum(loss_vals) / len(loss_vals)
        avg_acc = sum(acc_vals) / len(acc_vals)
        print('epoch %-3d \t loss = %0.8f \t acc = %0.20f \t' % (epoch, avg_loss, avg_acc))
        accs.append(avg_acc)
    return model, np.mean(accs)
data1 = [[[1.0, 0.0, 0.0, 0.0], [0.00470000000000001, -0.6807, 0.5656, 0.2411], [0.00509999999999999, -0.5835, -0.4381, -0.5786], [-0.019500000000000017, -0.2218, -0.5866, 0.6965]], [[1.0, 0.0, 0.0, 0.0], [0.00455, -0.56415, 0.46845, 0.16755], [0.00454999999999994, -0.51395, -0.35185, -0.52135], [-0.01934999999999998, -0.15855, -0.52385, 0.63715]], [[1.0, 0.0, 0.0, 0.0], [0.00420000000000001, -0.4124, 0.3386, 0.0767], [0.00375000000000003, -0.41315, -0.24125, -0.43515], [-0.01915, -0.07175, -0.43085, 0.55255]], [[1.0, 0.0, 0.0, 0.0], [0.00365, -0.26665, 0.20945, -0.00095], [0.00280000000000002, -0.2995, -0.1381, -0.3324], [-0.018899999999999972, 0.0175, -0.3223, 0.4596]], [[1.0, 0.0, 0.0, 0.0], [0.003, -0.154, 0.1071, -0.0462], [0.00184999999999999, -0.19185, -0.06365, -0.22745], [-0.018699999999999994, 0.0905, -0.2147, 0.3745]], [[1.0, 0.0, 0.0, 0.0], [0.0183, -0.6381, 0.5298, 0.2344], [0.0194, -0.5384, -0.4135, -0.5309], [-0.07549999999999996, -0.2139, -0.5395, 0.6372]], [[1.0, 0.0, 0.0, 0.0], [0.0176, -0.5282, 0.4389, 0.1646], [0.01735, -0.47425, -0.33215, -0.47845], [-0.07505000000000006, -0.15485, -0.48185, 0.58225]], [[1.0, 0.0, 0.0, 0.0], [0.01625, -0.38535, 0.31715, 0.07815], [0.0143, -0.3812, -0.2277, -0.3994], [-0.07430000000000003, -0.0739, -0.3963, 0.5039]], [[1.0, 0.0, 0.0, 0.0], [0.01425, -0.24835, 0.19615, 0.00395], [0.0107, -0.2763, -0.1304, -0.3051], [-0.07355, 0.00955, -0.29645, 0.41785]], [[1.0, 0.0, 0.0, 0.0], [0.01165, -0.14275, 0.10035, -0.03985], [0.00714999999999999, -0.17695, -0.06005, -0.20885], [-0.07279999999999998, 0.0777, -0.1974, 0.3391]], [[1.0, 0.0, 0.0, 0.0], [0.03925, -0.57265, 0.47475, 0.22265], [0.0403, -0.4705, -0.3754, -0.4594], [-0.16159999999999997, -0.2001, -0.4686, 0.549]], [[1.0, 0.0, 0.0, 0.0], [0.0377, -0.4732, 0.3933, 0.1587], [0.03595, -0.41425, -0.30145, -0.41405], [-0.16064999999999996, -0.14765, -0.41855, 0.50075]], [[1.0, 0.0, 0.0, 0.0], [0.03475, -0.34405, 0.28425, 0.07925], [0.02955, -0.33295, -0.20665, 
-0.34575], [-0.15925000000000006, -0.0756500000000001, -0.34425, 0.43185]], [[1.0, 0.0, 0.0, 0.0], [0.03045, -0.22055, 0.17575, 0.01055], [0.02215, -0.24125, -0.11825, -0.26425], [-0.15765000000000007, -0.00135000000000002, -0.25755, 0.35625]], [[1.0, 0.0, 0.0, 0.0], [0.0248, -0.1257, 0.0899, -0.0307], [0.01475, -0.15445, -0.05445, -0.18095], [-0.15615, 0.05955, -0.17165, 0.28715]], [[1.0, 0.0, 0.0, 0.0], [0.0653, -0.4919, 0.4064, 0.2054], [0.0639, -0.3885, -0.3272, -0.3742], [-0.26839999999999997, -0.1802, -0.3838, 0.4451]], [[1.0, 0.0, 0.0, 0.0], [0.06255, -0.40545, 0.33665, 0.14895], [0.05705, -0.34215, -0.26275, -0.33735], [-0.26695, -0.13595, -0.34285, 0.40485]], [[1.0, 0.0, 0.0, 0.0], [0.05765, -0.29345, 0.24325, 0.07855], [0.0469, -0.2749, -0.1801, -0.2818], [-0.26485000000000003, -0.07495, -0.28205, 0.34755]], [[1.0, 0.0, 0.0, 0.0], [0.05035, -0.18665, 0.15045, 0.01725], [0.0351, -0.1991, -0.103, -0.2155], [-0.26249999999999996, -0.012, -0.2111, 0.2846]], [[1.0, 0.0, 0.0, 0.0], [0.04095, -0.10515, 0.07695, -0.02045], [0.0234, -0.1273, -0.0474, -0.1477], [-0.2603, 0.0399, -0.1406, 0.2271]], [[1.0, 0.0, 0.0, 0.0], [0.09385, -0.40395, 0.33155, 0.18275], [0.0866, -0.3026, -0.2735, -0.286], [-0.38549999999999995, -0.155, -0.2955, 0.339]], [[1.0, 0.0, 0.0, 0.0], [0.08975, -0.33195, 0.27465, 0.13485], [0.07725, -0.26635, -0.21955, -0.25795], [-0.38369999999999993, -0.1195, -0.264, 0.3073]], [[1.0, 0.0, 0.0, 0.0], [0.0825, -0.2389, 0.1985, 0.075], [0.0635, -0.2139, -0.1504, -0.2156], [-0.3811, -0.0705, -0.2172, 0.2622]], [[1.0, 0.0, 0.0, 0.0], [0.0719, -0.1505, 0.1227, 0.0223], [0.0475, -0.1548, -0.086, -0.165], [-0.37820000000000004, -0.0198, -0.1626, 0.2127]], [[1.0, 0.0, 0.0, 0.0], [0.0583, -0.0836, 0.0628, -0.0108], [0.03165, -0.09895, -0.03955, -0.11315], [-0.37549999999999994, 0.0221, -0.1084, 0.1675]]]
ideal_matrix1 = [[[1.0, 0.0, 0.0, 0.0], [0.00470000000000001, -0.6807, 0.5656, 0.2411], [0.00509999999999999, -0.5835, -0.4381, -0.5786], [-0.019500000000000017, -0.2218, -0.5866, 0.6965]]]
def train_network_for_data(data, param_values):
    """Build the dataframe for `data`, train a DeepClassifier on it, and
    return (model, test_x, test_y, average_accuracy)."""
    frame = generate_dataframe(data, param_values)
    _, _, train_x, test_x, train_y, test_y, _, n_features = preprocess_df(frame)
    classifier = DeepClassifier(n_features)
    classifier, mean_acc = train(classifier, train_x, train_y, data,
                                 learning_rate=0.0001, num_epochs=50)
    return classifier, test_x, test_y, mean_acc
# Train on data1 and evaluate on the held-out split.
model, test_x, test_y, avg_acc = train_network_for_data(data1, param_values)
avg_acc  # notebook-style bare expression; no effect when run as a script
with torch.no_grad():
    model.eval()
    y_pred = model(test_x)
    top_preds, top_pred_indices = torch.topk(y_pred, 1)
    # NOTE(review): only the first row's top-1 index is taken here, so the
    # accuracy below compares a single prediction (broadcast) against every
    # test label — confirm whether the full top_pred_indices column was meant.
    pred_idx = top_pred_indices[0]
    accuracy = (pred_idx.eq(test_y).sum())/float(test_y.shape[0])
    print(accuracy.item())
# Test if model worked
def get_model_pred(model, input_x_features, input_y_label):
    """Return True iff the model's top-1 prediction for one sample matches the label.

    Args:
        model: callable mapping a feature tensor to class scores.
        input_x_features: feature tensor for a single sample.
        input_y_label: expected class index.
    """
    model_pred = model(input_x_features)
    # BUG FIX: the original read the module-level `y_pred` here instead of the
    # locally computed `model_pred`, silently ignoring this sample's prediction.
    top_preds, top_pred_indices = torch.topk(model_pred, 1)
    pred_idx = top_pred_indices[0]
    # Tensor comparison -> 0-d boolean tensor; convert to a plain Python bool.
    return bool(pred_idx == input_y_label)
# Accumulator for per-sample prediction results (filled later in the file).
predictions = []
set_1 = [[[1.0, 0.0, 0.0, 0.0], [0.01175, 0.82545, 0.05275, 0.41455], [-0.00285000000000002, 0.17875, 0.76745, -0.46215], [0.0007500000000000284, -0.36825, 0.50595, 0.71095]], [[1.0, 0.0, 0.0, 0.0], [0.01115, 0.68555, 0.05675, 0.38035], [-0.00235000000000002, 0.14245, 0.61175, -0.38255], [0.00034999999999990594, -0.31585, 0.46325, 0.68455]], [[1.0, 0.0, 0.0, 0.0], [0.01015, 0.49715, 0.05995, 0.32965], [-0.00169999999999998, 0.0963, 0.4136, -0.2761], [-0.00019999999999997797, -0.2436, 0.3981, 0.6402]], [[1.0, 0.0, 0.0, 0.0], [0.00889999999999996, 0.3082, 0.0589, 0.27], [-0.00105, 0.05395, 0.23175, -0.17035], [-0.0010499999999999954, -0.16755, 0.31875, 0.57785]], [[1.0, 0.0, 0.0, 0.0], [0.00744999999999998, 0.15725, 0.05185, 0.20915], [-0.000549999999999995, 0.02405, 0.10335, -0.08695], [-0.0020999999999999908, -0.1018, 0.2344, 0.498]], [[1.0, 0.0, 0.0, 0.0], [0.04535, 0.77275, 0.04605, 0.37885], [-0.0109, 0.1691, 0.7258, -0.433], [0.0008000000000000784, -0.3421, 0.4626, 0.6417]], [[1.0, 0.0, 0.0, 0.0], [0.0429, 0.6419, 0.0501, 0.3474], [-0.00900000000000001, 0.1348, 0.5785, -0.3584], [-0.0005000000000001115, -0.2934, 0.4233, 0.618]], [[1.0, 0.0, 0.0, 0.0], [0.03915, 0.46555, 0.05345, 0.30075], [-0.00650000000000001, 0.0911, 0.3912, -0.2587], [-0.0027499999999999747, -0.22605, 0.36375, 0.57815]], [[1.0, 0.0, 0.0, 0.0], [0.0343, 0.2887, 0.053, 0.246], [-0.00405, 0.05105, 0.21925, -0.15965], [-0.005900000000000016, -0.1553, 0.2913, 0.522]], [[1.0, 0.0, 0.0, 0.0], [0.02875, 0.14735, 0.04695, 0.19025], [-0.00205000000000001, 0.02275, 0.09775, -0.08145], [-0.009749999999999925, -0.09415, 0.21415, 0.45005]], [[1.0, 0.0, 0.0, 0.0], [0.0963, 0.6916, 0.0363, 0.3257], [-0.0224, 0.1539, 0.6608, -0.3881], [-0.005399999999999905, -0.3024, 0.3978, 0.5402]], [[1.0, 0.0, 0.0, 0.0], [0.0912, 0.5746, 0.0403, 0.2984], [-0.0185, 0.1227, 0.5267, -0.3213], [-0.008000000000000007, -0.2592, 0.364, 0.5204]], [[1.0, 0.0, 0.0, 0.0], [0.0832, 0.4169, 0.0439, 0.2579], [-0.01335, 0.08295, 
0.35615, -0.23185], [-0.012499999999999956, -0.1993, 0.3127, 0.4871]], [[1.0, 0.0, 0.0, 0.0], [0.073, 0.2586, 0.0442, 0.2105], [-0.00825000000000001, 0.04645, 0.19955, -0.14315], [-0.018650000000000055, -0.13665, 0.25025, 0.44005]], [[1.0, 0.0, 0.0, 0.0], [0.0612, 0.1321, 0.0396, 0.1623], [-0.0042, 0.0207, 0.089, -0.073], [-0.02645000000000003, -0.08255, 0.18395, 0.37965]], [[1.0, 0.0, 0.0, 0.0], [0.1582, 0.5909, 0.0254, 0.2631], [-0.03505, 0.13475, 0.57845, -0.33235], [-0.024550000000000016, -0.25405, 0.32125, 0.42305]], [[1.0, 0.0, 0.0, 0.0], [0.14995, 0.49105, 0.02925, 0.24075], [-0.02905, 0.10745, 0.46115, -0.27515], [-0.02859999999999996, -0.2174, 0.2939, 0.4077]], [[1.0, 0.0, 0.0, 0.0], [0.13695, 0.35635, 0.03305, 0.20755], [-0.02095, 0.07265, 0.31175, -0.19855], [-0.03529999999999994, -0.1669, 0.2524, 0.3819]], [[1.0, 0.0, 0.0, 0.0], [0.12025, 0.22125, 0.03415, 0.16885], [-0.01295, 0.04075, 0.17475, -0.12255], [-0.044550000000000034, -0.11405, 0.20185, 0.34535]], [[1.0, 0.0, 0.0, 0.0], [0.101, 0.1132, 0.0311, 0.1296], [-0.00659999999999999, 0.0181, 0.0779, -0.0625], [-0.05620000000000003, -0.0687, 0.1483, 0.2983]], [[1.0, 0.0, 0.0, 0.0], [0.22435, 0.48085, 0.01505, 0.19925], [-0.0465, 0.1132, 0.4861, -0.2713], [-0.06214999999999998, -0.20235, 0.24305, 0.30715]], [[1.0, 0.0, 0.0, 0.0], [0.21275, 0.39975, 0.01865, 0.18195], [-0.0385, 0.0902, 0.3875, -0.2246], [-0.06719999999999998, -0.173, 0.2222, 0.2962]], [[1.0, 0.0, 0.0, 0.0], [0.1946, 0.2902, 0.0224, 0.1564], [-0.0278, 0.061, 0.262, -0.1621], [-0.07565, -0.13245, 0.19065, 0.27775]], [[1.0, 0.0, 0.0, 0.0], [0.17115, 0.18035, 0.02415, 0.12665], [-0.01715, 0.03415, 0.14685, -0.10005], [-0.08735000000000004, -0.09025, 0.15235, 0.25145]], [[1.0, 0.0, 0.0, 0.0], [0.14405, 0.09245, 0.02265, 0.09665], [-0.00875, 0.01525, 0.06545, -0.05105], [-0.10214999999999996, -0.05395, 0.11185, 0.21755]]]
set_2 = [[[1.0, 0.0, 0.0, 0.0], [0.00510000000000001, 0.8983, 0.1618, 0.0941], [0.0, 0.1081, -0.852, 0.3958], [-0.009449999999999958, 0.14565, -0.36805, -0.83305]], [[1.0, 0.0, 0.0, 0.0], [0.0049, 0.7337, 0.1437, 0.0858], [0.0, 0.0932, -0.7901, 0.3666], [-0.009450000000000125, 0.10875, -0.30985, -0.69905]], [[1.0, 0.0, 0.0, 0.0], [0.00460000000000001, 0.5177, 0.116, 0.0709], [0.0, 0.0721, -0.6936, 0.3213], [-0.009449999999999958, 0.0630499999999999, -0.23065, -0.51665]], [[1.0, 0.0, 0.0, 0.0], [0.0042, 0.3095, 0.0832, 0.0508], [-4.99999999999945e-05, 0.04935, -0.57205, 0.26425], [-0.009500000000000064, 0.0236, -0.1497, -0.3307]], [[1.0, 0.0, 0.0, 0.0], [0.0037, 0.1516, 0.0512, 0.0297], [0.0, 0.0289, -0.4373, 0.2013], [-0.009449999999999958, -0.000250000000000028, -0.08295, -0.17785]], [[1.0, 0.0, 0.0, 0.0], [0.02005, 0.84455, 0.14895, 0.08585], [0.0, 0.1002, -0.7763, 0.3607], [-0.037349999999999994, 0.13965, -0.34345, -0.77805]], [[1.0, 0.0, 0.0, 0.0], [0.0193, 0.6897, 0.1324, 0.0786], [-4.99999999999945e-05, 0.08645, -0.71985, 0.33415], [-0.03735000000000005, 0.10465, -0.28905, -0.65275]], [[1.0, 0.0, 0.0, 0.0], [0.0181, 0.4866, 0.107, 0.0653], [0.0, 0.0669, -0.632, 0.2928], [-0.037349999999999994, 0.06115, -0.21515, -0.48245]], [[1.0, 0.0, 0.0, 0.0], [0.01645, 0.29085, 0.07685, 0.04705], [-5.00000000000222e-05, 0.04575, -0.52125, 0.24085], [-0.037349999999999994, 0.0235500000000001, -0.13955, -0.30875]], [[1.0, 0.0, 0.0, 0.0], [0.01435, 0.14245, 0.04745, 0.02765], [0.0, 0.0268, -0.3984, 0.1834], [-0.037349999999999994, 0.000649999999999984, -0.07725, -0.16605]], [[1.0, 0.0, 0.0, 0.0], [0.0438, 0.7613, 0.1295, 0.0734], [-0.000299999999999967, 0.0884, -0.6639, 0.3086], [-0.08230000000000004, 0.13, -0.3058, -0.6936]], [[1.0, 0.0, 0.0, 0.0], [0.0422, 0.6216, 0.1153, 0.0677], [-0.000300000000000022, 0.0762, -0.6157, 0.2859], [-0.08224999999999999, 0.0979500000000001, -0.25735, -0.58185]], [[1.0, 0.0, 0.0, 0.0], [0.0396, 0.4385, 0.0934, 0.0568], [-0.000250000000000028, 
0.05895, -0.54055, 0.25055], [-0.08224999999999999, 0.05795, -0.19135, -0.42995]], [[1.0, 0.0, 0.0, 0.0], [0.03595, 0.26205, 0.06735, 0.04125], [-0.000200000000000006, 0.0403, -0.4458, 0.2061], [-0.08230000000000004, 0.0233, -0.124, -0.2751]], [[1.0, 0.0, 0.0, 0.0], [0.03145, 0.12825, 0.04165, 0.02445], [-0.000149999999999983, 0.02365, -0.34075, 0.15695], [-0.08224999999999999, 0.00175, -0.06855, -0.14785]], [[1.0, 0.0, 0.0, 0.0], [0.0747, 0.6573, 0.1061, 0.0584], [-0.0015, 0.0739, -0.5319, 0.2474], [-0.14205, 0.11695, -0.25955, -0.58935]], [[1.0, 0.0, 0.0, 0.0], [0.07205, 0.53655, 0.09465, 0.05455], [-0.00140000000000001, 0.0637, -0.4932, 0.2292], [-0.14205, 0.08875, -0.21825, -0.49435]], [[1.0, 0.0, 0.0, 0.0], [0.06755, 0.37835, 0.07695, 0.04645], [-0.00125, 0.04935, -0.43305, 0.20085], [-0.14205, 0.05345, -0.16215, -0.36525]], [[1.0, 0.0, 0.0, 0.0], [0.0615, 0.226, 0.0556, 0.0342], [-0.001, 0.0337, -0.3572, 0.1652], [-0.14199999999999996, 0.0224, -0.105, -0.2336]], [[1.0, 0.0, 0.0, 0.0], [0.0538, 0.1106, 0.0346, 0.0205], [-0.000799999999999995, 0.0198, -0.273, 0.1258], [-0.14199999999999996, 0.003, -0.0579, -0.1255]], [[1.0, 0.0, 0.0, 0.0], [0.1108, 0.5425, 0.0814, 0.0428], [-0.00495000000000001, 0.05845, -0.39795, 0.18525], [-0.21395000000000003, 0.10145, -0.20935, -0.47635]], [[1.0, 0.0, 0.0, 0.0], [0.10685, 0.44275, 0.07295, 0.04065], [-0.00455, 0.05035, -0.36915, 0.17165], [-0.21389999999999998, 0.0775, -0.176, -0.3995]], [[1.0, 0.0, 0.0, 0.0], [0.1003, 0.3121, 0.0597, 0.0354], [-0.004, 0.039, -0.324, 0.1504], [-0.21385000000000004, 0.04755, -0.13065, -0.29505]], [[1.0, 0.0, 0.0, 0.0], [0.09125, 0.18635, 0.04345, 0.02665], [-0.00330000000000001, 0.0267, -0.2672, 0.1237], [-0.2138, 0.021, -0.0844, -0.1886]], [[1.0, 0.0, 0.0, 0.0], [0.08, 0.091, 0.0271, 0.0163], [-0.0025, 0.0157, -0.2043, 0.0942], [-0.21375000000000005, 0.00405, -0.04635, -0.10135]]]
set_3 = [[[1.0, 0.0, 0.0, 0.0], [9.99999999999994e-05, -0.9024, 0.1391, -0.0282], [-0.0137, 0.031, -0.0632999999999999, -0.9415], [0.00029999999999996696, -0.1436, -0.9149, 0.0597]], [[1.0, 0.0, 0.0, 0.0], [0.0002, -0.7273, 0.1119, -0.003], [-0.0126999999999999, 0.0479999999999999, -0.0557000000000001, -0.8665], [-0.00019999999999997797, -0.1245, -0.7844, 0.0563]], [[1.0, 0.0, 0.0, 0.0], [0.00025, -0.50215, 0.07685, 0.02605], [-0.01115, 0.06565, -0.0438500000000001, -0.75205], [-0.0009999999999999454, -0.097, -0.6015, 0.05]], [[1.0, 0.0, 0.0, 0.0], [0.000300000000000002, -0.2917, 0.0437, 0.0468], [-0.00919999999999999, 0.0741999999999999, -0.0298, -0.612], [-0.0019499999999999518, -0.06675, -0.40615, 0.04025]], [[1.0, 0.0, 0.0, 0.0], [0.000199999999999999, -0.1382, 0.0197, 0.0519], [-0.00714999999999999, 0.0686499999999999, -0.01615, -0.46215], [-0.002999999999999947, -0.0389, -0.2349, 0.027]], [[1.0, 0.0, 0.0, 0.0], [9.99999999999959e-05, -0.8512, 0.1312, -0.0318], [-0.0517000000000001, 0.0231000000000001, -0.0583999999999999, -0.8596], [0.0, -0.133, -0.8498, 0.0541]], [[1.0, 0.0, 0.0, 0.0], [0.00045, -0.68595, 0.10555, -0.00765], [-0.0479499999999999, 0.03965, -0.0514500000000001, -0.79095], [-0.0018000000000000238, -0.1153, -0.7286, 0.0511]], [[1.0, 0.0, 0.0, 0.0], [0.000649999999999998, -0.47335, 0.07255, 0.02045], [-0.04215, 0.05715, -0.04065, -0.68625], [-0.004650000000000043, -0.09005, -0.55885, 0.04555]], [[1.0, 0.0, 0.0, 0.0], [0.000799999999999995, -0.2748, 0.0413, 0.0408], [-0.03495, 0.06625, -0.02765, -0.55825], [-0.008399999999999963, -0.0619, -0.3773, 0.0368]], [[1.0, 0.0, 0.0, 0.0], [0.000600000000000003, -0.1301, 0.0186, 0.0465], [-0.027, 0.0619999999999999, -0.0151, -0.4213], [-0.012599999999999945, -0.0361, -0.2181, 0.0249]], [[1.0, 0.0, 0.0, 0.0], [-0.001, -0.7718, 0.1189, -0.0364], [-0.1059, 0.0121, -0.0508, -0.7377], [-0.0038499999999999646, -0.11695, -0.75075, 0.04575]], [[1.0, 0.0, 0.0, 0.0], [-0.00035, -0.62165, 0.09585, -0.01395], [-0.0982, 
0.0277999999999999, -0.045, -0.6786], [-0.007600000000000051, -0.1014, -0.6437, 0.0434]], [[1.0, 0.0, 0.0, 0.0], [0.000450000000000001, -0.42885, 0.06575, 0.01245], [-0.0864, 0.0448000000000001, -0.0357, -0.5885], [-0.013600000000000001, -0.0791, -0.4936, 0.0389]], [[1.0, 0.0, 0.0, 0.0], [0.000799999999999995, -0.2486, 0.0376, 0.0321], [-0.0717, 0.0545, -0.0245, -0.4784], [-0.021200000000000052, -0.0546, -0.3333, 0.0317]], [[1.0, 0.0, 0.0, 0.0], [0.000699999999999999, -0.1176, 0.0169, 0.0387], [-0.05565, 0.05225, -0.01345, -0.36075], [-0.029949999999999977, -0.03185, -0.19275, 0.02175]], [[1.0, 0.0, 0.0, 0.0], [-0.0044, -0.6718, 0.1035, -0.0404], [-0.1653, 0.000199999999999978, -0.0417999999999999, -0.594], [-0.015549999999999953, -0.09735, -0.62975, 0.03605]], [[1.0, 0.0, 0.0, 0.0], [-0.0031, -0.5408, 0.0834, -0.0202], [-0.1535, 0.0146999999999999, -0.0371, -0.5462], [-0.02144999999999997, -0.08455, -0.53995, 0.03435]], [[1.0, 0.0, 0.0, 0.0], [-0.00155, -0.37275, 0.05725, 0.00385], [-0.13525, 0.0308499999999999, -0.02965, -0.47335], [-0.030899999999999983, -0.066, -0.414, 0.031]], [[1.0, 0.0, 0.0, 0.0], [-0.0005, -0.2159, 0.0327, 0.0223], [-0.1125, 0.0409, -0.0206, -0.3844], [-0.04299999999999998, -0.0456, -0.2796, 0.0256]], [[1.0, 0.0, 0.0, 0.0], [-0.000300000000000002, -0.1018, 0.0149, 0.0296], [-0.0876, 0.0408, -0.0115, -0.2895], [-0.05679999999999996, -0.0268, -0.1616, 0.0179]], [[1.0, 0.0, 0.0, 0.0], [-0.011, -0.5604, 0.0863, -0.0423], [-0.2198, -0.0101, -0.0321, -0.4476], [-0.039449999999999985, -0.07655, -0.50045, 0.02625]], [[1.0, 0.0, 0.0, 0.0], [-0.00875, -0.45095, 0.06945, -0.02485], [-0.2044, 0.00290000000000001, -0.0286, -0.4113], [-0.04740000000000005, -0.0665, -0.4291, 0.0252]], [[1.0, 0.0, 0.0, 0.0], [-0.0061, -0.3105, 0.0478, -0.0038], [-0.1805, 0.0176, -0.0232, -0.3561], [-0.06010000000000004, -0.052, -0.329, 0.023]], [[1.0, 0.0, 0.0, 0.0], [-0.00395, -0.17945, 0.02735, 0.01295], [-0.1507, 0.0276, -0.0163, -0.2888], [-0.07634999999999997, 
-0.03605, -0.22215, 0.01925]], [[1.0, 0.0, 0.0, 0.0], [-0.0028, -0.0844, 0.0125, 0.0206], [-0.11805, 0.02945, -0.00925000000000001, -0.21715], [-0.09489999999999998, -0.0212, -0.1284, 0.0138]]]
set_4 = [[[1.0, 0.0, 0.0, 0.0], [0.00459999999999994, 0.5255, 0.1955, 0.7633], [0.00324999999999998, -0.74135, -0.04835, 0.53265], [-0.0044500000000000095, 0.16285, -0.89815, 0.14775]], [[1.0, 0.0, 0.0, 0.0], [0.00440000000000007, 0.4852, 0.1405, 0.728], [0.00275000000000003, -0.59105, -0.03865, 0.44095], [-0.004400000000000015, 0.1602, -0.7506, 0.1638]], [[1.0, 0.0, 0.0, 0.0], [0.00405, 0.42335, 0.07165, 0.67275], [0.00194999999999995, -0.39955, -0.02605, 0.31825], [-0.0043500000000000205, 0.15395, -0.55265, 0.18515]], [[1.0, 0.0, 0.0, 0.0], [0.00364999999999993, 0.34665, 0.0111500000000001, 0.60145], [0.00119999999999998, -0.2239, -0.0146, 0.1964], [-0.0042500000000000315, 0.14205, -0.35495, 0.20575]], [[1.0, 0.0, 0.0, 0.0], [0.00314999999999999, 0.26315, -0.02625, 0.51785], [0.000600000000000003, -0.0998, -0.00650000000000001, 0.1002], [-0.0042500000000000315, 0.12425, -0.19695, 0.22055]], [[1.0, 0.0, 0.0, 0.0], [0.0176500000000001, 0.47935, 0.18885, 0.69085], [0.0125000000000001, -0.7011, -0.0458000000000001, 0.4991], [-0.018049999999999955, 0.14605, -0.83965, 0.12815]], [[1.0, 0.0, 0.0, 0.0], [0.01685, 0.44255, 0.13665, 0.65875], [0.01035, -0.55885, -0.03645, 0.41315], [-0.017749999999999988, 0.14395, -0.70155, 0.14325]], [[1.0, 0.0, 0.0, 0.0], [0.0155999999999999, 0.386, 0.0714, 0.6084], [0.00750000000000001, -0.3779, -0.0247, 0.2982], [-0.017449999999999966, 0.13855, -0.51625, 0.16325]], [[1.0, 0.0, 0.0, 0.0], [0.01405, 0.31605, 0.01375, 0.54365], [0.00459999999999999, -0.2117, -0.0138, 0.184], [-0.01715, 0.12805, -0.33115, 0.18265]], [[1.0, 0.0, 0.0, 0.0], [0.01215, 0.23985, -0.02225, 0.46795], [0.00235, -0.09445, -0.00615, 0.09385], [-0.01709999999999995, 0.1122, -0.1833, 0.1967]], [[1.0, 0.0, 0.0, 0.0], [0.03705, 0.41085, 0.17765, 0.58435], [0.02575, -0.63835, -0.04165, 0.44735], [-0.04075000000000001, 0.12155, -0.75005, 0.10035]], [[1.0, 0.0, 0.0, 0.0], [0.0354, 0.3792, 0.13, 0.5568], [0.02135, -0.50885, -0.0332500000000001, 0.37035], 
[-0.04024999999999995, 0.12015, -0.62635, 0.11385]], [[1.0, 0.0, 0.0, 0.0], [0.03285, 0.33055, 0.0701499999999999, 0.51385], [0.0154, -0.3441, -0.0225, 0.2673], [-0.03964999999999996, 0.11615, -0.46035, 0.13185]], [[1.0, 0.0, 0.0, 0.0], [0.0295, 0.2706, 0.0172, 0.4588], [0.00955, -0.19285, -0.01265, 0.16495], [-0.03915000000000002, 0.10765, -0.29465, 0.14935]], [[1.0, 0.0, 0.0, 0.0], [0.0256, 0.2053, -0.0163, 0.3946], [0.00484999999999999, -0.08595, -0.00564999999999999, 0.08415], [-0.03905000000000003, 0.09455, -0.16235, 0.16215]], [[1.0, 0.0, 0.0, 0.0], [0.06035, 0.33025, 0.16185, 0.46105], [0.04045, -0.55895, -0.03655, 0.38305], [-0.07269999999999999, 0.0934, -0.6393, 0.0699]], [[1.0, 0.0, 0.0, 0.0], [0.0577, 0.3046, 0.12, 0.439], [0.0335, -0.4455, -0.0291, 0.3171], [-0.07194999999999996, 0.09285, -0.53355, 0.08145]], [[1.0, 0.0, 0.0, 0.0], [0.0536, 0.2654, 0.0673, 0.4046], [0.02415, -0.30125, -0.01965, 0.22885], [-0.07105000000000006, 0.09015, -0.39155, 0.09685]], [[1.0, 0.0, 0.0, 0.0], [0.04825, 0.21705, 0.02025, 0.36075], [0.01495, -0.16885, -0.01105, 0.14125], [-0.07040000000000002, 0.0841, -0.2499, 0.112]], [[1.0, 0.0, 0.0, 0.0], [0.04185, 0.16465, -0.00984999999999997, 0.30985], [0.0076, -0.0752, -0.0049, 0.072], [-0.07040000000000002, 0.0741, -0.1369, 0.1232]], [[1.0, 0.0, 0.0, 0.0], [0.08535, 0.24825, 0.14215, 0.33845], [0.05365, -0.46965, -0.03065, 0.31275], [-0.11359999999999998, 0.0659, -0.519, 0.042]], [[1.0, 0.0, 0.0, 0.0], [0.08175, 0.22885, 0.10675, 0.32185], [0.04445, -0.37445, -0.02445, 0.25885], [-0.11270000000000002, 0.0659, -0.4328, 0.0514]], [[1.0, 0.0, 0.0, 0.0], [0.07595, 0.19925, 0.06225, 0.29615], [0.03205, -0.25315, -0.01655, 0.18685], [-0.11159999999999998, 0.0646, -0.3172, 0.064]], [[1.0, 0.0, 0.0, 0.0], [0.0685, 0.1628, 0.0222, 0.2635], [0.0198, -0.1419, -0.0093, 0.1153], [-0.11099999999999999, 0.0608, -0.2017, 0.0764]], [[1.0, 0.0, 0.0, 0.0], [0.05955, 0.12335, -0.00405, 0.22595], [0.0101, -0.0632, -0.00409999999999999, 0.0588], 
[-0.11115000000000003, 0.05385, -0.10975, 0.08575]]]
set_5 = [[[1.0, 0.0, 0.0, 0.0], [-0.00395000000000001, 0.49575, 0.58495, 0.51285], [0.00414999999999999, -0.25075, 0.70175, -0.54615], [-0.0021999999999999797, -0.7479, 0.1563, 0.5461]], [[1.0, 0.0, 0.0, 0.0], [-0.00319999999999998, 0.4113, 0.4986, 0.4234], [0.00355, -0.20365, 0.59805, -0.45525], [-0.0019000000000000128, -0.6721, 0.1408, 0.4938]], [[1.0, 0.0, 0.0, 0.0], [-0.00219999999999998, 0.2988, 0.3783, 0.3035], [0.00264999999999999, -0.14115, 0.45375, -0.33315], [-0.0015000000000000013, -0.5585, 0.1188, 0.4151]], [[1.0, 0.0, 0.0, 0.0], [-0.00125, 0.18755, 0.25135, 0.18425], [0.0018, -0.0804, 0.3015, -0.2111], [-0.0008999999999999009, -0.4239, 0.0947, 0.3208]], [[1.0, 0.0, 0.0, 0.0], [-0.0005, 0.0997, 0.1421, 0.0903], [0.00109999999999999, -0.0344, 0.1704, -0.1132], [-0.00029999999999996696, -0.2871, 0.0718, 0.2235]], [[1.0, 0.0, 0.0, 0.0], [-0.01465, 0.46425, 0.54415, 0.48075], [0.0161, -0.236, 0.6527, -0.5109], [-0.008299999999999974, -0.6867, 0.1436, 0.5006]], [[1.0, 0.0, 0.0, 0.0], [-0.0119, 0.3851, 0.4638, 0.397], [0.0136, -0.1917, 0.5563, -0.4258], [-0.007249999999999979, -0.61715, 0.12915, 0.45275]], [[1.0, 0.0, 0.0, 0.0], [-0.00819999999999999, 0.2796, 0.3519, 0.2847], [0.01025, -0.13315, 0.42215, -0.31155], [-0.005649999999999988, -0.51285, 0.10875, 0.38055]], [[1.0, 0.0, 0.0, 0.0], [-0.00455, 0.17535, 0.23375, 0.17295], [0.00689999999999999, -0.076, 0.2805, -0.1972], [-0.0036499999999999866, -0.38935, 0.08645, 0.29415]], [[1.0, 0.0, 0.0, 0.0], [-0.00180000000000001, 0.0931, 0.1322, 0.0849], [0.0042, -0.0327, 0.1585, -0.1056], [-0.0014000000000000123, -0.2638, 0.0654, 0.205]], [[1.0, 0.0, 0.0, 0.0], [-0.02945, 0.41595, 0.48185, 0.43125], [0.034, -0.2131, 0.578, -0.4568], [-0.017249999999999988, -0.59495, 0.12455, 0.43265]], [[1.0, 0.0, 0.0, 0.0], [-0.02385, 0.34485, 0.41075, 0.35625], [0.0288, -0.1732, 0.4927, -0.3806], [-0.015149999999999997, -0.53475, 0.11175, 0.39125]], [[1.0, 0.0, 0.0, 0.0], [-0.01635, 0.25025, 0.31175, 0.25565], [0.02185, 
-0.12055, 0.37385, -0.27825], [-0.01194999999999996, -0.44445, 0.09385, 0.32885]], [[1.0, 0.0, 0.0, 0.0], [-0.00895000000000001, 0.15675, 0.20715, 0.15555], [0.01485, -0.06915, 0.24845, -0.17595], [-0.007949999999999957, -0.33755, 0.07425, 0.25425]], [[1.0, 0.0, 0.0, 0.0], [-0.00325, 0.08295, 0.11705, 0.07655], [0.00914999999999999, -0.03015, 0.14035, -0.09405], [-0.0035499999999999976, -0.22885, 0.05585, 0.17725]], [[1.0, 0.0, 0.0, 0.0], [-0.0444, 0.356, 0.4056, 0.3696], [0.05545, -0.18415, 0.48655, -0.38975], [-0.028000000000000025, -0.4854, 0.1019, 0.3518]], [[1.0, 0.0, 0.0, 0.0], [-0.0357, 0.295, 0.3457, 0.3055], [0.04715, -0.14995, 0.41475, -0.32465], [-0.024899999999999978, -0.4363, 0.0912, 0.3181]], [[1.0, 0.0, 0.0, 0.0], [-0.0241, 0.2137, 0.2624, 0.2194], [0.036, -0.1046, 0.3148, -0.2372], [-0.020100000000000007, -0.3628, 0.0762, 0.2674]], [[1.0, 0.0, 0.0, 0.0], [-0.0128, 0.1336, 0.1743, 0.1337], [0.0247, -0.0604, 0.2092, -0.1497], [-0.014150000000000051, -0.27565, 0.05995, 0.20675]], [[1.0, 0.0, 0.0, 0.0], [-0.0042, 0.0705, 0.0985, 0.066], [0.0154, -0.0266, 0.1182, -0.0798], [-0.007600000000000051, -0.1871, 0.0448, 0.1442]], [[1.0, 0.0, 0.0, 0.0], [-0.05545, 0.29065, 0.32375, 0.30195], [0.07775, -0.15205, 0.38835, -0.31685], [-0.04059999999999997, -0.3719, 0.0787, 0.2683]], [[1.0, 0.0, 0.0, 0.0], [-0.0442, 0.2407, 0.276, 0.2497], [0.06645, -0.12395, 0.33105, -0.26375], [-0.036750000000000005, -0.33435, 0.07015, 0.24265]], [[1.0, 0.0, 0.0, 0.0], [-0.0292, 0.1741, 0.2095, 0.1795], [0.0512, -0.0868, 0.2512, -0.1925], [-0.030799999999999994, -0.2781, 0.0582, 0.204]], [[1.0, 0.0, 0.0, 0.0], [-0.01465, 0.10855, 0.13915, 0.10965], [0.0356, -0.0504, 0.167, -0.1213], [-0.023349999999999982, -0.21145, 0.04535, 0.15775]], [[1.0, 0.0, 0.0, 0.0], [-0.0038, 0.057, 0.0787, 0.0544], [0.02255, -0.02255, 0.09445, -0.06445], [-0.015300000000000036, -0.1437, 0.0335, 0.1101]]]
set_6 = [[[1.0, 0.0, 0.0, 0.0], [0.00650000000000001, -0.2074, -0.8406, 0.3198], [0.0051500000000001, -0.00735000000000008, 0.32545, 0.86845], [0.00035000000000001696, -0.91695, 0.17305, -0.06395]], [[1.0, 0.0, 0.0, 0.0], [0.00629999999999997, -0.2187, -0.6941, 0.2759], [0.00460000000000005, -0.0221, 0.2718, 0.7461], [0.00029999999999996696, -0.8235, 0.1164, -0.0265]], [[1.0, 0.0, 0.0, 0.0], [0.00589999999999999, -0.2223, -0.5012, 0.2176], [0.00380000000000003, -0.0382, 0.199, 0.5749], [0.00029999999999996696, -0.6889, 0.0462, 0.0217]], [[1.0, 0.0, 0.0, 0.0], [0.00539999999999999, -0.2062, -0.3134, 0.1587], [0.00279999999999997, -0.0474, 0.1252, 0.3921], [0.000250000000000028, -0.53675, -0.01345, 0.06515]], [[1.0, 0.0, 0.0, 0.0], [0.00474999999999999, -0.16745, -0.16715, 0.10805], [0.00185000000000002, -0.04525, 0.06535, 0.23155], [0.00019999999999997797, -0.3884, -0.0466, 0.0912]], [[1.0, 0.0, 0.0, 0.0], [0.0255, -0.1818, -0.7884, 0.2969], [0.0196, -0.00259999999999994, 0.3043, 0.8064], [0.0006999999999999784, -0.8425, 0.1692, -0.0666]], [[1.0, 0.0, 0.0, 0.0], [0.02465, -0.19375, -0.65065, 0.25575], [0.0175, -0.0168999999999999, 0.2541, 0.6927], [0.0005999999999999894, -0.7561, 0.1155, -0.0313]], [[1.0, 0.0, 0.0, 0.0], [0.02315, -0.19915, -0.46935, 0.20105], [0.0144, -0.0327, 0.1861, 0.5336], [0.0004999999999999449, -0.6317, 0.0487, 0.0142]], [[1.0, 0.0, 0.0, 0.0], [0.0212, -0.1865, -0.2931, 0.1461], [0.0108, -0.0421, 0.1171, 0.3638], [0.00039999999999995595, -0.4915, -0.0084, 0.0555]], [[1.0, 0.0, 0.0, 0.0], [0.0187, -0.1523, -0.1561, 0.0991], [0.00719999999999998, -0.0409, 0.0612, 0.2147], [0.00019999999999997797, -0.3549, -0.0407, 0.0807]], [[1.0, 0.0, 0.0, 0.0], [0.0553, -0.1445, -0.7081, 0.2624], [0.0406, 0.00390000000000001, 0.2718, 0.7119], [-0.0008999999999999564, -0.731, 0.162, -0.0692]], [[1.0, 0.0, 0.0, 0.0], [0.0534, -0.1573, -0.5838, 0.2253], [0.0363000000000001, -0.00960000000000005, 0.227, 0.6114], [-0.0009999999999999454, -0.6552, 0.1128, 
-0.0373]], [[1.0, 0.0, 0.0, 0.0], [0.0503, -0.1652, -0.4205, 0.1762], [0.0299, -0.0246, 0.1662, 0.4708], [-0.0012000000000000344, -0.5463, 0.0516, 0.004]], [[1.0, 0.0, 0.0, 0.0], [0.046, -0.1571, -0.2619, 0.1272], [0.02245, -0.03425, 0.10465, 0.32075], [-0.0014499999999999513, -0.42375, -0.00135, 0.04185]], [[1.0, 0.0, 0.0, 0.0], [0.0406, -0.1298, -0.1389, 0.0857], [0.01505, -0.03445, 0.05465, 0.18915], [-0.0017000000000000348, -0.3052, -0.0321, 0.0656]], [[1.0, 0.0, 0.0, 0.0], [0.09335, -0.10205, -0.60825, 0.22065], [0.0645, 0.0105999999999999, 0.2314, 0.5967], [-0.007100000000000051, -0.5983, 0.1507, -0.0699]], [[1.0, 0.0, 0.0, 0.0], [0.09025, -0.11555, -0.50095, 0.18865], [0.05765, -0.00164999999999993, 0.19335, 0.51235], [-0.00720000000000004, -0.5353, 0.1075, -0.0423]], [[1.0, 0.0, 0.0, 0.0], [0.08505, -0.12585, -0.36005, 0.14645], [0.0476, -0.0158, 0.1416, 0.3943], [-0.007400000000000018, -0.445, 0.0533, -0.0064]], [[1.0, 0.0, 0.0, 0.0], [0.0779, -0.1229, -0.2235, 0.1047], [0.03575, -0.02535, 0.08925, 0.26845], [-0.007600000000000051, -0.3439, 0.0059, 0.0269]], [[1.0, 0.0, 0.0, 0.0], [0.0688, -0.1033, -0.1179, 0.0698], [0.02395, -0.02685, 0.04665, 0.15805], [-0.007949999999999957, -0.24645, -0.02245, 0.04855]], [[1.0, 0.0, 0.0, 0.0], [0.13685, -0.06115, -0.49925, 0.17665], [0.0872000000000001, 0.016, 0.1875, 0.4738], [-0.020799999999999985, -0.4611, 0.1351, -0.0673]], [[1.0, 0.0, 0.0, 0.0], [0.13235, -0.07475, -0.41055, 0.15015], [0.07805, 0.00524999999999998, 0.15675, 0.40665], [-0.020850000000000035, -0.41155, 0.09865, -0.04445]], [[1.0, 0.0, 0.0, 0.0], [0.12485, -0.08685, -0.29425, 0.11545], [0.06445, -0.00735000000000002, 0.11495, 0.31275], [-0.020950000000000024, -0.34075, 0.05265, -0.01465]], [[1.0, 0.0, 0.0, 0.0], [0.1144, -0.0885, -0.1819, 0.0814], [0.04855, -0.01655, 0.07245, 0.21265], [-0.021150000000000002, -0.26185, 0.01205, 0.01335]], [[1.0, 0.0, 0.0, 0.0], [0.1012, -0.0764, -0.0954, 0.0535], [0.0327, -0.0192, 0.0379, 0.125], 
[-0.02140000000000003, -0.1866, -0.0132, 0.0322]]]
set_7 = [[[1.0, 0.0, 0.0, 0.0], [0.0, -0.9024, 0.0854, -0.0941], [-0.00609999999999999, -0.0204, 0.6006, 0.7405], [-0.00714999999999999, 0.12665, 0.71865, -0.56185]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.7193, 0.0681, -0.075], [-0.00584999999999991, -0.0162500000000001, 0.59445, 0.69525], [-0.00714999999999999, 0.10485, 0.61465, -0.44725]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.4864, 0.0461, -0.0507], [-0.00544999999999995, -0.01105, 0.57705, 0.62905], [-0.00714999999999999, 0.07565, 0.47545, -0.29385]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.2725, 0.0258, -0.0284], [-0.005, -0.00609999999999999, 0.5422, 0.5511], [-0.00714999999999999, 0.04675, 0.33725, -0.14155]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.1215, 0.0115, -0.0127], [-0.00434999999999997, -0.00275000000000003, 0.48655, 0.46785], [-0.00714999999999999, 0.02385, 0.22815, -0.02135]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.8534, 0.0808, -0.089], [-0.0247000000000001, -0.0193, 0.538, 0.6731], [-0.028500000000000136, 0.1187, 0.6685, -0.5309]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.6803, 0.0644, -0.071], [-0.02375, -0.01535, 0.53325, 0.63135], [-0.02845000000000003, 0.0982500000000001, 0.57095, -0.42345]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.46, 0.0436, -0.048], [-0.02225, -0.01035, 0.51875, 0.57035], [-0.02845000000000003, 0.07085, 0.44055, -0.27975]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.2577, 0.0244, -0.0269], [-0.0202, -0.00580000000000003, 0.4882, 0.4989], [-0.02849999999999997, 0.0438, 0.3111, -0.1371]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.1149, 0.0109, -0.012], [-0.01765, -0.00264999999999999, 0.43865, 0.42295], [-0.02849999999999997, 0.0223, 0.2089, -0.0244]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.777, 0.0736, -0.0811], [-0.0567, -0.0176000000000001, 0.4467, 0.5737], [-0.06360000000000005, 0.1064, 0.5923, -0.4821]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.6194, 0.0586, -0.0646], [-0.0546, -0.0139999999999999, 0.444, 0.5371], [-0.06355, 0.08805, 0.50495, -0.38585]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.4188, 0.0397, -0.0437], [-0.05115, -0.00945000000000001, 0.43335, 
0.48405], [-0.06355, 0.06355, 0.38805, -0.25705]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.2347, 0.0222, -0.0245], [-0.04645, -0.00524999999999998, 0.40915, 0.42215], [-0.06355, 0.03925, 0.27195, -0.12915]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.1046, 0.0099, -0.0109], [-0.0406, -0.00240000000000001, 0.3685, 0.3571], [-0.06355, 0.01995, 0.18025, -0.02815]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.6803, 0.0644, -0.071], [-0.10245, -0.01535, 0.34235, 0.45805], [-0.11155000000000004, 0.0911500000000001, 0.49955, -0.41975]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.5423, 0.0513, -0.0566], [-0.09865, -0.01225, 0.34185, 0.42765], [-0.11149999999999999, 0.0754, 0.4247, -0.3373]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.3667, 0.0347, -0.0383], [-0.0924, -0.00829999999999997, 0.3354, 0.3839], [-0.11149999999999999, 0.0544, 0.3246, -0.227]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.2055, 0.0195, -0.0214], [-0.0839, -0.00469999999999998, 0.3183, 0.3334], [-0.11149999999999999, 0.0336, 0.2252, -0.1175]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.0916, 0.0087, -0.0096], [-0.07335, -0.00205, 0.28775, 0.28095], [-0.11149999999999999, 0.0171, 0.1468, -0.031]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.5717, 0.0541, -0.0596], [-0.16045, -0.01285, 0.24055, 0.34215], [-0.17084999999999995, 0.07435, 0.40065, -0.34915]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.4557, 0.0431, -0.0475], [-0.1545, -0.0103, 0.2419, 0.3182], [-0.17084999999999995, 0.06155, 0.33955, -0.28185]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.3081, 0.0292, -0.0321], [-0.1447, -0.00700000000000001, 0.2394, 0.284], [-0.17084999999999995, 0.04445, 0.25785, -0.19185]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.1727, 0.0163, -0.018], [-0.1314, -0.00390000000000001, 0.2289, 0.2451], [-0.17084999999999995, 0.02745, 0.17675, -0.10245]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.077, 0.0073, -0.008], [-0.11485, -0.00175, 0.20805, 0.20545], [-0.17080000000000006, 0.014, 0.1126, -0.0318]]]
set_8 = [[[1.0, 0.0, 0.0, 0.0], [-0.00240000000000001, -0.7696, -0.1231, -0.4768], [0.00320000000000004, -0.1604, -0.7692, 0.4661], [0.010049999999999948, -0.48525, 0.49445, 0.66295]], [[1.0, 0.0, 0.0, 0.0], [-0.00195000000000001, -0.61585, -0.09605, -0.39165], [0.00269999999999998, -0.1288, -0.6124, 0.3869], [0.010099999999999998, -0.4713, 0.4737, 0.6612]], [[1.0, 0.0, 0.0, 0.0], [-0.00130000000000002, -0.4196, -0.062, -0.2782], [0.00195000000000001, -0.08815, -0.41325, 0.28065], [0.010049999999999948, -0.44715, 0.44035, 0.65875]], [[1.0, 0.0, 0.0, 0.0], [-0.000650000000000012, -0.23875, -0.03165, -0.16635], [0.00120000000000001, -0.0503, -0.2307, 0.1746], [0.010049999999999948, -0.41215, 0.39575, 0.65615]], [[1.0, 0.0, 0.0, 0.0], [-0.00025, -0.10965, -0.01115, -0.07915], [0.000649999999999998, -0.02315, -0.10235, 0.09015], [0.010000000000000009, -0.3658, 0.3417, 0.6538]], [[1.0, 0.0, 0.0, 0.0], [-0.0091, -0.7272, -0.117, -0.4475], [0.01215, -0.15145, -0.72765, 0.43645], [0.03694999999999987, -0.43715, 0.44685, 0.59335]], [[1.0, 0.0, 0.0, 0.0], [-0.00729999999999997, -0.5818, -0.0913, -0.3677], [0.0101, -0.1215, -0.5793, 0.3623], [0.03689999999999999, -0.4246, 0.4282, 0.5917]], [[1.0, 0.0, 0.0, 0.0], [-0.00490000000000002, -0.3964, -0.0591, -0.2613], [0.00739999999999996, -0.0831999999999999, -0.3909, 0.2628], [0.03684999999999988, -0.40305, 0.39795, 0.58945]], [[1.0, 0.0, 0.0, 0.0], [-0.0025, -0.2254, -0.0303, -0.1564], [0.00465000000000002, -0.04755, -0.21835, 0.16345], [0.03680000000000011, -0.3716, 0.3576, 0.587]], [[1.0, 0.0, 0.0, 0.0], [-0.000799999999999995, -0.1035, -0.0108, -0.0746], [0.00245000000000001, -0.02185, -0.09685, 0.08445], [0.036749999999999894, -0.32995, 0.30865, 0.58485]], [[1.0, 0.0, 0.0, 0.0], [-0.0187, -0.6611, -0.1074, -0.4023], [0.02505, -0.13755, -0.66285, 0.39075], [0.07139999999999996, -0.3665, 0.3771, 0.4925]], [[1.0, 0.0, 0.0, 0.0], [-0.01495, -0.52885, -0.08385, -0.33065], [0.02085, -0.11035, -0.52775, 0.32435], 
[0.07135000000000002, -0.35625, 0.36125, 0.49105]], [[1.0, 0.0, 0.0, 0.0], [-0.00994999999999999, -0.36025, -0.05445, -0.23515], [0.01525, -0.07555, -0.35615, 0.23525], [0.07125000000000004, -0.33835, 0.33575, 0.48905]], [[1.0, 0.0, 0.0, 0.0], [-0.00519999999999998, -0.2046, -0.028, -0.141], [0.00955, -0.04315, -0.19885, 0.14635], [0.07110000000000011, -0.3121, 0.3016, 0.4869]], [[1.0, 0.0, 0.0, 0.0], [-0.00159999999999999, -0.0938, -0.0102, -0.0675], [0.00505, -0.01985, -0.08825, 0.07555], [0.07100000000000012, -0.2773, 0.2603, 0.485]], [[1.0, 0.0, 0.0, 0.0], [-0.0291, -0.5778, -0.095, -0.3458], [0.03925, -0.11995, -0.58055, 0.33405], [0.10190000000000005, -0.2856, 0.2964, 0.3782]], [[1.0, 0.0, 0.0, 0.0], [-0.02325, -0.46205, -0.07435, -0.28435], [0.0327, -0.0962, -0.4623, 0.2773], [0.10179999999999995, -0.2777, 0.2839, 0.377]], [[1.0, 0.0, 0.0, 0.0], [-0.01555, -0.31455, -0.04835, -0.20245], [0.0239, -0.0659, -0.312, 0.2011], [0.10159999999999997, -0.264, 0.2638, 0.3753]], [[1.0, 0.0, 0.0, 0.0], [-0.00805, -0.17855, -0.02505, -0.12165], [0.015, -0.0376, -0.1742, 0.1251], [0.10145000000000004, -0.24385, 0.23685, 0.37355]], [[1.0, 0.0, 0.0, 0.0], [-0.0024, -0.0817, -0.0093, -0.0586], [0.00785000000000001, -0.01725, -0.07725, 0.06455], [0.10129999999999995, -0.2169, 0.2043, 0.3719]], [[1.0, 0.0, 0.0, 0.0], [-0.03835, -0.48435, -0.08085, -0.28355], [0.05215, -0.10025, -0.48825, 0.27215], [0.11749999999999994, -0.2058, 0.2164, 0.2678]], [[1.0, 0.0, 0.0, 0.0], [-0.03055, -0.38725, -0.06345, -0.23335], [0.0435, -0.0805, -0.3889, 0.2259], [0.11735000000000001, -0.20035, 0.20725, 0.26685]], [[1.0, 0.0, 0.0, 0.0], [-0.02035, -0.26345, -0.04135, -0.16635], [0.0317, -0.055, -0.2624, 0.1638], [0.11715000000000003, -0.19075, 0.19245, 0.26555]], [[1.0, 0.0, 0.0, 0.0], [-0.0104, -0.1494, -0.0217, -0.1002], [0.01995, -0.03145, -0.14655, 0.10185], [0.1169, -0.1764, 0.1728, 0.2641]], [[1.0, 0.0, 0.0, 0.0], [-0.00305, -0.06815, -0.00815, -0.04855], [0.01045, -0.01435, -0.06505, 
0.05255], [0.11670000000000003, -0.1571, 0.149, 0.2628]]]
set_9 = [[[1.0, 0.0, 0.0, 0.0], [-0.000299999999999967, -0.7045, 0.1126, -0.5786], [0.00135000000000002, -0.38875, -0.75425, 0.33925], [-0.0007999999999999119, -0.4374, 0.5119, 0.6757]], [[1.0, 0.0, 0.0, 0.0], [-0.000199999999999978, -0.5535, 0.0790999999999999, -0.515], [0.00109999999999999, -0.3142, -0.5967, 0.299], [-0.0007499999999998619, -0.39285, 0.46315, 0.69125]], [[1.0, 0.0, 0.0, 0.0], [-0.000149999999999983, -0.36255, 0.03905, -0.42515], [0.000799999999999995, -0.2185, -0.3974, 0.2418], [-0.0006500000000000394, -0.33015, 0.39275, 0.71375]], [[1.0, 0.0, 0.0, 0.0], [-4.99999999999945e-05, -0.18985, 0.00645000000000001, -0.32605], [0.0005, -0.129, -0.2163, 0.179], [-0.0006000000000001005, -0.2629, 0.3131, 0.739]], [[1.0, 0.0, 0.0, 0.0], [0.0, -0.0715, -0.0107, -0.232], [0.000200000000000006, -0.0637, -0.0907, 0.1208], [-0.00045000000000006146, -0.20385, 0.23595, 0.76285]], [[1.0, 0.0, 0.0, 0.0], [-0.000649999999999928, -0.66845, 0.10935, -0.53285], [0.00490000000000002, -0.3664, -0.7145, 0.3131], [-0.0041500000000000425, -0.40205, 0.46945, 0.60065]], [[1.0, 0.0, 0.0, 0.0], [-0.000450000000000006, -0.52545, 0.07735, -0.47385], [0.00405, -0.29605, -0.56545, 0.27585], [-0.0039000000000000146, -0.3606, 0.4244, 0.615]], [[1.0, 0.0, 0.0, 0.0], [-0.000200000000000033, -0.3446, 0.039, -0.3905], [0.00285000000000002, -0.20565, -0.37675, 0.22285], [-0.0036499999999999866, -0.30235, 0.35945, 0.63575]], [[1.0, 0.0, 0.0, 0.0], [4.99999999999945e-05, -0.18095, 0.00774999999999998, -0.29895], [0.00169999999999998, -0.1213, -0.2052, 0.1647], [-0.003300000000000025, -0.24, 0.286, 0.6591]], [[1.0, 0.0, 0.0, 0.0], [0.000149999999999983, -0.06865, -0.00904999999999997, -0.21225], [0.000799999999999995, -0.0598, -0.0862, 0.1109], [-0.002950000000000008, -0.18505, 0.21505, 0.68115]], [[1.0, 0.0, 0.0, 0.0], [-4.99999999999945e-05, -0.61185, 0.10375, -0.46425], [0.00964999999999999, -0.33195, -0.65235, 0.27375], [-0.012450000000000017, -0.34905, 0.40615, 0.49255]], [[1.0, 0.0, 0.0, 
0.0], [0.000250000000000028, -0.48135, 0.07425, -0.41215], [0.0078, -0.268, -0.5164, 0.2409], [-0.01200000000000001, -0.3125, 0.3667, 0.5051]], [[1.0, 0.0, 0.0, 0.0], [0.000649999999999984, -0.31635, 0.03855, -0.33875], [0.0055, -0.186, -0.3443, 0.1943], [-0.01144999999999996, -0.26105, 0.30985, 0.52325]], [[1.0, 0.0, 0.0, 0.0], [0.000899999999999956, -0.1669, 0.00940000000000002, -0.2584], [0.00315000000000001, -0.10945, -0.18775, 0.14325], [-0.01074999999999987, -0.20605, 0.24565, 0.54365]], [[1.0, 0.0, 0.0, 0.0], [0.000950000000000006, -0.06415, -0.00665000000000002, -0.18285], [0.0014, -0.0537, -0.0792, 0.0961], [-0.010049999999999948, -0.15755, 0.18385, 0.56295]], [[1.0, 0.0, 0.0, 0.0], [0.00269999999999998, -0.5393, 0.0959, -0.3824], [0.0139, -0.2886, -0.5732, 0.2264], [-0.02845000000000003, -0.28605, 0.33095, 0.37125]], [[1.0, 0.0, 0.0, 0.0], [0.003, -0.4248, 0.0695, -0.3386], [0.0112, -0.2328, -0.454, 0.199], [-0.027849999999999986, -0.25545, 0.29825, 0.38165]], [[1.0, 0.0, 0.0, 0.0], [0.00325000000000003, -0.27985, 0.03745, -0.27715], [0.00764999999999999, -0.16135, -0.30305, 0.16005], [-0.02695000000000003, -0.21235, 0.25115, 0.39665]], [[1.0, 0.0, 0.0, 0.0], [0.00325, -0.14845, 0.01095, -0.21035], [0.0042, -0.0946, -0.1656, 0.1176], [-0.025949999999999973, -0.16615, 0.19825, 0.41355]], [[1.0, 0.0, 0.0, 0.0], [0.00295000000000001, -0.05795, -0.00395000000000001, -0.14805], [0.0016, -0.0461, -0.0701, 0.0785], [-0.024899999999999978, -0.1256, 0.1474, 0.4295]], [[1.0, 0.0, 0.0, 0.0], [0.0086, -0.4569, 0.0856, -0.2973], [0.0164, -0.2405, -0.4838, 0.1768], [-0.054249999999999965, -0.22105, 0.25365, 0.25585]], [[1.0, 0.0, 0.0, 0.0], [0.0086, -0.3603, 0.0629, -0.2624], [0.0129, -0.1938, -0.3834, 0.1551], [-0.05350000000000005, -0.1967, 0.228, 0.264]], [[1.0, 0.0, 0.0, 0.0], [0.00835, -0.23815, 0.03525, -0.21365], [0.00839999999999999, -0.134, -0.2561, 0.1244], [-0.0524, -0.1625, 0.1912, 0.2757]], [[1.0, 0.0, 0.0, 0.0], [0.00769999999999998, -0.1272, 0.012, 
-0.161], [0.00415, -0.07835, -0.14035, 0.09095], [-0.05109999999999998, -0.1259, 0.15, 0.2889]], [[1.0, 0.0, 0.0, 0.0], [0.00645, -0.05055, -0.00144999999999999, -0.11245], [0.00105, -0.03795, -0.05965, 0.06035], [-0.04984999999999995, -0.09355, 0.11065, 0.30125]]]
set_10 = [[[1.0, 0.0, 0.0, 0.0], [0.00634999999999997, 0.63315, -0.46445, 0.50475], [-0.000899999999999998, -0.5946, -0.6985, 0.0833], [-0.005199999999999927, 0.3537, -0.3745, -0.7755]], [[1.0, 0.0, 0.0, 0.0], [0.00564999999999999, 0.57285, -0.39835, 0.43295], [-0.00115, -0.51765, -0.56215, 0.07915], [-0.005350000000000132, 0.33375, -0.31355, -0.66895]], [[1.0, 0.0, 0.0, 0.0], [0.00465000000000004, 0.47965, -0.30565, 0.33495], [-0.0014, -0.4099, -0.385, 0.0692], [-0.00550000000000006, 0.303, -0.2334, -0.5163]], [[1.0, 0.0, 0.0, 0.0], [0.0035, 0.3658, -0.2067, 0.2331], [-0.0017, -0.2949, -0.2168, 0.0518], [-0.005549999999999999, 0.26525, -0.15615, -0.34735]], [[1.0, 0.0, 0.0, 0.0], [0.00234999999999999, 0.24795, -0.11985, 0.14465], [-0.0017, -0.1938, -0.093, 0.0289], [-0.005600000000000049, 0.2254, -0.0969, -0.1912]], [[1.0, 0.0, 0.0, 0.0], [0.02485, 0.58025, -0.43145, 0.46905], [-0.00405, -0.55055, -0.65885, 0.07515], [-0.020850000000000035, 0.32095, -0.35025, -0.71915]], [[1.0, 0.0, 0.0, 0.0], [0.0221499999999999, 0.52525, -0.36995, 0.40195], [-0.00495, -0.47895, -0.53045, 0.07185], [-0.02124999999999999, 0.30265, -0.29285, -0.62055]], [[1.0, 0.0, 0.0, 0.0], [0.0182, 0.4401, -0.2839, 0.3106], [-0.006, -0.3789, -0.3635, 0.0633], [-0.02180000000000004, 0.2747, -0.2176, -0.4793]], [[1.0, 0.0, 0.0, 0.0], [0.0136, 0.336, -0.1919, 0.2157], [-0.00685, -0.27205, -0.20515, 0.04785], [-0.022149999999999948, 0.24025, -0.14495, -0.32305]], [[1.0, 0.0, 0.0, 0.0], [0.0091, 0.2278, -0.1113, 0.1336], [-0.007, -0.1782, -0.0884, 0.0271], [-0.022199999999999998, 0.2037, -0.0893, -0.1787]], [[1.0, 0.0, 0.0, 0.0], [0.0537, 0.5009, -0.3811, 0.4149], [-0.0108, -0.4837, -0.5972, 0.0629], [-0.04690000000000011, 0.2728, -0.3129, -0.6334]], [[1.0, 0.0, 0.0, 0.0], [0.04795, 0.45385, -0.32685, 0.35505], [-0.01255, -0.42045, -0.48105, 0.06085], [-0.04764999999999997, 0.25705, -0.26125, -0.54685]], [[1.0, 0.0, 0.0, 0.0], [0.0394, 0.3808, -0.2508, 0.2736], [-0.01455, -0.33195, -0.33005, 
0.05435], [-0.04865000000000003, 0.23295, -0.19335, -0.42295]], [[1.0, 0.0, 0.0, 0.0], [0.02945, 0.29115, -0.16955, 0.18945], [-0.0159, -0.2376, -0.1869, 0.0418], [-0.049350000000000005, 0.20335, -0.12785, -0.28585]], [[1.0, 0.0, 0.0, 0.0], [0.01965, 0.19775, -0.09825, 0.11695], [-0.01595, -0.15475, -0.08115, 0.02435], [-0.049350000000000005, 0.17185, -0.07785, -0.15925]], [[1.0, 0.0, 0.0, 0.0], [0.08995, 0.40635, -0.31955, 0.34905], [-0.02275, -0.40295, -0.51935, 0.04845], [-0.08305000000000001, 0.21665, -0.26715, -0.52885]], [[1.0, 0.0, 0.0, 0.0], [0.0803, 0.3687, -0.2741, 0.298], [-0.02515, -0.34985, -0.41865, 0.04775], [-0.08420000000000005, 0.2039, -0.2225, -0.4569]], [[1.0, 0.0, 0.0, 0.0], [0.06595, 0.31005, -0.21025, 0.22885], [-0.02785, -0.27545, -0.28775, 0.04355], [-0.08559999999999995, 0.1845, -0.1639, -0.3539]], [[1.0, 0.0, 0.0, 0.0], [0.04935, 0.23755, -0.14215, 0.15775], [-0.02945, -0.19625, -0.16345, 0.03435], [-0.08644999999999997, 0.16055, -0.10735, -0.24005]], [[1.0, 0.0, 0.0, 0.0], [0.0329, 0.1616, -0.0824, 0.0968], [-0.0288, -0.1269, -0.0717, 0.0207], [-0.08624999999999994, 0.13515, -0.06435, -0.13495]], [[1.0, 0.0, 0.0, 0.0], [0.12945, 0.30855, -0.25395, 0.27895], [-0.0414, -0.3177, -0.4325, 0.0338], [-0.12884999999999996, 0.16025, -0.21765, -0.41745]], [[1.0, 0.0, 0.0, 0.0], [0.1155, 0.2806, -0.2178, 0.2375], [-0.0441, -0.2753, -0.3488, 0.0342], [-0.13030000000000003, 0.1508, -0.1807, -0.361]], [[1.0, 0.0, 0.0, 0.0], [0.0949, 0.2366, -0.1671, 0.1815], [-0.04675, -0.21615, -0.24015, 0.03235], [-0.13185000000000002, 0.13615, -0.13235, -0.28015]], [[1.0, 0.0, 0.0, 0.0], [0.07095, 0.18185, -0.11295, 0.12425], [-0.04765, -0.15315, -0.13705, 0.02635], [-0.13270000000000004, 0.1181, -0.0857, -0.1908]], [[1.0, 0.0, 0.0, 0.0], [0.04735, 0.12405, -0.06545, 0.07575], [-0.0455, -0.098, -0.0608, 0.0166], [-0.13209999999999994, 0.0989, -0.0503, -0.1084]]]
set_11 = [[[1.0, 0.0, 0.0, 0.0], [0.00429999999999997, -0.6295, -0.5431, -0.3852], [0.000900000000000012, -0.3587, -0.1711, 0.8206], [0.009800000000000031, -0.5956, 0.7433, -0.1031]], [[1.0, 0.0, 0.0, 0.0], [0.00359999999999999, -0.5269, -0.4287, -0.3118], [0.000700000000000034, -0.2916, -0.1371, 0.6531], [0.00984999999999997, -0.59965, 0.71115, -0.10655]], [[1.0, 0.0, 0.0, 0.0], [0.00259999999999999, -0.388, -0.2844, -0.2168], [0.000400000000000011, -0.2035, -0.0939, 0.4404], [0.00984999999999997, -0.60615, 0.65965, -0.11125]], [[1.0, 0.0, 0.0, 0.0], [0.00164999999999998, -0.24725, -0.15405, -0.12715], [0.000150000000000011, -0.11885, -0.05445, 0.24595], [0.009949999999999959, -0.61495, 0.59115, -0.11585]], [[1.0, 0.0, 0.0, 0.0], [0.000850000000000004, -0.13275, -0.06445, -0.06095], [-5.00000000000084e-05, -0.05465, -0.02665, 0.10935], [0.010049999999999948, -0.62525, 0.50885, -0.11945]], [[1.0, 0.0, 0.0, 0.0], [0.01625, -0.58825, -0.51485, -0.36295], [0.00334999999999996, -0.33765, -0.16165, 0.77635], [0.036750000000000005, -0.53185, 0.67205, -0.09135]], [[1.0, 0.0, 0.0, 0.0], [0.01355, -0.49235, -0.40645, -0.29375], [0.00254999999999994, -0.27445, -0.12945, 0.61785], [0.03685000000000005, -0.53545, 0.64305, -0.09465]], [[1.0, 0.0, 0.0, 0.0], [0.00989999999999999, -0.3624, -0.2697, -0.2042], [0.00155, -0.19165, -0.08865, 0.41665], [0.03699999999999998, -0.5414, 0.5963, -0.099]], [[1.0, 0.0, 0.0, 0.0], [0.0063, -0.231, -0.1462, -0.1197], [0.000549999999999995, -0.11205, -0.05135, 0.23265], [0.037250000000000005, -0.54935, 0.53425, -0.10325]], [[1.0, 0.0, 0.0, 0.0], [0.00335, -0.12395, -0.06135, -0.05735], [-0.000199999999999992, -0.0517, -0.025, 0.1034], [0.03750000000000003, -0.5586, 0.4598, -0.1067]], [[1.0, 0.0, 0.0, 0.0], [0.03355, -0.52495, -0.47055, -0.32845], [0.00675000000000003, -0.30485, -0.14695, 0.70735], [0.07390000000000002, -0.4396, 0.5674, -0.0744]], [[1.0, 0.0, 0.0, 0.0], [0.02795, -0.43925, -0.37155, -0.26575], [0.00514999999999999, -0.24785, 
-0.11755, 0.56295], [0.07410000000000005, -0.4426, 0.5428, -0.0774]], [[1.0, 0.0, 0.0, 0.0], [0.02045, -0.32325, -0.24675, -0.18465], [0.00305, -0.17315, -0.08035, 0.37955], [0.07440000000000002, -0.4476, 0.5033, -0.0813]], [[1.0, 0.0, 0.0, 0.0], [0.01295, -0.20585, -0.13385, -0.10815], [0.001, -0.1013, -0.0464, 0.2119], [0.07485000000000003, -0.45435, 0.45075, -0.08515]], [[1.0, 0.0, 0.0, 0.0], [0.0069, -0.1104, -0.0563, -0.0517], [-0.0005, -0.047, -0.0224, 0.0942], [0.07529999999999998, -0.4622, 0.3879, -0.0882]], [[1.0, 0.0, 0.0, 0.0], [0.0527, -0.4467, -0.414, -0.2851], [0.0103, -0.2638, -0.1284, 0.6198], [0.11155000000000004, -0.33555, 0.44645, -0.05545]], [[1.0, 0.0, 0.0, 0.0], [0.044, -0.3738, -0.3271, -0.2306], [0.00785000000000002, -0.21445, -0.10275, 0.49325], [0.11185, -0.33795, 0.42695, -0.05805]], [[1.0, 0.0, 0.0, 0.0], [0.03225, -0.27495, -0.21745, -0.16015], [0.00455, -0.14995, -0.07005, 0.33265], [0.11230000000000001, -0.3418, 0.3958, -0.0614]], [[1.0, 0.0, 0.0, 0.0], [0.0204, -0.175, -0.1182, -0.0937], [0.00140000000000001, -0.088, -0.0403, 0.1857], [0.1129, -0.3471, 0.3543, -0.0647]], [[1.0, 0.0, 0.0, 0.0], [0.01085, -0.09365, -0.04975, -0.04475], [-0.001, -0.041, -0.0193, 0.0825], [0.11359999999999998, -0.3534, 0.3047, -0.0673]], [[1.0, 0.0, 0.0, 0.0], [0.07025, -0.36185, -0.35015, -0.23705], [0.0132, -0.2182, -0.1078, 0.5215], [0.13935000000000003, -0.23555, 0.32635, -0.03755]], [[1.0, 0.0, 0.0, 0.0], [0.0586, -0.3026, -0.2767, -0.1916], [0.00994999999999996, -0.17755, -0.08605, 0.41505], [0.13969999999999994, -0.2373, 0.312, -0.0396]], [[1.0, 0.0, 0.0, 0.0], [0.04295, -0.22255, -0.18415, -0.13295], [0.00564999999999999, -0.12425, -0.05865, 0.27985], [0.14030000000000004, -0.2402, 0.289, -0.0424]], [[1.0, 0.0, 0.0, 0.0], [0.02725, -0.14145, -0.10035, -0.07765], [0.00139999999999998, -0.0729, -0.0335, 0.1562], [0.14100000000000001, -0.2441, 0.2586, -0.0451]], [[1.0, 0.0, 0.0, 0.0], [0.01455, -0.07565, -0.04245, -0.03705], [-0.00170000000000001, 
-0.0342, -0.0159, 0.0693], [0.14185000000000003, -0.24875, 0.22215, -0.04725]]]
set_12 = [[[1.0, 0.0, 0.0, 0.0], [-0.000650000000000012, 0.87555, -0.21435, -0.20175], [-0.00459999999999994, 0.0945999999999999, -0.3485, 0.8654], [-0.0037000000000000366, -0.277, -0.8222, -0.3187]], [[1.0, 0.0, 0.0, 0.0], [-0.000750000000000001, 0.73545, -0.16555, -0.19825], [-0.00464999999999993, 0.0621499999999999, -0.27975, 0.79205], [-0.0035499999999999976, -0.23995, -0.68215, -0.29675]], [[1.0, 0.0, 0.0, 0.0], [-0.000750000000000001, 0.54555, -0.10505, -0.18665], [-0.00480000000000003, 0.0204, -0.1922, 0.6808], [-0.0033000000000000806, -0.1855, -0.494, -0.2643]], [[1.0, 0.0, 0.0, 0.0], [-0.000750000000000001, 0.35265, -0.05215, -0.16265], [-0.00490000000000002, -0.0175999999999999, -0.1118, 0.5463], [-0.002950000000000008, -0.12305, -0.30615, -0.22665]], [[1.0, 0.0, 0.0, 0.0], [-0.000599999999999989, 0.1938, -0.0181, -0.1263], [-0.00479999999999997, -0.0413, -0.0546, 0.4056], [-0.002650000000000041, -0.06325, -0.15615, -0.18915]], [[1.0, 0.0, 0.0, 0.0], [-0.002, 0.8176, -0.2042, -0.1808], [-0.01925, 0.09275, -0.32915, 0.79115], [-0.014599999999999946, -0.2565, -0.7699, -0.2903]], [[1.0, 0.0, 0.0, 0.0], [-0.00225, 0.68655, -0.15785, -0.17835], [-0.0196, 0.0621999999999999, -0.264, 0.724], [-0.013949999999999962, -0.22245, -0.63875, -0.27005]], [[1.0, 0.0, 0.0, 0.0], [-0.0025, 0.5091, -0.1003, -0.1687], [-0.01995, 0.02275, -0.18125, 0.62215], [-0.012950000000000017, -0.17235, -0.46265, -0.24015]], [[1.0, 0.0, 0.0, 0.0], [-0.0025, 0.3289, -0.05, -0.1477], [-0.02, -0.0133, -0.1053, 0.499], [-0.011749999999999983, -0.11475, -0.28675, -0.20555]], [[1.0, 0.0, 0.0, 0.0], [-0.00209999999999999, 0.1806, -0.0175, -0.1151], [-0.0194, -0.0361, -0.0513, 0.3702], [-0.010499999999999954, -0.0596, -0.1464, -0.1711]], [[1.0, 0.0, 0.0, 0.0], [-0.00205, 0.72875, -0.18815, -0.14995], [-0.0463, 0.0889, -0.2991, 0.6804], [-0.032350000000000045, -0.22525, -0.68945, -0.24815]], [[1.0, 0.0, 0.0, 0.0], [-0.0028, 0.6117, -0.1456, -0.149], [-0.0467000000000001, 0.0614000000000001, 
-0.2397, 0.6225], [-0.030950000000000033, -0.19565, -0.57205, -0.23055]], [[1.0, 0.0, 0.0, 0.0], [-0.00355, 0.45325, -0.09285, -0.14215], [-0.0468999999999999, 0.0256, -0.1644, 0.5347], [-0.02889999999999998, -0.1521, -0.4144, -0.2046]], [[1.0, 0.0, 0.0, 0.0], [-0.00389999999999999, 0.2925, -0.0465, -0.1255], [-0.04635, -0.00734999999999997, -0.09515, 0.42855], [-0.02645000000000003, -0.10185, -0.25685, -0.17455]], [[1.0, 0.0, 0.0, 0.0], [-0.00359999999999999, 0.1604, -0.0165, -0.0985], [-0.0443, -0.0287, -0.0461, 0.3175], [-0.023900000000000032, -0.0537, -0.1313, -0.1446]], [[1.0, 0.0, 0.0, 0.0], [0.00144999999999999, 0.61925, -0.16735, -0.11425], [-0.0879, 0.0827, -0.2611, 0.5496], [-0.056599999999999984, -0.1872, -0.5896, -0.1989]], [[1.0, 0.0, 0.0, 0.0], [-0.00025, 0.51945, -0.12975, -0.11485], [-0.0877500000000001, 0.0589500000000001, -0.20915, 0.50265], [-0.054450000000000054, -0.16295, -0.48915, -0.18445]], [[1.0, 0.0, 0.0, 0.0], [-0.00220000000000001, 0.3845, -0.083, -0.1112], [-0.08685, 0.02785, -0.14315, 0.43145], [-0.05119999999999997, -0.1272, -0.3544, -0.1631]], [[1.0, 0.0, 0.0, 0.0], [-0.00360000000000001, 0.2478, -0.0419, -0.0994], [-0.08435, -0.00114999999999998, -0.08265, 0.34545], [-0.04735, -0.08585, -0.21975, -0.13845]], [[1.0, 0.0, 0.0, 0.0], [-0.0039, 0.1356, -0.0151, -0.0789], [-0.0794, -0.0203, -0.0396, 0.2555], [-0.04335, -0.04615, -0.11245, -0.11405]], [[1.0, 0.0, 0.0, 0.0], [0.01005, 0.50085, -0.14345, -0.07905], [-0.1442, 0.0737999999999999, -0.2188, 0.4157], [-0.08759999999999996, -0.1467, -0.4804, -0.1492]], [[1.0, 0.0, 0.0, 0.0], [0.00695, 0.41975, -0.11135, -0.08095], [-0.1426, 0.0542, -0.1751, 0.3801], [-0.0847, -0.1281, -0.3987, -0.138]], [[1.0, 0.0, 0.0, 0.0], [0.00310000000000001, 0.3103, -0.0716, -0.0801], [-0.1393, 0.0284, -0.1196, 0.326], [-0.08040000000000003, -0.1004, -0.2889, -0.1215]], [[1.0, 0.0, 0.0, 0.0], [-0.00025, 0.19955, -0.03645, -0.07305], [-0.13325, 0.00414999999999999, -0.06865, 0.26065], [-0.07529999999999998, 
-0.0685, -0.1792, -0.1025]], [[1.0, 0.0, 0.0, 0.0], [-0.00215, 0.10895, -0.01345, -0.05885], [-0.1234, -0.0125, -0.0326, 0.1923], [-0.07000000000000006, -0.0376, -0.0917, -0.0837]]]
set_13 = [[[1.0, 0.0, 0.0, 0.0], [-0.00214999999999999, 0.75775, 0.36985, 0.36085], [-0.00805, 0.22695, -0.82265, 0.38085], [-0.003999999999999837, 0.4882, -0.2386, -0.7596]], [[1.0, 0.0, 0.0, 0.0], [-0.00205, 0.62965, 0.29045, 0.28825], [-0.00729999999999997, 0.225, -0.7308, 0.3139], [-0.0040000000000000036, 0.4514, -0.2361, -0.6488]], [[1.0, 0.0, 0.0, 0.0], [-0.00184999999999999, 0.45865, 0.18825, 0.19605], [-0.00619999999999998, 0.2137, -0.5973, 0.2264], [-0.003950000000000065, 0.39605, -0.23165, -0.49295]], [[1.0, 0.0, 0.0, 0.0], [-0.00155, 0.28895, 0.09375, 0.11145], [-0.00490000000000002, 0.1871, -0.4456, 0.1421], [-0.0039000000000000146, 0.3301, -0.2249, -0.3258]], [[1.0, 0.0, 0.0, 0.0], [-0.00115, 0.15395, 0.02815, 0.05155], [-0.00359999999999999, 0.1451, -0.2993, 0.0772], [-0.0037000000000000366, 0.2613, -0.2146, -0.1787]], [[1.0, 0.0, 0.0, 0.0], [-0.0081, 0.7095, 0.3508, 0.3412], [-0.0297, 0.2028, -0.7576, 0.3574], [-0.015099999999999947, 0.4454, -0.2139, -0.7062]], [[1.0, 0.0, 0.0, 0.0], [-0.00774999999999998, 0.58935, 0.27585, 0.27245], [-0.02695, 0.20205, -0.67285, 0.29435], [-0.014999999999999958, 0.4115, -0.2117, -0.6033]], [[1.0, 0.0, 0.0, 0.0], [-0.00700000000000001, 0.4291, 0.1794, 0.1852], [-0.02295, 0.19305, -0.54965, 0.21205], [-0.01485000000000003, 0.36075, -0.20775, -0.45865]], [[1.0, 0.0, 0.0, 0.0], [-0.0058, 0.2701, 0.09, 0.1052], [-0.0182, 0.1699, -0.4098, 0.1328], [-0.014499999999999957, 0.3003, -0.2017, -0.3034]], [[1.0, 0.0, 0.0, 0.0], [-0.00430000000000001, 0.1438, 0.0278, 0.0486], [-0.0132, 0.1323, -0.2751, 0.0719], [-0.013900000000000023, 0.2373, -0.1926, -0.1668]], [[1.0, 0.0, 0.0, 0.0], [-0.0164, 0.6352, 0.3207, 0.3104], [-0.0580000000000001, 0.1672, -0.6599, 0.3213], [-0.030750000000000055, 0.38175, -0.17815, -0.62465]], [[1.0, 0.0, 0.0, 0.0], [-0.01565, 0.52745, 0.25275, 0.24785], [-0.0527, 0.1681, -0.5858, 0.2643], [-0.03059999999999996, 0.3524, -0.1762, -0.5338]], [[1.0, 0.0, 0.0, 0.0], [-0.0141, 0.3837, 0.1652, 0.1684], 
[-0.0448, 0.1624, -0.4782, 0.19], [-0.030250000000000055, 0.30845, -0.17305, -0.40615]], [[1.0, 0.0, 0.0, 0.0], [-0.0117, 0.2412, 0.0839, 0.0955], [-0.03545, 0.14435, -0.35605, 0.11855], [-0.029500000000000026, 0.2562, -0.1681, -0.2691]], [[1.0, 0.0, 0.0, 0.0], [-0.00865, 0.12815, 0.02695, 0.04405], [-0.02575, 0.11325, -0.23865, 0.06385], [-0.02839999999999998, 0.202, -0.1606, -0.1485]], [[1.0, 0.0, 0.0, 0.0], [-0.0251, 0.5433, 0.2819, 0.2716], [-0.08395, 0.12625, -0.54255, 0.27645], [-0.04824999999999996, 0.30695, -0.13735, -0.52475]], [[1.0, 0.0, 0.0, 0.0], [-0.0239, 0.4508, 0.2229, 0.2168], [-0.0762, 0.1288, -0.4814, 0.2271], [-0.04800000000000004, 0.283, -0.1359, -0.4487]], [[1.0, 0.0, 0.0, 0.0], [-0.0215, 0.3275, 0.1466, 0.1471], [-0.06475, 0.12665, -0.39245, 0.16275], [-0.04750000000000004, 0.2472, -0.1334, -0.3417]], [[1.0, 0.0, 0.0, 0.0], [-0.0178, 0.2055, 0.0756, 0.0833], [-0.0511, 0.1142, -0.2919, 0.1011], [-0.046499999999999986, 0.2047, -0.1297, -0.2269]], [[1.0, 0.0, 0.0, 0.0], [-0.0131, 0.1088, 0.0256, 0.0383], [-0.0371, 0.0907, -0.1952, 0.054], [-0.04479999999999995, 0.1607, -0.124, -0.1258]], [[1.0, 0.0, 0.0, 0.0], [-0.03225, 0.44305, 0.23775, 0.22815], [-0.0987, 0.0857, -0.4204, 0.2274], [-0.06610000000000005, 0.231, -0.0979, -0.4177]], [[1.0, 0.0, 0.0, 0.0], [-0.03065, 0.36725, 0.18855, 0.18195], [-0.0895, 0.0898, -0.3726, 0.1864], [-0.06584999999999996, 0.21265, -0.0967499999999999, -0.35735]], [[1.0, 0.0, 0.0, 0.0], [-0.0275, 0.2664, 0.125, 0.1234], [-0.07585, 0.09075, -0.30335, 0.13315], [-0.06519999999999998, 0.1852, -0.095, -0.2725]], [[1.0, 0.0, 0.0, 0.0], [-0.02265, 0.16675, 0.06555, 0.06975], [-0.05975, 0.08375, -0.22515, 0.08215], [-0.06394999999999995, 0.15275, -0.09245, -0.18135]], [[1.0, 0.0, 0.0, 0.0], [-0.0166, 0.088, 0.0234, 0.0319], [-0.0433, 0.0678, -0.1502, 0.0435], [-0.06190000000000001, 0.1193, -0.0885, -0.1011]]]
set_14 = [[[1.0, 0.0, 0.0, 0.0], [0.00145000000000006, -0.53655, -0.54255, -0.50645], [0.01405, -0.42245, -0.30015, 0.77155], [-0.00045000000000006146, -0.62355, 0.69805, -0.08975]], [[1.0, 0.0, 0.0, 0.0], [0.0015, -0.437, -0.4389, -0.4139], [0.01245, -0.34545, -0.27795, 0.67045], [-0.0004999999999999449, -0.5516, 0.641, -0.1096]], [[1.0, 0.0, 0.0, 0.0], [0.0015, -0.3068, -0.3042, -0.292], [0.0101, -0.2451, -0.2384, 0.5276], [-0.0006500000000000394, -0.44525, 0.55615, -0.13825]], [[1.0, 0.0, 0.0, 0.0], [0.0013, -0.1819, -0.1764, -0.1744], [0.00745000000000001, -0.14905, -0.18355, 0.37245], [-0.0007999999999999674, -0.3218, 0.4561, -0.1702]], [[1.0, 0.0, 0.0, 0.0], [0.00105, -0.08775, -0.08185, -0.08555], [0.00490000000000002, -0.0761, -0.1219, 0.2319], [-0.0008999999999999564, -0.2007, 0.3537, -0.1994]], [[1.0, 0.0, 0.0, 0.0], [0.00584999999999997, -0.50485, -0.51125, -0.47605], [0.0534, -0.3971, -0.2731, 0.7145], [-0.0013499999999999623, -0.57465, 0.63785, -0.07555]], [[1.0, 0.0, 0.0, 0.0], [0.00600000000000001, -0.4111, -0.4136, -0.389], [0.0473, -0.3246, -0.2536, 0.6207], [-0.0017000000000000348, -0.5085, 0.5854, -0.0939]], [[1.0, 0.0, 0.0, 0.0], [0.00589999999999996, -0.2886, -0.2867, -0.2745], [0.0384, -0.2301, -0.2182, 0.4882], [-0.0021500000000000408, -0.41065, 0.50735, -0.12035]], [[1.0, 0.0, 0.0, 0.0], [0.0053, -0.1711, -0.1664, -0.1639], [0.02825, -0.13975, -0.16855, 0.34435], [-0.002650000000000041, -0.29715, 0.41555, -0.14985]], [[1.0, 0.0, 0.0, 0.0], [0.00415, -0.08255, -0.07725, -0.08035], [0.0185, -0.0712, -0.1122, 0.2142], [-0.003149999999999986, -0.18555, 0.32175, -0.17695]], [[1.0, 0.0, 0.0, 0.0], [0.01315, -0.45565, -0.46265, -0.42895], [0.1103, -0.3581, -0.2328, 0.628], [-0.0018000000000000238, -0.501, 0.5482, -0.0558]], [[1.0, 0.0, 0.0, 0.0], [0.01335, -0.37105, -0.37425, -0.35055], [0.09765, -0.29245, -0.21705, 0.54525], [-0.0024999999999999467, -0.4435, 0.5026, -0.0718]], [[1.0, 0.0, 0.0, 0.0], [0.013, -0.2604, -0.2595, -0.2474], [0.07925, 
-0.20705, -0.18795, 0.42855], [-0.003449999999999953, -0.35845, 0.43495, -0.09495]], [[1.0, 0.0, 0.0, 0.0], [0.0116, -0.1544, -0.1507, -0.1477], [0.0583, -0.1255, -0.146, 0.3019], [-0.0044999999999999485, -0.2597, 0.3554, -0.1208]], [[1.0, 0.0, 0.0, 0.0], [0.00905, -0.07445, -0.07005, -0.07225], [0.0382, -0.0638, -0.0977, 0.1875], [-0.005400000000000016, -0.1627, 0.2744, -0.1446]], [[1.0, 0.0, 0.0, 0.0], [0.02315, -0.39395, -0.40155, -0.37005], [0.1737, -0.3093, -0.1848, 0.5232], [-0.0016000000000000458, -0.4123, 0.4425, -0.0348]], [[1.0, 0.0, 0.0, 0.0], [0.02325, -0.32075, -0.32485, -0.30245], [0.1538, -0.2523, -0.1736, 0.454], [-0.002650000000000041, -0.36515, 0.40525, -0.04805]], [[1.0, 0.0, 0.0, 0.0], [0.0224, -0.2251, -0.2253, -0.2135], [0.12485, -0.17835, -0.15175, 0.35635], [-0.0041500000000000425, -0.29545, 0.34995, -0.06715]], [[1.0, 0.0, 0.0, 0.0], [0.0198, -0.1334, -0.1309, -0.1274], [0.09185, -0.10775, -0.11895, 0.25065], [-0.005700000000000038, -0.2145, 0.285, -0.0886]], [[1.0, 0.0, 0.0, 0.0], [0.01535, -0.06435, -0.06095, -0.06225], [0.06015, -0.05455, -0.08025, 0.15525], [-0.007050000000000001, -0.13495, 0.21915, -0.10845]], [[1.0, 0.0, 0.0, 0.0], [0.0354, -0.3258, -0.3337, -0.305], [0.23205, -0.25555, -0.13545, 0.41235], [-0.0021500000000000408, -0.31935, 0.33485, -0.01655]], [[1.0, 0.0, 0.0, 0.0], [0.03515, -0.26515, -0.26995, -0.24935], [0.20545, -0.20825, -0.12875, 0.35745], [-0.003449999999999953, -0.28305, 0.30605, -0.02685]], [[1.0, 0.0, 0.0, 0.0], [0.0334, -0.186, -0.1872, -0.176], [0.1667, -0.1468, -0.1141, 0.2801], [-0.005300000000000027, -0.2294, 0.2635, -0.0418]], [[1.0, 0.0, 0.0, 0.0], [0.02925, -0.11025, -0.10885, -0.10505], [0.12265, -0.08845, -0.09075, 0.19655], [-0.007349999999999968, -0.16685, 0.21375, -0.05855]], [[1.0, 0.0, 0.0, 0.0], [0.02255, -0.05315, -0.05075, -0.05125], [0.0803, -0.0445, -0.062, 0.1214], [-0.008999999999999952, -0.1055, 0.1634, -0.0741]]]
set_15 = [[[1.0, 0.0, 0.0, 0.0], [0.00284999999999999, 0.69455, -0.58675, -0.08095], [-0.00359999999999994, 0.000199999999999978, 0.1192, -0.923], [-0.015600000000000003, 0.6054, 0.7131, 0.114]], [[1.0, 0.0, 0.0, 0.0], [0.0025, 0.5495, -0.4792, -0.0757], [-0.00314999999999999, -0.01645, 0.06965, -0.80695], [-0.015300000000000036, 0.5424, 0.6555, 0.1464]], [[1.0, 0.0, 0.0, 0.0], [0.00205, 0.36635, -0.33845, -0.06685], [-0.00244999999999995, -0.0343500000000001, 0.0106499999999999, -0.64355], [-0.01479999999999998, 0.4503, 0.5696, 0.1917]], [[1.0, 0.0, 0.0, 0.0], [0.0015, 0.2004, -0.2031, -0.0548], [-0.00164999999999998, -0.04465, -0.03605, -0.46735], [-0.014100000000000001, 0.3447, 0.4668, 0.2395]], [[1.0, 0.0, 0.0, 0.0], [0.001, 0.0857, -0.1004, -0.0405], [-0.000950000000000006, -0.04335, -0.05745, -0.30985], [-0.013300000000000034, 0.2414, 0.3584, 0.2796]], [[1.0, 0.0, 0.0, 0.0], [0.01095, 0.65805, -0.55165, -0.07355], [-0.01405, 0.00465000000000004, 0.11935, -0.85355], [-0.05994999999999995, 0.55645, 0.65135, 0.09415]], [[1.0, 0.0, 0.0, 0.0], [0.00964999999999999, 0.52075, -0.45045, -0.06885], [-0.01225, -0.0115500000000001, 0.0721499999999999, -0.74585], [-0.058750000000000024, 0.49825, 0.59845, 0.12425]], [[1.0, 0.0, 0.0, 0.0], [0.00779999999999999, 0.3472, -0.318, -0.0609], [-0.00969999999999993, -0.0291000000000001, 0.0155, -0.5942], [-0.05685000000000001, 0.41335, 0.51955, 0.16635]], [[1.0, 0.0, 0.0, 0.0], [0.00575, 0.19005, -0.19075, -0.04995], [-0.00675000000000003, -0.03955, -0.02955, -0.43075], [-0.054400000000000004, 0.3163, 0.4254, 0.2108]], [[1.0, 0.0, 0.0, 0.0], [0.00375, 0.08135, -0.09425, -0.03705], [-0.00390000000000001, -0.0391, -0.0509, -0.2847], [-0.051449999999999996, 0.22115, 0.32635, 0.24825]], [[1.0, 0.0, 0.0, 0.0], [0.0232, 0.6009, -0.4974, -0.0626], [-0.03065, 0.0108499999999999, 0.11845, -0.74865], [-0.12634999999999996, 0.48305, 0.55955, 0.06655]], [[1.0, 0.0, 0.0, 0.0], [0.02055, 0.47555, -0.40605, -0.05865], [-0.0269499999999999, 
-0.00455000000000005, 0.0747499999999999, -0.65355], [-0.12395, 0.43225, 0.51365, 0.09315]], [[1.0, 0.0, 0.0, 0.0], [0.01665, 0.31725, -0.28645, -0.05205], [-0.0216, -0.0214, 0.0222, -0.5198], [-0.12010000000000004, 0.3582, 0.4453, 0.1304]], [[1.0, 0.0, 0.0, 0.0], [0.01225, 0.17375, -0.17165, -0.04285], [-0.0154, -0.0321, -0.0203, -0.3757], [-0.11499999999999999, 0.2736, 0.364, 0.1698]], [[1.0, 0.0, 0.0, 0.0], [0.008, 0.0745, -0.0848, -0.0318], [-0.00935000000000002, -0.03285, -0.04135, -0.24705], [-0.1089, 0.1909, 0.2788, 0.2031]], [[1.0, 0.0, 0.0, 0.0], [0.038, 0.5283, -0.4296, -0.0497], [-0.05235, 0.01715, 0.11485, -0.62195], [-0.20534999999999998, 0.39555, 0.45145, 0.03755]], [[1.0, 0.0, 0.0, 0.0], [0.03365, 0.41815, -0.35045, -0.04675], [-0.0465, 0.003, 0.0758, -0.5423], [-0.2016, 0.3536, 0.4138, 0.0598]], [[1.0, 0.0, 0.0, 0.0], [0.02735, 0.27905, -0.24715, -0.04155], [-0.0379, -0.0129, 0.0286, -0.4303], [-0.1956, 0.2925, 0.358, 0.0911]], [[1.0, 0.0, 0.0, 0.0], [0.0201, 0.153, -0.1479, -0.0344], [-0.02795, -0.02355, -0.01015, -0.30975], [-0.18775000000000003, 0.22295, 0.29195, 0.12425]], [[1.0, 0.0, 0.0, 0.0], [0.01315, 0.06565, -0.07295, -0.02565], [-0.018, -0.0256, -0.0305, -0.2023], [-0.17825000000000002, 0.15505, 0.22305, 0.15245]], [[1.0, 0.0, 0.0, 0.0], [0.0533, 0.4462, -0.3547, -0.0367], [-0.07825, 0.02205, 0.10755, -0.48855], [-0.28669999999999995, 0.3048, 0.3415, 0.0127]], [[1.0, 0.0, 0.0, 0.0], [0.04725, 0.35325, -0.28925, -0.03465], [-0.07035, 0.00954999999999995, 0.0740499999999999, -0.42535], [-0.28180000000000005, 0.2721, 0.3124, 0.0304]], [[1.0, 0.0, 0.0, 0.0], [0.0384, 0.2359, -0.2038, -0.031], [-0.0585, -0.00490000000000002, 0.0331, -0.3365], [-0.2739500000000001, 0.22455, 0.26945, 0.05525]], [[1.0, 0.0, 0.0, 0.0], [0.02835, 0.12935, -0.12185, -0.02575], [-0.04465, -0.01515, -0.00104999999999997, -0.24095], [-0.26354999999999995, 0.17055, 0.21895, 0.08165]], [[1.0, 0.0, 0.0, 0.0], [0.01855, 0.05565, -0.05995, -0.01935], [-0.0305, -0.0183, 
-0.0199, -0.156], [-0.25105, 0.11825, 0.16675, 0.10435]]]
set_16 = [[[1.0, 0.0, 0.0, 0.0], [-0.00639999999999996, 0.0253, 0.5765, -0.7138], [-0.00215000000000001, -0.87975, 0.22175, 0.14545], [0.007050000000000001, 0.26495, 0.70095, 0.58225]], [[1.0, 0.0, 0.0, 0.0], [-0.00549999999999995, 0.0155, 0.4679, -0.5904], [-0.00215, -0.72175, 0.19365, 0.12505], [0.006950000000000012, 0.23465, 0.65575, 0.55715]], [[1.0, 0.0, 0.0, 0.0], [-0.00424999999999998, 0.00474999999999998, 0.32665, -0.42535], [-0.0021, -0.5132, 0.1516, 0.0962], [0.006749999999999867, 0.18855, 0.58485, 0.51685]], [[1.0, 0.0, 0.0, 0.0], [-0.00295000000000001, -0.00264999999999999, 0.19215, -0.26155], [-0.00194999999999999, -0.31035, 0.10335, 0.06485], [0.006500000000000006, 0.1332, 0.4943, 0.4637]], [[1.0, 0.0, 0.0, 0.0], [-0.00185000000000002, -0.00484999999999999, 0.09175, -0.13275], [-0.0017, -0.1543, 0.0584, 0.0368], [0.006199999999999983, 0.0766, 0.3918, 0.4009]], [[1.0, 0.0, 0.0, 0.0], [-0.0244, 0.0253, 0.5429, -0.6689], [-0.00840000000000002, -0.8262, 0.2049, 0.135], [0.02635000000000004, 0.24405, 0.63735, 0.52645]], [[1.0, 0.0, 0.0, 0.0], [-0.021, 0.0158, 0.4406, -0.5533], [-0.00845, -0.67775, 0.17915, 0.11615], [0.025899999999999868, 0.2163, 0.5962, 0.5037]], [[1.0, 0.0, 0.0, 0.0], [-0.0162, 0.00519999999999998, 0.3075, -0.3986], [-0.00835, -0.48185, 0.14055, 0.08935], [0.025249999999999995, 0.17415, 0.53165, 0.46725]], [[1.0, 0.0, 0.0, 0.0], [-0.0113, -0.002, 0.1808, -0.2452], [-0.00775, -0.29135, 0.09605, 0.06025], [0.02425000000000005, 0.12345, 0.44925, 0.41905]], [[1.0, 0.0, 0.0, 0.0], [-0.00705, -0.00434999999999999, 0.08625, -0.12445], [-0.0066, -0.1448, 0.0544, 0.0342], [0.023099999999999954, 0.0715, 0.3561, 0.3621]], [[1.0, 0.0, 0.0, 0.0], [-0.05085, 0.0250499999999999, 0.49075, -0.59975], [-0.01835, -0.74345, 0.17925, 0.11905], [0.05285000000000001, 0.21245, 0.54315, 0.44445]], [[1.0, 0.0, 0.0, 0.0], [-0.0437, 0.0161, 0.3981, -0.4961], [-0.01845, -0.60975, 0.15705, 0.10245], [0.052000000000000046, 0.1885, 0.508, 0.4252]], [[1.0, 0.0, 0.0, 
0.0], [-0.0339, 0.00600000000000001, 0.2778, -0.3575], [-0.0181, -0.4334, 0.1236, 0.0789], [0.05055000000000004, 0.15225, 0.45295, 0.39435]], [[1.0, 0.0, 0.0, 0.0], [-0.0237, -0.001, 0.1634, -0.2199], [-0.0168, -0.2619, 0.0849, 0.0533], [0.04859999999999998, 0.1084, 0.3827, 0.3535]], [[1.0, 0.0, 0.0, 0.0], [-0.0149, -0.00360000000000001, 0.078, -0.1116], [-0.0143, -0.1302, 0.0483, 0.0303], [0.04620000000000002, 0.0635000000000001, 0.3032, 0.3052]], [[1.0, 0.0, 0.0, 0.0], [-0.081, 0.0244, 0.4252, -0.5137], [-0.03135, -0.64025, 0.14805, 0.09965], [0.07919999999999999, 0.1743, 0.433, 0.3497]], [[1.0, 0.0, 0.0, 0.0], [-0.0697, 0.016, 0.3449, -0.425], [-0.0314, -0.525, 0.1301, 0.0858], [0.07784999999999997, 0.15505, 0.40495, 0.33445]], [[1.0, 0.0, 0.0, 0.0], [-0.0542, 0.00669999999999998, 0.2406, -0.3063], [-0.0306, -0.373, 0.1029, 0.0661], [0.07565, 0.12565, 0.36095, 0.31005]], [[1.0, 0.0, 0.0, 0.0], [-0.03805, -4.99999999999945e-05, 0.14135, -0.18845], [-0.02825, -0.22535, 0.07105, 0.04475], [0.07265000000000005, 0.09005, 0.30485, 0.27775]], [[1.0, 0.0, 0.0, 0.0], [-0.02415, -0.00265, 0.06755, -0.09565], [-0.02395, -0.11195, 0.04065, 0.02555], [0.06895000000000001, 0.05345, 0.24145, 0.23955]], [[1.0, 0.0, 0.0, 0.0], [-0.10985, 0.02305, 0.35265, -0.41955], [-0.0465, -0.5265, 0.115, 0.0789], [0.09729999999999994, 0.1344, 0.3219, 0.2556]], [[1.0, 0.0, 0.0, 0.0], [-0.09475, 0.01565, 0.28595, -0.34715], [-0.04625, -0.43165, 0.10155, 0.06795], [0.09559999999999996, 0.1198, 0.301, 0.2444]], [[1.0, 0.0, 0.0, 0.0], [-0.074, 0.00719999999999998, 0.1994, -0.2502], [-0.0448, -0.3066, 0.0808, 0.0524], [0.0927, 0.0975, 0.2682, 0.2264]], [[1.0, 0.0, 0.0, 0.0], [-0.0523, 0.00100000000000003, 0.1171, -0.154], [-0.04105, -0.18515, 0.05625, 0.03555], [0.08875000000000005, 0.07045, 0.22645, 0.20265]], [[1.0, 0.0, 0.0, 0.0], [-0.03345, -0.00175, 0.05585, -0.07815], [-0.0347, -0.0919, 0.0325, 0.0204], [0.08390000000000003, 0.0426, 0.1792, 0.1745]]]
set_17 = [[[1.0, 0.0, 0.0, 0.0], [-0.00690000000000002, 0.4173, -0.2159, -0.7872], [0.00164999999999998, -0.10175, -0.88535, 0.19195], [0.02184999999999998, -0.85465, 4.99999999999945e-05, -0.43245]], [[1.0, 0.0, 0.0, 0.0], [-0.00609999999999999, 0.3676, -0.1721, -0.6305], [0.00145000000000001, -0.08955, -0.70575, 0.15375], [0.02174999999999999, -0.85025, 4.99999999999945e-05, -0.39175]], [[1.0, 0.0, 0.0, 0.0], [-0.005, 0.2959, -0.1164, -0.4296], [0.00125, -0.07215, -0.47725, 0.10475], [0.021549999999999958, -0.84235, 4.99999999999945e-05, -0.33105]], [[1.0, 0.0, 0.0, 0.0], [-0.00370000000000001, 0.2153, -0.0652, -0.2431], [0.000899999999999998, -0.0525, -0.2674, 0.0593], [0.02134999999999998, -0.83015, 4.99999999999945e-05, -0.25945]], [[1.0, 0.0, 0.0, 0.0], [-0.0024, 0.1389, -0.0291, -0.1094], [0.0006, -0.0339, -0.1192, 0.0267], [0.021050000000000013, -0.81255, -4.99999999999945e-05, -0.18715]], [[1.0, 0.0, 0.0, 0.0], [-0.02615, 0.38505, -0.20415, -0.74355], [0.00639999999999999, -0.0939, -0.8374, 0.1813], [0.08115000000000006, -0.76535, -4.99999999999945e-05, -0.39635]], [[1.0, 0.0, 0.0, 0.0], [-0.02325, 0.33915, -0.16275, -0.59555], [0.00565000000000002, -0.08265, -0.66745, 0.14525], [0.08075000000000004, -0.76145, 4.99999999999945e-05, -0.35895]], [[1.0, 0.0, 0.0, 0.0], [-0.019, 0.2729, -0.11, -0.4059], [0.00465, -0.06655, -0.45135, 0.09895], [0.08014999999999994, -0.75445, -4.99999999999945e-05, -0.30325]], [[1.0, 0.0, 0.0, 0.0], [-0.0141, 0.1986, -0.0616, -0.2297], [0.0034, -0.0484, -0.2529, 0.056], [0.07925000000000004, -0.74355, -5.00000000000222e-05, -0.23755]], [[1.0, 0.0, 0.0, 0.0], [-0.0092, 0.128, -0.0275, -0.1034], [0.00225, -0.03125, -0.11275, 0.02525], [0.07804999999999995, -0.72775, -4.99999999999945e-05, -0.17115]], [[1.0, 0.0, 0.0, 0.0], [-0.0539000000000001, 0.3365, -0.1859, -0.6756], [0.01315, -0.08205, -0.76235, 0.16475], [0.16090000000000004, -0.6358, 0.0, -0.3424]], [[1.0, 0.0, 0.0, 0.0], [-0.0479499999999999, 0.29625, -0.14815, -0.54115], 
[0.0117, -0.0723, -0.6077, 0.132], [0.16015000000000001, -0.63255, 4.99999999999945e-05, -0.30995]], [[1.0, 0.0, 0.0, 0.0], [-0.03915, 0.23825, -0.10025, -0.36885], [0.00955, -0.05815, -0.41085, 0.08995], [0.15890000000000004, -0.6268, 5.55111512312578e-17, -0.2617]], [[1.0, 0.0, 0.0, 0.0], [-0.029, 0.1733, -0.0561, -0.2088], [0.00705, -0.04225, -0.23025, 0.05095], [0.15715000000000007, -0.61785, -4.99999999999945e-05, -0.20475]], [[1.0, 0.0, 0.0, 0.0], [-0.019, 0.1117, -0.025, -0.0941], [0.00465, -0.02725, -0.10265, 0.02295], [0.15474999999999994, -0.60485, 4.99999999999945e-05, -0.14725]], [[1.0, 0.0, 0.0, 0.0], [-0.0848, 0.278, -0.1627, -0.5897], [0.0207, -0.0678, -0.6675, 0.1438], [0.23869999999999997, -0.4887, 0.0, -0.2783]], [[1.0, 0.0, 0.0, 0.0], [-0.0754, 0.2446, -0.1298, -0.4724], [0.0184, -0.0597, -0.532, 0.1152], [0.23760000000000003, -0.4863, 0.0, -0.2518]], [[1.0, 0.0, 0.0, 0.0], [-0.0616, 0.1966, -0.0877, -0.3221], [0.01505, -0.04795, -0.35975, 0.07855], [0.2358, -0.482, 0.0, -0.2124]], [[1.0, 0.0, 0.0, 0.0], [-0.0456, 0.1428, -0.0491, -0.1824], [0.0111, -0.0348, -0.2016, 0.0445], [0.23320000000000002, -0.4752, 0.0, -0.1659]], [[1.0, 0.0, 0.0, 0.0], [-0.02985, 0.09195, -0.02195, -0.08225], [0.00725, -0.02235, -0.08985, 0.02005], [0.22970000000000002, -0.4653, -1.38777878078145e-17, -0.119]], [[1.0, 0.0, 0.0, 0.0], [-0.1131, 0.2167, -0.1368, -0.4935], [0.02755, -0.05285, -0.56085, 0.12035], [0.29315, -0.34635, -4.99999999999667e-05, -0.21225]], [[1.0, 0.0, 0.0, 0.0], [-0.1006, 0.1905, -0.109, -0.3955], [0.02455, -0.04645, -0.44705, 0.09645], [0.29184999999999994, -0.34475, -4.99999999999945e-05, -0.19195]], [[1.0, 0.0, 0.0, 0.0], [-0.08215, 0.15295, -0.07375, -0.26965], [0.02005, -0.03735, -0.30235, 0.06575], [0.2896000000000001, -0.3417, 2.77555756156289e-17, -0.1617]], [[1.0, 0.0, 0.0, 0.0], [-0.0608, 0.111, -0.0413, -0.1528], [0.01485, -0.02705, -0.16945, 0.03725], [0.2864, -0.337, 0.0, -0.1261]], [[1.0, 0.0, 0.0, 0.0], [-0.0398, 0.0714, -0.0184, 
-0.069], [0.0097, -0.0174, -0.0755, 0.0168], [0.28215000000000007, -0.33005, -4.99999999999945e-05, -0.09015]]]
set_18 = [[[1.0, 0.0, 0.0, 0.0], [0.00985, -0.92355, -0.01655, 0.06185], [-0.00295000000000001, -0.05645, 0.22465, -0.88415], [0.0004999999999999449, 0.00589999999999999, -0.9175, -0.2298]], [[1.0, 0.0, 0.0, 0.0], [0.00905, -0.78425, -0.00945, 0.06585], [-0.00244999999999995, -0.0344500000000001, 0.18735, -0.71255], [0.0003500000000000725, 0.01505, -0.85195, -0.20755]], [[1.0, 0.0, 0.0, 0.0], [0.0079, -0.5919, -0.00170000000000001, 0.0667], [-0.00175000000000003, -0.0105499999999999, 0.13735, -0.49115], [5.0000000000050004e-05, 0.02795, -0.74955, -0.17535]], [[1.0, 0.0, 0.0, 0.0], [0.00655, -0.39085, 0.00285, 0.05995], [-0.00105, 0.00505, 0.08735, -0.28335], [-0.00035000000000001696, 0.04155, -0.62025, -0.13905]], [[1.0, 0.0, 0.0, 0.0], [0.0052, -0.2195, 0.00289999999999999, 0.0453], [-0.000550000000000023, 0.00945000000000001, 0.04715, -0.13165], [-0.0008999999999999564, 0.0524, -0.4763, -0.1043]], [[1.0, 0.0, 0.0, 0.0], [0.03815, -0.86005, -0.01685, 0.05395], [-0.0112, -0.0564, 0.2102, -0.8339], [0.0010499999999999954, 0.00315000000000001, -0.83565, -0.21085]], [[1.0, 0.0, 0.0, 0.0], [0.035, -0.7302, -0.0098, 0.0583], [-0.00924999999999998, -0.03505, 0.17525, -0.67205], [0.00039999999999995595, 0.0115, -0.776, -0.1903]], [[1.0, 0.0, 0.0, 0.0], [0.03055, -0.55105, -0.00225, 0.05985], [-0.00659999999999999, -0.0117, 0.1283, -0.4632], [-0.0007500000000000284, 0.02355, -0.68275, -0.16055]], [[1.0, 0.0, 0.0, 0.0], [0.02535, -0.36385, 0.00245, 0.05445], [-0.004, 0.00380000000000003, 0.0816, -0.2672], [-0.0022500000000000298, 0.03615, -0.56495, -0.12705]], [[1.0, 0.0, 0.0, 0.0], [0.02025, -0.20435, 0.00265, 0.04145], [-0.002, 0.00850000000000001, 0.0439, -0.1241], [-0.0042999999999999705, 0.0465, -0.4338, -0.095]], [[1.0, 0.0, 0.0, 0.0], [0.0808, -0.7629, -0.0169, 0.0424], [-0.0229, -0.0559999999999999, 0.188, -0.7559], [-0.0007999999999999674, -0.000600000000000017, -0.7143, -0.1825]], [[1.0, 0.0, 0.0, 0.0], [0.0744, -0.6477, -0.0103, 0.0471], [-0.0188499999999999, 
-0.0356500000000001, 0.15655, -0.60915], [-0.0021999999999999797, 0.0068, -0.6633, -0.1645]], [[1.0, 0.0, 0.0, 0.0], [0.06505, -0.48865, -0.00294999999999999, 0.04975], [-0.0135, -0.0133, 0.1146, -0.4198], [-0.0044999999999999485, 0.0173, -0.5836, -0.1385]], [[1.0, 0.0, 0.0, 0.0], [0.05435, -0.32255, 0.00165, 0.04615], [-0.00814999999999999, 0.00184999999999999, 0.07275, -0.24215], [-0.007749999999999979, 0.02865, -0.48295, -0.10925]], [[1.0, 0.0, 0.0, 0.0], [0.0436, -0.1811, 0.0023, 0.0357], [-0.004, 0.0069, 0.0391, -0.1125], [-0.011900000000000022, 0.0378, -0.3708, -0.0813]], [[1.0, 0.0, 0.0, 0.0], [0.1323, -0.6438, -0.0168, 0.0292], [-0.0355000000000001, -0.0545999999999999, 0.1605, -0.6576], [-0.008550000000000002, -0.00414999999999999, -0.57175, -0.14885]], [[1.0, 0.0, 0.0, 0.0], [0.12225, -0.54645, -0.01065, 0.03425], [-0.0292, -0.0358, 0.1335, -0.5299], [-0.010750000000000037, 0.00194999999999998, -0.53095, -0.13395]], [[1.0, 0.0, 0.0, 0.0], [0.1075, -0.4121, -0.0039, 0.0379], [-0.02085, -0.01495, 0.09765, -0.36515], [-0.014450000000000018, 0.01075, -0.46725, -0.11245]], [[1.0, 0.0, 0.0, 0.0], [0.0904, -0.2719, 0.000800000000000009, 0.0364], [-0.0126, -0.000200000000000006, 0.0618, -0.2106], [-0.019649999999999945, 0.02025, -0.38665, -0.08825]], [[1.0, 0.0, 0.0, 0.0], [0.07305, -0.15265, 0.00195, 0.02875], [-0.0062, 0.0052, 0.0332, -0.0978], [-0.0262, 0.0281, -0.2969, -0.0652]], [[1.0, 0.0, 0.0, 0.0], [0.18645, -0.51565, -0.01605, 0.01645], [-0.0463, -0.052, 0.1305, -0.5481], [-0.025749999999999995, -0.00675000000000001, -0.42735, -0.11415]], [[1.0, 0.0, 0.0, 0.0], [0.173, -0.4376, -0.0107, 0.0216], [-0.03805, -0.03515, 0.10855, -0.44155], [-0.028799999999999992, -0.00190000000000001, -0.3969, -0.1025]], [[1.0, 0.0, 0.0, 0.0], [0.1531, -0.3298, -0.00449999999999999, 0.0261], [-0.02705, -0.01605, 0.0791499999999999, -0.30425], [-0.03384999999999999, 0.00505, -0.34925, -0.08575]], [[1.0, 0.0, 0.0, 0.0], [0.1299, -0.2175, -9.99999999999959e-05, 0.0264], 
[-0.01625, -0.00234999999999999, 0.05005, -0.17545], [-0.04095000000000004, 0.01255, -0.28895, -0.06685]], [[1.0, 0.0, 0.0, 0.0], [0.106, -0.122, 0.0014, 0.0216], [-0.0079, 0.0034, 0.0267, -0.0815], [-0.04984999999999995, 0.01895, -0.22195, -0.04895]]]
set_19 = [[[1.0, 0.0, 0.0, 0.0], [0.0049499999999999, -0.45105, 0.0436500000000001, 0.79885], [-0.00970000000000004, -0.647, -0.5691, -0.3359], [-0.013549999999999951, 0.46145, -0.75955, 0.31445]], [[1.0, 0.0, 0.0, 0.0], [0.00409999999999999, -0.3605, 0.0235, 0.6643], [-0.00905, -0.52105, -0.52515, -0.26805], [-0.013700000000000045, 0.3751, -0.7281, 0.2858]], [[1.0, 0.0, 0.0, 0.0], [0.00299999999999995, -0.245, 0.00120000000000003, 0.4836], [-0.00805, -0.35995, -0.45835, -0.17645], [-0.013900000000000023, 0.2603, -0.6764, 0.247]], [[1.0, 0.0, 0.0, 0.0], [0.00190000000000001, -0.1385, -0.0137, 0.3029], [-0.00689999999999999, -0.2104, -0.3765, -0.0846], [-0.014049999999999951, 0.14755, -0.60625, 0.20825]], [[1.0, 0.0, 0.0, 0.0], [0.001, -0.0627, -0.0173, 0.1585], [-0.0057, -0.1024, -0.2879, -0.0119], [-0.014150000000000051, 0.06045, -0.52035, 0.17835]], [[1.0, 0.0, 0.0, 0.0], [0.0188999999999999, -0.4264, 0.0444000000000001, 0.7477], [-0.03755, -0.61035, -0.51925, -0.31715], [-0.05299999999999999, 0.4341, -0.6865, 0.2883]], [[1.0, 0.0, 0.0, 0.0], [0.0157, -0.3407, 0.0248999999999999, 0.6217], [-0.03495, -0.49145, -0.47905, -0.25365], [-0.053600000000000037, 0.3532, -0.658, 0.2615]], [[1.0, 0.0, 0.0, 0.0], [0.01145, -0.23155, 0.00324999999999998, 0.45255], [-0.03115, -0.33925, -0.41795, -0.16795], [-0.054300000000000015, 0.2455, -0.6114, 0.2252]], [[1.0, 0.0, 0.0, 0.0], [0.00714999999999999, -0.13085, -0.01145, 0.28335], [-0.0267, -0.198, -0.3431, -0.082], [-0.054950000000000054, 0.13965, -0.54805, 0.18895]], [[1.0, 0.0, 0.0, 0.0], [0.00375, -0.05915, -0.01555, 0.14825], [-0.02215, -0.09615, -0.26225, -0.01385], [-0.05539999999999995, 0.0579, -0.4703, 0.1608]], [[1.0, 0.0, 0.0, 0.0], [0.03905, -0.38775, 0.04525, 0.66905], [-0.0797, -0.5536, -0.4452, -0.2877], [-0.11509999999999998, 0.3917, -0.579, 0.2494]], [[1.0, 0.0, 0.0, 0.0], [0.03255, -0.30985, 0.0267500000000001, 0.55625], [-0.0743, -0.4456, -0.4105, -0.2309], [-0.11624999999999996, 0.31895, -0.55515, 0.22545]], 
[[1.0, 0.0, 0.0, 0.0], [0.02375, -0.21055, 0.00605, 0.40475], [-0.0664, -0.3072, -0.3578, -0.1543], [-0.11775000000000002, 0.22225, -0.51585, 0.19305]], [[1.0, 0.0, 0.0, 0.0], [0.0149, -0.1189, -0.00830000000000003, 0.2534], [-0.0571, -0.1789, -0.2936, -0.0774], [-0.11915000000000003, 0.12725, -0.46235, 0.16055]], [[1.0, 0.0, 0.0, 0.0], [0.00785, -0.05385, -0.01295, 0.13245], [-0.0474, -0.0865, -0.2243, -0.0162], [-0.11989999999999995, 0.0536, -0.3969, 0.1352]], [[1.0, 0.0, 0.0, 0.0], [0.06185, -0.33905, 0.04515, 0.57155], [-0.13085, -0.48225, -0.35815, -0.24995], [-0.19484999999999997, 0.33815, -0.45485, 0.20355]], [[1.0, 0.0, 0.0, 0.0], [0.0515, -0.2709, 0.0282, 0.4751], [-0.1222, -0.3879, -0.3299, -0.2015], [-0.19670000000000004, 0.2759, -0.4361, 0.1832]], [[1.0, 0.0, 0.0, 0.0], [0.0376, -0.184, 0.00900000000000001, 0.3456], [-0.1095, -0.2671, -0.2873, -0.1362], [-0.19900000000000007, 0.1928, -0.4054, 0.1556]], [[1.0, 0.0, 0.0, 0.0], [0.0237, -0.104, -0.0048, 0.2162], [-0.0945, -0.1551, -0.2354, -0.0705], [-0.20114999999999994, 0.11115, -0.36345, 0.12775]], [[1.0, 0.0, 0.0, 0.0], [0.0125, -0.047, -0.00980000000000002, 0.1129], [-0.07875, -0.07455, -0.17965, -0.01815], [-0.20245000000000002, 0.04785, -0.31195, 0.10595]], [[1.0, 0.0, 0.0, 0.0], [0.0831, -0.2844, 0.0438, 0.4652], [-0.18495, -0.40275, -0.26955, -0.20725], [-0.28665000000000007, 0.27885, -0.33155, 0.15665]], [[1.0, 0.0, 0.0, 0.0], [0.0693, -0.2272, 0.0287000000000001, 0.3866], [-0.17315, -0.32365, -0.24805, -0.16795], [-0.2889999999999999, 0.2278, -0.3181, 0.1402]], [[1.0, 0.0, 0.0, 0.0], [0.0506, -0.1543, 0.0115, 0.2811], [-0.1557, -0.2225, -0.2156, -0.1149], [-0.2920499999999999, 0.15985, -0.29575, 0.11775]], [[1.0, 0.0, 0.0, 0.0], [0.03195, -0.08715, -0.00134999999999999, 0.17575], [-0.135, -0.1287, -0.1763, -0.0615], [-0.29485000000000006, 0.09295, -0.26525, 0.09515]], [[1.0, 0.0, 0.0, 0.0], [0.01695, -0.03935, -0.00674999999999999, 0.09165], [-0.113, -0.0614, -0.1344, -0.0188], [-0.2966, 0.041, 
-0.2276, 0.0773]]]
set_20 = [[[1.0, 0.0, 0.0, 0.0], [0.00669999999999998, 0.7822, 0.3313, 0.3497], [0.000800000000000023, 0.0499999999999999, 0.6066, -0.7199], [-0.0009999999999999454, -0.4718, 0.6176, 0.5002]], [[1.0, 0.0, 0.0, 0.0], [0.00635000000000002, 0.63365, 0.27025, 0.31655], [0.000700000000000034, 0.0363, 0.5372, -0.6751], [-0.0010499999999999954, -0.38245, 0.51615, 0.44455]], [[1.0, 0.0, 0.0, 0.0], [0.00584999999999997, 0.44105, 0.18495, 0.26995], [0.00055000000000005, 0.02015, 0.44115, -0.59975], [-0.0011000000000000454, -0.2652, 0.3744, 0.3689]], [[1.0, 0.0, 0.0, 0.0], [0.00519999999999998, 0.2589, 0.0953, 0.2197], [0.000349999999999961, 0.00735000000000008, 0.33775, -0.49725], [-0.0012499999999999734, -0.15255, 0.22315, 0.29245]], [[1.0, 0.0, 0.0, 0.0], [0.00445000000000001, 0.12395, 0.01975, 0.17415], [0.000250000000000028, 0.000549999999999995, 0.24125, -0.37755], [-0.0013500000000000179, -0.06855, 0.08925, 0.23175]], [[1.0, 0.0, 0.0, 0.0], [0.02625, 0.73695, 0.31115, 0.32075], [0.00235000000000007, 0.0482499999999999, 0.55955, -0.65375], [-0.0042999999999999705, -0.4443, 0.577, 0.461]], [[1.0, 0.0, 0.0, 0.0], [0.02505, 0.59685, 0.25435, 0.29005], [0.002, 0.0351, 0.4948, -0.6136], [-0.004450000000000065, -0.36035, 0.48265, 0.40905]], [[1.0, 0.0, 0.0, 0.0], [0.02315, 0.41535, 0.17505, 0.24675], [0.00150000000000006, 0.0196999999999999, 0.4054, -0.5458], [-0.004750000000000032, -0.24995, 0.35095, 0.33835]], [[1.0, 0.0, 0.0, 0.0], [0.0206, 0.2437, 0.0916, 0.2001], [0.000950000000000006, 0.00734999999999997, 0.30965, -0.45305], [-0.005149999999999988, -0.14405, 0.21045, 0.26685]], [[1.0, 0.0, 0.0, 0.0], [0.0176, 0.1166, 0.0211, 0.158], [0.0005, 0.000699999999999978, 0.2207, -0.3443], [-0.005649999999999988, -0.06485, 0.08625, 0.20995]], [[1.0, 0.0, 0.0, 0.0], [0.05755, 0.66675, 0.27975, 0.27765], [0.00244999999999995, 0.04545, 0.48895, -0.55565], [-0.010899999999999965, -0.4015, 0.5144, 0.4023]], [[1.0, 0.0, 0.0, 0.0], [0.055, 0.5398, 0.2295, 0.2505], [0.00195000000000001, 
0.03325, 0.43125, -0.52245], [-0.011300000000000032, -0.3257, 0.431, 0.356]], [[1.0, 0.0, 0.0, 0.0], [0.05085, 0.37545, 0.15925, 0.21235], [0.00119999999999998, 0.0188, 0.352, -0.4657], [-0.011849999999999972, -0.22635, 0.31455, 0.29295]], [[1.0, 0.0, 0.0, 0.0], [0.04535, 0.22015, 0.08525, 0.17125], [0.000399999999999956, 0.00730000000000003, 0.2677, -0.3874], [-0.012599999999999945, -0.1307, 0.1904, 0.229]], [[1.0, 0.0, 0.0, 0.0], [0.0387, 0.1053, 0.0226, 0.1342], [-0.000200000000000033, 0.000900000000000012, 0.1899, -0.2948], [-0.013549999999999951, -0.05915, 0.08085, 0.17795]], [[1.0, 0.0, 0.0, 0.0], [0.09855, 0.57855, 0.23985, 0.22655], [-0.00145000000000001, 0.04175, 0.40455, -0.44055], [-0.022550000000000014, -0.34765, 0.43675, 0.33225]], [[1.0, 0.0, 0.0, 0.0], [0.09425, 0.46825, 0.19775, 0.20385], [-0.00185000000000002, 0.03065, 0.35555, -0.41535], [-0.023050000000000015, -0.28235, 0.36655, 0.29295]], [[1.0, 0.0, 0.0, 0.0], [0.08725, 0.32555, 0.13875, 0.17185], [-0.00245000000000001, 0.01765, 0.28855, -0.37155], [-0.023900000000000032, -0.1964, 0.2687, 0.2393]], [[1.0, 0.0, 0.0, 0.0], [0.07795, 0.19065, 0.07635, 0.13745], [-0.00290000000000001, 0.00700000000000001, 0.2179, -0.3101], [-0.025099999999999956, -0.1137, 0.1646, 0.1849]], [[1.0, 0.0, 0.0, 0.0], [0.0667, 0.0911, 0.0234, 0.1066], [-0.00305, 0.00114999999999998, 0.15365, -0.23655], [-0.026499999999999968, -0.0518, 0.0729, 0.1412]], [[1.0, 0.0, 0.0, 0.0], [0.14625, 0.48065, 0.19545, 0.17405], [-0.0111, 0.037, 0.3167, -0.3243], [-0.04130000000000006, -0.2877, 0.3522, 0.2595]], [[1.0, 0.0, 0.0, 0.0], [0.13995, 0.38885, 0.16205, 0.15605], [-0.01105, 0.02745, 0.27705, -0.30705], [-0.04194999999999999, -0.23395, 0.29615, 0.22775]], [[1.0, 0.0, 0.0, 0.0], [0.12975, 0.27015, 0.11515, 0.13065], [-0.01075, 0.01595, 0.22315, -0.27605], [-0.04305000000000003, -0.16305, 0.21815, 0.18445]], [[1.0, 0.0, 0.0, 0.0], [0.1161, 0.1581, 0.0653, 0.1034], [-0.01005, 0.00665000000000002, 0.16695, -0.23155], 
[-0.044499999999999984, -0.0948, 0.1353, 0.1403]], [[1.0, 0.0, 0.0, 0.0], [0.0997, 0.0753, 0.0227, 0.0791], [-0.00879999999999997, 0.00129999999999997, 0.1166, -0.1773], [-0.0464, -0.0434, 0.0627, 0.1047]]]
set_21 = [[[1.0, 0.0, 0.0, 0.0], [0.01215, -0.51565, -0.04245, -0.78065], [-0.000600000000000003, 0.0, -0.92, 0.0514], [-0.011100000000000054, -0.7606, 0.0289, 0.5305]], [[1.0, 0.0, 0.0, 0.0], [0.0114, -0.4238, -0.0372, -0.7208], [-0.000399999999999998, 0.0, -0.7667, 0.0453], [-0.010750000000000037, -0.62795, 0.02545, 0.49235]], [[1.0, 0.0, 0.0, 0.0], [0.0102, -0.3022, -0.0299, -0.6279], [-0.000150000000000004, 5.00000000000014e-05, -0.55975, 0.03675], [-0.010199999999999987, -0.451, 0.0206000000000001, 0.4331]], [[1.0, 0.0, 0.0, 0.0], [0.00864999999999994, -0.18335, -0.0221499999999999, -0.51155], [5.00000000000014e-05, 4.9999999999998e-05, -0.35135, 0.02735], [-0.009549999999999947, -0.27615, 0.01555, 0.35855]], [[1.0, 0.0, 0.0, 0.0], [0.00690000000000002, -0.0916, -0.0152, -0.3838], [0.000199999999999999, 0.0, -0.1837, 0.0185], [-0.008799999999999975, -0.1391, 0.0109, 0.2761]], [[1.0, 0.0, 0.0, 0.0], [0.0460999999999999, -0.484, -0.0391999999999999, -0.712], [-0.0021, 0.0, -0.8606, 0.0474], [-0.04269999999999996, -0.7131, 0.0267, 0.4833]], [[1.0, 0.0, 0.0, 0.0], [0.04325, -0.39775, -0.03435, -0.65745], [-0.00135, -5.00000000000014e-05, -0.71715, 0.04175], [-0.04135, -0.58875, 0.02345, 0.44855]], [[1.0, 0.0, 0.0, 0.0], [0.0387, -0.2836, -0.0276, -0.5728], [-0.000399999999999998, 0.0, -0.5236, 0.0339], [-0.039400000000000046, -0.4229, 0.019, 0.3945]], [[1.0, 0.0, 0.0, 0.0], [0.0329, -0.1721, -0.0204, -0.4667], [0.000450000000000002, -5.00000000000014e-05, -0.32875, 0.02515], [-0.03685000000000005, -0.25895, 0.01425, 0.32655]], [[1.0, 0.0, 0.0, 0.0], [0.02625, -0.08595, -0.01385, -0.35015], [0.00085, 5.00000000000014e-05, -0.17185, 0.01695], [-0.03410000000000002, -0.1305, 0.00999999999999995, 0.2514]], [[1.0, 0.0, 0.0, 0.0], [0.0949, -0.4352, -0.0344, -0.61], [-0.00365, 5.00000000000014e-05, -0.76925, 0.04145], [-0.09004999999999996, -0.63985, 0.02335, 0.41325]], [[1.0, 0.0, 0.0, 0.0], [0.08905, -0.35765, -0.03005, -0.56325], [-0.0021, 6.93889390390723e-18, 
-0.6411, 0.0365], [-0.08749999999999997, -0.5283, 0.0205, 0.3835]], [[1.0, 0.0, 0.0, 0.0], [0.0798, -0.2549, -0.0241, -0.4908], [-0.000299999999999998, -3.46944695195361e-18, -0.468, 0.0295], [-0.08359999999999995, -0.3795, 0.0166, 0.3372]], [[1.0, 0.0, 0.0, 0.0], [0.06795, -0.15465, -0.01765, -0.39995], [0.00125, 5.00000000000014e-05, -0.29375, 0.02185], [-0.07869999999999999, -0.2324, 0.0124, 0.2791]], [[1.0, 0.0, 0.0, 0.0], [0.05455, -0.07725, -0.01205, -0.30015], [0.0021, 0.0, -0.1536, 0.0147], [-0.07329999999999998, -0.1172, 0.0086, 0.2148]], [[1.0, 0.0, 0.0, 0.0], [0.1494, -0.3743, -0.0285, -0.4899], [-0.004, 0.0, -0.6562, 0.0343], [-0.14704999999999996, -0.54875, 0.01925, 0.33095]], [[1.0, 0.0, 0.0, 0.0], [0.1404, -0.3076, -0.025, -0.4524], [-0.00185, 5.00000000000014e-05, -0.54675, 0.03015], [-0.14335000000000003, -0.45315, 0.01695, 0.30705]], [[1.0, 0.0, 0.0, 0.0], [0.1261, -0.2192, -0.0199, -0.3942], [0.000899999999999998, 3.46944695195361e-18, -0.3992, 0.0243], [-0.13759999999999994, -0.3255, 0.0136000000000001, 0.27]], [[1.0, 0.0, 0.0, 0.0], [0.10775, -0.13295, -0.01445, -0.32135], [0.00305, 4.9999999999998e-05, -0.25055, 0.01795], [-0.13050000000000006, -0.1994, 0.0101, 0.2234]], [[1.0, 0.0, 0.0, 0.0], [0.08685, -0.06635, -0.00975000000000001, -0.24125], [0.004, 0.0, -0.131, 0.012], [-0.12274999999999997, -0.10045, 0.00694999999999998, 0.17185]], [[1.0, 0.0, 0.0, 0.0], [0.2008, -0.3074, -0.0224, -0.3677], [-0.0023, 0.0, -0.5329, 0.0268], [-0.20795000000000002, -0.44875, 0.01515, 0.24755]], [[1.0, 0.0, 0.0, 0.0], [0.189, -0.2526, -0.0196, -0.3396], [0.000400000000000001, 0.0, -0.4441, 0.0235], [-0.20344999999999996, -0.37065, 0.01315, 0.22965]], [[1.0, 0.0, 0.0, 0.0], [0.17025, -0.17995, -0.01545, -0.29595], [0.00355, 4.9999999999998e-05, -0.32425, 0.01885], [-0.19655, -0.26625, 0.01055, 0.20185]], [[1.0, 0.0, 0.0, 0.0], [0.1462, -0.1091, -0.0113, -0.2413], [0.00595, -5.00000000000014e-05, -0.20355, 0.01385], [-0.18799999999999994, -0.1632, 
0.00779999999999997, 0.167]], [[1.0, 0.0, 0.0, 0.0], [0.1186, -0.0544, -0.00749999999999998, -0.1812], [0.00665, -4.99999999999997e-05, -0.10635, 0.00925], [-0.17880000000000001, -0.0822, 0.0053, 0.1284]]]
set_22 = [[[1.0, 0.0, 0.0, 0.0], [0.0, 0.828, 0.1638, -0.3437], [-0.00340000000000007, 0.3657, -0.0851, 0.8425], [-0.0009500000000000064, 0.10975, -0.92865, -0.18275]], [[1.0, 0.0, 0.0, 0.0], [0.0, 0.66, 0.1278, -0.2758], [-0.00324999999999998, 0.29735, -0.00605, 0.71985], [-0.0009999999999999454, 0.0816, -0.8816, -0.2376]], [[1.0, 0.0, 0.0, 0.0], [0.0, 0.4463, 0.0828, -0.1889], [-0.00305, 0.20905, 0.09155, 0.55995], [-0.0009500000000000064, 0.04535, -0.81855, -0.30895]], [[1.0, 0.0, 0.0, 0.0], [0.0, 0.2501, 0.0429, -0.1081], [-0.00274999999999997, 0.12555, 0.17385, 0.40555], [-0.0009500000000000064, 0.01215, -0.75555, -0.37605]], [[1.0, 0.0, 0.0, 0.0], [0.0, 0.1115, 0.0164, -0.05], [-0.00244999999999995, 0.0635499999999999, 0.21865, 0.28345], [-0.0009999999999999454, -0.00969999999999999, -0.7057, -0.4236]], [[1.0, 0.0, 0.0, 0.0], [4.99999999999945e-05, 0.78305, 0.15565, -0.32455], [-0.01375, 0.34425, -0.0963499999999999, 0.78425], [-0.003599999999999992, 0.1053, -0.8422, -0.1497]], [[1.0, 0.0, 0.0, 0.0], [0.0, 0.6242, 0.1215, -0.2604], [-0.0132, 0.2797, -0.0212, 0.6687], [-0.003599999999999992, 0.0787, -0.7981, -0.2014]], [[1.0, 0.0, 0.0, 0.0], [4.99999999999945e-05, 0.42205, 0.07885, -0.17825], [-0.0124, 0.1965, 0.0717, 0.5183], [-0.003599999999999992, 0.0445, -0.739, -0.2686]], [[1.0, 0.0, 0.0, 0.0], [0.0, 0.2365, 0.0409, -0.102], [-0.01125, 0.11795, 0.15055, 0.37325], [-0.0036000000000000476, 0.0131000000000001, -0.6801, -0.3319]], [[1.0, 0.0, 0.0, 0.0], [0.0, 0.1054, 0.0157, -0.0472], [-0.00985, 0.05945, 0.19425, 0.25905], [-0.0036499999999999866, -0.00785000000000002, -0.63335, -0.37695]], [[1.0, 0.0, 0.0, 0.0], [0.000149999999999983, 0.71295, 0.14285, -0.29475], [-0.0313500000000001, 0.31105, -0.11045, 0.69605], [-0.00714999999999999, 0.09795, -0.71535, -0.10365]], [[1.0, 0.0, 0.0, 0.0], [0.000149999999999983, 0.56835, 0.11155, -0.23645], [-0.0302, 0.2526, -0.0416, 0.5915], [-0.00720000000000004, 0.0738, -0.6759, -0.1503]], [[1.0, 0.0, 0.0, 0.0], 
[9.9999999999989e-05, 0.3843, 0.0725, -0.1618], [-0.0284, 0.1772, 0.0439, 0.4556], [-0.007199999999999929, 0.0427, -0.623, -0.2111]], [[1.0, 0.0, 0.0, 0.0], [4.99999999999945e-05, 0.21535, 0.03785, -0.09255], [-0.0258, 0.1061, 0.1171, 0.325], [-0.007249999999999979, 0.01405, -0.57015, -0.26855]], [[1.0, 0.0, 0.0, 0.0], [4.99999999999945e-05, 0.09595, 0.01455, -0.04275], [-0.0226, 0.0533, 0.1589, 0.2229], [-0.007300000000000084, -0.00519999999999998, -0.5283, -0.3096]], [[1.0, 0.0, 0.0, 0.0], [0.000400000000000011, 0.6242, 0.1264, -0.2571], [-0.0562999999999999, 0.2696, -0.122, 0.5892], [-0.011600000000000055, 0.0878, -0.5689, -0.0548]], [[1.0, 0.0, 0.0, 0.0], [0.000349999999999989, 0.49755, 0.09885, -0.20625], [-0.0543, 0.2187, -0.0612, 0.4983], [-0.011700000000000044, 0.0669, -0.5351, -0.0951]], [[1.0, 0.0, 0.0, 0.0], [0.00025, 0.33645, 0.06445, -0.14105], [-0.051, 0.1531, 0.0145, 0.3805], [-0.011800000000000033, 0.0399, -0.4898, -0.1478]], [[1.0, 0.0, 0.0, 0.0], [0.000200000000000006, 0.1885, 0.0337, -0.0806], [-0.0465, 0.0914, 0.0802, 0.2678], [-0.011900000000000022, 0.0147, -0.4447, -0.1977]], [[1.0, 0.0, 0.0, 0.0], [9.99999999999959e-05, 0.0841, 0.0132, -0.0372], [-0.04075, 0.04565, 0.11895, 0.18055], [-0.01200000000000001, -0.00240000000000001, -0.4089, -0.2337]], [[1.0, 0.0, 0.0, 0.0], [0.000850000000000017, 0.52455, 0.10765, -0.21515], [-0.0876, 0.2237, -0.1268, 0.4755], [-0.01855000000000001, 0.07565, -0.42325, -0.01255]], [[1.0, 0.0, 0.0, 0.0], [0.000699999999999978, 0.4182, 0.0844, -0.1725], [-0.08455, 0.18125, -0.07525, 0.39985], [-0.018750000000000044, 0.05835, -0.39575, -0.04585]], [[1.0, 0.0, 0.0, 0.0], [0.000600000000000003, 0.2827, 0.055, -0.1179], [-0.0795, 0.1266, -0.0108, 0.302], [-0.018899999999999972, 0.0357, -0.3589, -0.0895]], [[1.0, 0.0, 0.0, 0.0], [0.000399999999999998, 0.1584, 0.0289, -0.0673], [-0.0725, 0.0752, 0.0458, 0.2089], [-0.01920000000000005, 0.0147, -0.322, -0.131]], [[1.0, 0.0, 0.0, 0.0], [0.000199999999999999, 0.0707, 0.0114, 
-0.031], [-0.06365, 0.03735, 0.08045, 0.13755], [-0.01940000000000003, 9.9999999999989e-05, -0.2928, -0.1612]]]
set_23 = [[[1.0, 0.0, 0.0, 0.0], [-0.0021, 0.1007, -0.9088, -0.0242], [-0.00429999999999997, 0.2624, -0.00190000000000001, 0.8778], [0.0041500000000000425, -0.90705, -0.11825, 0.27505]], [[1.0, 0.0, 0.0, 0.0], [-0.0016, 0.0685, -0.7375, -0.0025], [-0.00359999999999994, 0.2092, -0.0146000000000001, 0.7165], [0.0040999999999999925, -0.8707, -0.1331, 0.2728]], [[1.0, 0.0, 0.0, 0.0], [-0.001, 0.0295, -0.5157, 0.0202], [-0.00260000000000005, 0.1414, -0.0258999999999999, 0.5051], [0.0040000000000000036, -0.8122, -0.1532, 0.2702]], [[1.0, 0.0, 0.0, 0.0], [-0.000349999999999996, -0.00285000000000001, -0.30615, 0.03345], [-0.00159999999999999, 0.0792, -0.029, 0.3016], [0.0040000000000000036, -0.7341, -0.1738, 0.2683]], [[1.0, 0.0, 0.0, 0.0], [0.000150000000000004, -0.02055, -0.15135, 0.03395], [-0.000799999999999995, 0.0353, -0.023, 0.1474], [0.0039000000000000146, -0.6389, -0.1906, 0.2676]], [[1.0, 0.0, 0.0, 0.0], [-0.0079, 0.0984, -0.8558, -0.0276], [-0.0163, 0.2481, 0.00190000000000001, 0.8254], [0.015449999999999964, -0.81955, -0.10215, 0.24655]], [[1.0, 0.0, 0.0, 0.0], [-0.006, 0.0677, -0.6943, -0.0064], [-0.0134500000000001, 0.19775, -0.0107499999999999, 0.67365], [0.015249999999999986, -0.78655, -0.11595, 0.24445]], [[1.0, 0.0, 0.0, 0.0], [-0.0035, 0.0304, -0.4853, 0.016], [-0.00974999999999998, 0.13375, -0.02235, 0.47485], [0.015050000000000008, -0.73365, -0.13485, 0.24195]], [[1.0, 0.0, 0.0, 0.0], [-0.00115, -0.000649999999999998, -0.28785, 0.02935], [-0.00605, 0.07495, -0.02605, 0.28345], [0.01475000000000004, -0.66285, -0.15395, 0.24005]], [[1.0, 0.0, 0.0, 0.0], [0.0006, -0.0178, -0.142, 0.0306], [-0.00310000000000002, 0.0334, -0.021, 0.1385], [0.014550000000000007, -0.57695, -0.16965, 0.23935]], [[1.0, 0.0, 0.0, 0.0], [-0.01585, 0.09415, -0.77375, -0.03205], [-0.0335500000000001, 0.22585, 0.00735000000000008, 0.74425], [0.03059999999999996, -0.6911, -0.0793, 0.2053]], [[1.0, 0.0, 0.0, 0.0], [-0.01195, 0.06585, -0.62745, -0.01185], [-0.0277500000000001, 0.18005, 
-0.00514999999999999, 0.60735], [0.030200000000000005, -0.6631, -0.0916, 0.2033]], [[1.0, 0.0, 0.0, 0.0], [-0.00685, 0.03145, -0.43815, 0.00985], [-0.02005, 0.12175, -0.01695, 0.42795], [0.029749999999999943, -0.61835, -0.10845, 0.20095]], [[1.0, 0.0, 0.0, 0.0], [-0.00195, 0.00245, -0.25945, 0.02345], [-0.0124, 0.0682, -0.0216000000000001, 0.2554], [0.029249999999999998, -0.55855, -0.12555, 0.19915]], [[1.0, 0.0, 0.0, 0.0], [0.00155, -0.01385, -0.12755, 0.02565], [-0.00634999999999999, 0.03045, -0.01805, 0.12475], [0.028750000000000053, -0.48595, -0.13955, 0.19845]], [[1.0, 0.0, 0.0, 0.0], [-0.0238, 0.0876, -0.6709, -0.0364], [-0.05265, 0.19775, 0.01335, 0.64275], [0.04469999999999996, -0.5427, -0.0544, 0.1585]], [[1.0, 0.0, 0.0, 0.0], [-0.0177, 0.0625, -0.5438, -0.0175], [-0.0436, 0.1577, 0.00119999999999998, 0.5244], [0.04420000000000002, -0.5207, -0.0649, 0.1567]], [[1.0, 0.0, 0.0, 0.0], [-0.00975, 0.03175, -0.37935, 0.00305], [-0.03145, 0.10655, -0.01075, 0.36945], [0.04344999999999999, -0.48535, -0.07915, 0.15455]], [[1.0, 0.0, 0.0, 0.0], [-0.00225, 0.00575, -0.22405, 0.01655], [-0.01945, 0.05975, -0.01635, 0.22035], [0.04259999999999997, -0.4381, -0.0937, 0.1529]], [[1.0, 0.0, 0.0, 0.0], [0.0032, -0.0095, -0.1097, 0.0199], [-0.00990000000000001, 0.0266, -0.0146, 0.1076], [0.04190000000000005, -0.381, -0.1056, 0.1521]], [[1.0, 0.0, 0.0, 0.0], [-0.0296, 0.0788, -0.557, -0.0393], [-0.0698, 0.1661, 0.0185, 0.5307], [0.052149999999999974, -0.39585, -0.03185, 0.11315]], [[1.0, 0.0, 0.0, 0.0], [-0.0216, 0.0573, -0.4511, -0.0222], [-0.0578, 0.1325, 0.0071, 0.4329], [0.05140000000000006, -0.3795, -0.0402, 0.1116]], [[1.0, 0.0, 0.0, 0.0], [-0.0112, 0.0309, -0.3142, -0.0032], [-0.0417, 0.0895, -0.00470000000000004, 0.3048], [0.05045000000000005, -0.35355, -0.05175, 0.10975]], [[1.0, 0.0, 0.0, 0.0], [-0.00135, 0.00825, -0.18515, 0.00985], [-0.02575, 0.05015, -0.01105, 0.18165], [0.049399999999999944, -0.319, -0.0635, 0.1082]], [[1.0, 0.0, 0.0, 0.0], [0.0057, -0.0054, 
-0.0902, 0.0141], [-0.0131, 0.0223, -0.0109, 0.0886], [0.04849999999999999, -0.2772, -0.0732, 0.1075]]]
set_24 = [[[1.0, 0.0, 0.0, 0.0], [0.00955, -0.08645, 0.90905, -0.10225], [-0.000500000000000056, 0.0877000000000001, -0.0921, -0.9194], [0.00930000000000003, -0.9321, -0.074, -0.0832]], [[1.0, 0.0, 0.0, 0.0], [0.0092, -0.111, 0.746, -0.0931], [-0.000399999999999956, 0.0720999999999999, -0.0746, -0.7903], [0.00930000000000003, -0.8422, -0.0254, -0.083]], [[1.0, 0.0, 0.0, 0.0], [0.00865, -0.13245, 0.53345, -0.07955], [-0.000350000000000072, 0.0515500000000001, -0.05185, -0.60865], [0.00930000000000003, -0.7105, 0.0338, -0.0823]], [[1.0, 0.0, 0.0, 0.0], [0.00785, -0.13505, 0.32975, -0.06365], [-0.000250000000000028, 0.03135, -0.03035, -0.41355], [0.00930000000000003, -0.5583, 0.0817, -0.0805]], [[1.0, 0.0, 0.0, 0.0], [0.00684999999999999, -0.11385, 0.17425, -0.04755], [-0.000200000000000006, 0.0157, -0.0144, -0.2412], [0.00930000000000003, -0.4057, 0.1031, -0.0772]], [[1.0, 0.0, 0.0, 0.0], [0.03705, -0.07035, 0.85385, -0.09365], [-0.00150000000000006, 0.0823, -0.0868, -0.8535], [0.03475000000000006, -0.85505, -0.07865, -0.07445]], [[1.0, 0.0, 0.0, 0.0], [0.0357, -0.0948, 0.7002, -0.0852], [-0.00135000000000007, 0.0677500000000001, -0.0702499999999999, -0.73365], [0.03479999999999994, -0.7722, -0.0325, -0.0743]], [[1.0, 0.0, 0.0, 0.0], [0.03345, -0.11685, 0.50025, -0.07275], [-0.00114999999999998, 0.04835, -0.0488500000000001, -0.56505], [0.03475000000000006, -0.65085, 0.02405, -0.07365]], [[1.0, 0.0, 0.0, 0.0], [0.03035, -0.12135, 0.30875, -0.05815], [-0.000899999999999956, 0.0295, -0.0285, -0.3839], [0.03475000000000006, -0.51085, 0.07025, -0.07215]], [[1.0, 0.0, 0.0, 0.0], [0.0265, -0.1034, 0.1627, -0.0434], [-0.000600000000000017, 0.0147, -0.0136, -0.2239], [0.03469999999999995, -0.3707, 0.0918, -0.0693]], [[1.0, 0.0, 0.0, 0.0], [0.0789, -0.0472, 0.7689, -0.0807], [-0.00185000000000002, 0.0740500000000001, -0.07845, -0.75325], [0.06964999999999999, -0.73985, -0.08395, -0.06175]], [[1.0, 0.0, 0.0, 0.0], [0.076, -0.0713, 0.6299, -0.0734], [-0.00170000000000003, 
0.0609000000000001, -0.0636, -0.6475], [0.06964999999999999, -0.66745, -0.04165, -0.06165]], [[1.0, 0.0, 0.0, 0.0], [0.0712, -0.094, 0.4492, -0.0626], [-0.00155, 0.04355, -0.04415, -0.49865], [0.06959999999999994, -0.5617, 0.0106, -0.0612]], [[1.0, 0.0, 0.0, 0.0], [0.06465, -0.10125, 0.27635, -0.04995], [-0.00125000000000003, 0.02645, -0.02575, -0.33875], [0.06959999999999994, -0.44, 0.0539, -0.0601]], [[1.0, 0.0, 0.0, 0.0], [0.0565, -0.088, 0.145, -0.0372], [-0.000900000000000012, 0.0132, -0.0123, -0.1976], [0.06955, -0.31865, 0.07535, -0.05775]], [[1.0, 0.0, 0.0, 0.0], [0.13, -0.0216, 0.6631, -0.0655], [-4.99999999999945e-05, 0.06355, -0.06815, -0.63095], [0.10420000000000001, -0.603, -0.0873, -0.0473]], [[1.0, 0.0, 0.0, 0.0], [0.12515, -0.04475, 0.54255, -0.05945], [-0.000299999999999967, 0.0523, -0.0551, -0.5423], [0.10420000000000001, -0.5433, -0.0499, -0.0473]], [[1.0, 0.0, 0.0, 0.0], [0.1172, -0.0678, 0.3859, -0.0506], [-0.000549999999999995, 0.03735, -0.03825, -0.41765], [0.10420000000000001, -0.4562, -0.0035, -0.0471]], [[1.0, 0.0, 0.0, 0.0], [0.1064, -0.0778, 0.2364, -0.0403], [-0.000750000000000028, 0.02275, -0.02235, -0.28375], [0.10414999999999996, -0.35635, 0.03585, -0.04625]], [[1.0, 0.0, 0.0, 0.0], [0.09295, -0.06975, 0.12325, -0.02985], [-0.000799999999999995, 0.0114, -0.0106, -0.1655], [0.10410000000000003, -0.2573, 0.0566, -0.0446]], [[1.0, 0.0, 0.0, 0.0], [0.1841, 0.00199999999999997, 0.547, -0.0499], [0.00474999999999998, 0.05215, -0.05665, -0.50035], [0.12795, -0.46215, -0.08665, -0.03335]], [[1.0, 0.0, 0.0, 0.0], [0.1772, -0.0196, 0.4468, -0.0452], [0.00380000000000003, 0.0429, -0.0458, -0.4301], [0.12789999999999996, -0.4155, -0.0549, -0.0334]], [[1.0, 0.0, 0.0, 0.0], [0.16595, -0.04215, 0.31675, -0.03835], [0.00245000000000001, 0.03065, -0.03185, -0.33125], [0.12795, -0.34795, -0.01535, -0.03335]], [[1.0, 0.0, 0.0, 0.0], [0.1506, -0.0543, 0.1931, -0.0304], [0.00115000000000001, 0.01865, -0.01855, -0.22505], [0.12785000000000002, -0.27065, 
0.01895, -0.03285]], [[1.0, 0.0, 0.0, 0.0], [0.13155, -0.05125, 0.09985, -0.02245], [0.00025, 0.00925000000000001, -0.00885, -0.13125], [0.12779999999999997, -0.1946, 0.0384, -0.0318]]]
set_25 = [[[1.0, 0.0, 0.0, 0.0], [0.00489999999999999, -0.8165, 0.4391, 0.0437], [-0.00989999999999996, 0.3945, 0.7328, -0.409], [-0.00934999999999997, -0.22745, -0.33235, -0.83855]], [[1.0, 0.0, 0.0, 0.0], [0.00445, -0.71665, 0.34885, 0.04655], [-0.00939999999999996, 0.3642, 0.6041, -0.3678], [-0.009149999999999991, -0.19375, -0.27075, -0.73465]], [[1.0, 0.0, 0.0, 0.0], [0.00385000000000001, -0.57275, 0.23425, 0.04665], [-0.00864999999999999, 0.31775, 0.43335, -0.30515], [-0.008799999999999975, -0.1444, -0.1897, -0.5856]], [[1.0, 0.0, 0.0, 0.0], [0.00305, -0.41185, 0.12955, 0.04045], [-0.0076, 0.2603, 0.2654, -0.2293], [-0.008500000000000008, -0.0887, -0.1113, -0.4199]], [[1.0, 0.0, 0.0, 0.0], [0.0022, -0.2612, 0.0563, 0.0276], [-0.00645000000000001, 0.19785, 0.13465, -0.15075], [-0.008199999999999985, -0.0374, -0.052, -0.2654]], [[1.0, 0.0, 0.0, 0.0], [0.018, -0.7541, 0.4156, 0.0381], [-0.0378, 0.3599, 0.6873, -0.3754], [-0.03650000000000014, -0.2114, -0.3126, -0.7749]], [[1.0, 0.0, 0.0, 0.0], [0.01655, -0.66185, 0.33015, 0.04125], [-0.03595, 0.33215, 0.56665, -0.33775], [-0.03560000000000002, -0.1804, -0.2547, -0.6788]], [[1.0, 0.0, 0.0, 0.0], [0.0142, -0.5288, 0.2217, 0.042], [-0.03305, 0.28975, 0.40635, -0.28035], [-0.03440000000000015, -0.1348, -0.1785, -0.5409]], [[1.0, 0.0, 0.0, 0.0], [0.0113, -0.3802, 0.1227, 0.0369], [-0.0292, 0.2373, 0.2489, -0.2109], [-0.03315000000000001, -0.08345, -0.10485, -0.38775]], [[1.0, 0.0, 0.0, 0.0], [0.0082, -0.2411, 0.0534, 0.0255], [-0.0247, 0.1803, 0.1261, -0.1389], [-0.032149999999999956, -0.03595, -0.04905, -0.24495]], [[1.0, 0.0, 0.0, 0.0], [0.03525, -0.65975, 0.37895, 0.02985], [-0.07895, 0.30845, 0.61725, -0.32495], [-0.0786, -0.1868, -0.2819, -0.6786]], [[1.0, 0.0, 0.0, 0.0], [0.03235, -0.57905, 0.30105, 0.03335], [-0.07515, 0.28465, 0.50875, -0.29255], [-0.07679999999999998, -0.1598, -0.2298, -0.5943]], [[1.0, 0.0, 0.0, 0.0], [0.0278, -0.4625, 0.2023, 0.035], [-0.0691, 0.2482, 0.3648, -0.2431], 
[-0.07440000000000002, -0.1201, -0.1612, -0.4734]], [[1.0, 0.0, 0.0, 0.0], [0.02215, -0.33245, 0.11195, 0.03145], [-0.0612, 0.2032, 0.2233, -0.1832], [-0.07204999999999995, -0.07515, -0.09475, -0.33915]], [[1.0, 0.0, 0.0, 0.0], [0.016, -0.2106, 0.0488, 0.0223], [-0.05195, 0.15425, 0.11315, -0.12105], [-0.07020000000000004, -0.0334, -0.0445, -0.2141]], [[1.0, 0.0, 0.0, 0.0], [0.0512, -0.5461, 0.3323, 0.0204], [-0.1268, 0.2479, 0.5299, -0.2647], [-0.13175000000000003, -0.15655, -0.24345, -0.56235]], [[1.0, 0.0, 0.0, 0.0], [0.047, -0.4791, 0.2641, 0.0242], [-0.1208, 0.2286, 0.4367, -0.2385], [-0.12925000000000003, -0.13425, -0.19855, -0.49235]], [[1.0, 0.0, 0.0, 0.0], [0.04035, -0.38265, 0.17745, 0.02675], [-0.11125, 0.19925, 0.31295, -0.19855], [-0.12579999999999997, -0.1017, -0.1394, -0.3919]], [[1.0, 0.0, 0.0, 0.0], [0.03205, -0.27485, 0.09835, 0.02505], [-0.0988, 0.163, 0.1915, -0.15], [-0.12244999999999995, -0.06455, -0.08215, -0.28055]], [[1.0, 0.0, 0.0, 0.0], [0.0231, -0.174, 0.0429, 0.0183], [-0.0841, 0.1237, 0.0969, -0.0995], [-0.11989999999999995, -0.0298, -0.0387, -0.177]], [[1.0, 0.0, 0.0, 0.0], [0.0603, -0.4264, 0.2799, 0.0112], [-0.1748, 0.1863, 0.4341, -0.2022], [-0.19235000000000002, -0.12375, -0.20095, -0.43995]], [[1.0, 0.0, 0.0, 0.0], [0.05525, -0.37405, 0.22245, 0.01515], [-0.1668, 0.1718, 0.3577, -0.1825], [-0.18930000000000002, -0.1067, -0.164, -0.385]], [[1.0, 0.0, 0.0, 0.0], [0.0474, -0.2986, 0.1495, 0.0185], [-0.154, 0.1496, 0.2563, -0.1522], [-0.18534999999999996, -0.08145, -0.11525, -0.30625]], [[1.0, 0.0, 0.0, 0.0], [0.0375, -0.2143, 0.083, 0.0184], [-0.13715, 0.12225, 0.15675, -0.11535], [-0.18144999999999994, -0.05265, -0.06805, -0.21895]], [[1.0, 0.0, 0.0, 0.0], [0.0269, -0.1355, 0.0362, 0.0141], [-0.11715, 0.09265, 0.07925, -0.07695], [-0.17870000000000003, -0.0254, -0.0322, -0.1379]]]
set_26 = [[[1.0, 0.0, 0.0, 0.0], [0.01785, -0.0445500000000001, -0.00405, 0.95305], [0.0, 0.273, -0.8694, 0.0091], [-0.005649999999999988, 0.87705, 0.27585, 0.04415]], [[1.0, 0.0, 0.0, 0.0], [0.01715, -0.03925, -0.00275000000000003, 0.91555], [0.0, 0.2176, -0.693, 0.0072], [-0.005649999999999988, 0.72765, 0.22885, 0.04035]], [[1.0, 0.0, 0.0, 0.0], [0.0159999999999999, -0.0318999999999999, -0.00109999999999988, 0.8543], [0.0, 0.1471, -0.4686, 0.0049], [-0.005849999999999966, 0.52735, 0.16595, 0.03455]], [[1.0, 0.0, 0.0, 0.0], [0.0145, -0.0243, 0.000400000000000067, 0.7711], [0.0, 0.0825, -0.2626, 0.0027], [-0.006000000000000005, 0.3273, 0.1031, 0.0269]], [[1.0, 0.0, 0.0, 0.0], [0.0125999999999999, -0.0182, 0.00130000000000008, 0.6686], [0.0, 0.0368, -0.1171, 0.0012], [-0.006250000000000033, 0.16825, 0.05305, 0.01705]], [[1.0, 0.0, 0.0, 0.0], [0.06855, -0.04115, -0.00395000000000001, 0.86085], [0.0, 0.2582, -0.8222, 0.0086], [-0.022450000000000025, 0.82125, 0.25835, 0.04035]], [[1.0, 0.0, 0.0, 0.0], [0.0659000000000001, -0.0362000000000001, -0.00270000000000004, 0.827], [0.0, 0.2058, -0.6554, 0.0068], [-0.022699999999999998, 0.6814, 0.2144, 0.0369]], [[1.0, 0.0, 0.0, 0.0], [0.0616, -0.0293, -0.00109999999999999, 0.7717], [0.0, 0.1392, -0.4432, 0.0046], [-0.023200000000000054, 0.4938, 0.1554, 0.0316]], [[1.0, 0.0, 0.0, 0.0], [0.0557, -0.0222, 0.000300000000000078, 0.6965], [0.0, 0.078, -0.2483, 0.0026], [-0.023849999999999982, 0.30645, 0.09645, 0.02475]], [[1.0, 0.0, 0.0, 0.0], [0.0485, -0.0165999999999999, 0.00109999999999999, 0.6039], [0.0, 0.0348, -0.1107, 0.0012], [-0.02479999999999999, 0.1576, 0.0496, 0.0159]], [[1.0, 0.0, 0.0, 0.0], [0.1446, -0.0359999999999999, -0.00369999999999993, 0.7256], [0.0, 0.2351, -0.7486, 0.0078], [-0.05024999999999996, 0.73545, 0.23125, 0.03475]], [[1.0, 0.0, 0.0, 0.0], [0.13905, -0.03165, -0.00265000000000004, 0.69705], [0.0, 0.1874, -0.5967, 0.0062], [-0.050850000000000006, 0.61025, 0.19195, 0.03175]], [[1.0, 0.0, 0.0, 0.0], 
[0.12995, -0.02555, -0.00124999999999997, 0.65035], [0.0, 0.1267, -0.4035, 0.0042], [-0.05180000000000001, 0.4421, 0.1391, 0.0272]], [[1.0, 0.0, 0.0, 0.0], [0.1176, -0.0193000000000001, 9.9999999999989e-05, 0.587], [0.0, 0.071, -0.2261, 0.0024], [-0.05329999999999996, 0.2745, 0.0864, 0.0214]], [[1.0, 0.0, 0.0, 0.0], [0.10235, -0.0141499999999999, 0.000850000000000017, 0.50895], [0.0, 0.0317, -0.1008, 0.0011], [-0.05519999999999997, 0.1411, 0.0444, 0.014]], [[1.0, 0.0, 0.0, 0.0], [0.2354, -0.0297999999999999, -0.00339999999999996, 0.5694], [0.0, 0.2058, -0.6554, 0.0068], [-0.08875000000000005, 0.62885, 0.19775, 0.02815]], [[1.0, 0.0, 0.0, 0.0], [0.2263, -0.0261, -0.00250000000000006, 0.547], [0.0, 0.1641, -0.5224, 0.0055], [-0.08970000000000006, 0.5218, 0.1641, 0.0257]], [[1.0, 0.0, 0.0, 0.0], [0.21155, -0.02095, -0.00125000000000008, 0.51035], [0.0, 0.1109, -0.3532, 0.0037], [-0.09129999999999999, 0.3781, 0.119, 0.022]], [[1.0, 0.0, 0.0, 0.0], [0.19145, -0.01565, -4.99999999999945e-05, 0.46065], [0.0, 0.0622, -0.1979, 0.0021], [-0.09365000000000001, 0.23475, 0.07395, 0.01735]], [[1.0, 0.0, 0.0, 0.0], [0.16665, -0.01135, 0.000650000000000039, 0.39935], [0.0, 0.0277, -0.0882, 0.0009], [-0.09675000000000006, 0.12065, 0.03795, 0.01165]], [[1.0, 0.0, 0.0, 0.0], [0.32955, -0.02325, -0.00295000000000001, 0.41465], [0.0, 0.1729, -0.5508, 0.0058], [-0.13739999999999997, 0.5124, 0.1611, 0.0214]], [[1.0, 0.0, 0.0, 0.0], [0.31695, -0.02035, -0.00225000000000003, 0.39835], [0.0, 0.1379, -0.439, 0.0046], [-0.13864999999999994, 0.42515, 0.13365, 0.01945]], [[1.0, 0.0, 0.0, 0.0], [0.2963, -0.0163, -0.00130000000000002, 0.3717], [0.0, 0.0932, -0.2968, 0.0031], [-0.14090000000000003, 0.3081, 0.0969, 0.0166]], [[1.0, 0.0, 0.0, 0.0], [0.26815, -0.01205, -0.000249999999999972, 0.33545], [0.0, 0.0522, -0.1663, 0.0017], [-0.14409999999999995, 0.1913, 0.0602, 0.0132]], [[1.0, 0.0, 0.0, 0.0], [0.2334, -0.00850000000000001, 0.000400000000000011, 0.2908], [0.0, 0.0233, -0.0742, 0.0008], 
[-0.14840000000000003, 0.0983, 0.031, 0.0091]]]
set_27 = [[[1.0, 0.0, 0.0, 0.0], [0.00150000000000006, -0.7563, 0.0569999999999999, -0.5356], [-0.000150000000000004, 0.11735, 0.90415, -0.05635], [-0.013500000000000012, 0.5088, -0.1238, -0.7857]], [[1.0, 0.0, 0.0, 0.0], [0.00124999999999997, -0.62355, 0.02835, -0.50525], [-0.000150000000000001, 0.09685, 0.72895, -0.02665], [-0.013349999999999973, 0.41945, -0.12695, -0.75665]], [[1.0, 0.0, 0.0, 0.0], [0.001, -0.4499, -0.00800000000000001, -0.4542], [-0.0001, 0.0695, 0.5035, 0.0057], [-0.013100000000000056, 0.2953, -0.1328, -0.713]], [[1.0, 0.0, 0.0, 0.0], [0.000700000000000034, -0.2819, -0.0399, -0.3839], [-4.9999999999998e-05, 0.04255, 0.29255, 0.02635], [-0.012850000000000028, 0.16495, -0.14165, -0.66105]], [[1.0, 0.0, 0.0, 0.0], [0.000400000000000011, -0.1518, -0.0575, -0.2993], [-4.9999999999998e-05, 0.02155, 0.13865, 0.02995], [-0.012600000000000111, 0.0548000000000001, -0.1539, -0.6078]], [[1.0, 0.0, 0.0, 0.0], [0.00625000000000003, -0.70955, 0.05825, -0.48575], [-0.000699999999999999, 0.11, 0.8528, -0.0584], [-0.05234999999999995, 0.47665, -0.11015, -0.70955]], [[1.0, 0.0, 0.0, 0.0], [0.00535000000000002, -0.58465, 0.03115, -0.45855], [-0.0006, 0.0908, 0.6874, -0.0296], [-0.05180000000000001, 0.3937, -0.1129, -0.6829]], [[1.0, 0.0, 0.0, 0.0], [0.00419999999999998, -0.4213, -0.00329999999999997, -0.4126], [-0.00045, 0.06515, 0.47465, 0.00205], [-0.05095000000000005, 0.27815, -0.11795, -0.64285]], [[1.0, 0.0, 0.0, 0.0], [0.00295000000000001, -0.26345, -0.03365, -0.34915], [-0.000300000000000002, 0.0399, 0.2756, 0.0226], [-0.04994999999999994, 0.15675, -0.12595, -0.59525]], [[1.0, 0.0, 0.0, 0.0], [0.00184999999999996, -0.14155, -0.05095, -0.27245], [-0.000150000000000001, 0.02015, 0.13045, 0.02695], [-0.04899999999999999, 0.0541, -0.1369, -0.5464]], [[1.0, 0.0, 0.0, 0.0], [0.015, -0.6377, 0.0591, -0.412], [-0.0019, 0.0987, 0.773, -0.0608], [-0.11234999999999995, 0.42695, -0.0903499999999999, -0.59805]], [[1.0, 0.0, 0.0, 0.0], [0.01305, -0.52495, 0.03455, 
-0.38945], [-0.00165, 0.08145, 0.62295, -0.03345], [-0.11119999999999997, 0.3535, -0.0925999999999999, -0.575]], [[1.0, 0.0, 0.0, 0.0], [0.01035, -0.37745, 0.00315000000000004, -0.35105], [-0.0013, 0.0585, 0.43, -0.0032], [-0.10955000000000004, 0.25115, -0.09675, -0.54035]], [[1.0, 0.0, 0.0, 0.0], [0.00749999999999995, -0.2353, -0.0248999999999999, -0.2976], [-0.00085, 0.03575, 0.24945, 0.01715], [-0.10759999999999997, 0.1435, -0.1033, -0.4991]], [[1.0, 0.0, 0.0, 0.0], [0.005, -0.1259, -0.0414, -0.2326], [-0.0005, 0.0181, 0.1179, 0.0225], [-0.10565000000000002, 0.05225, -0.11245, -0.45695]], [[1.0, 0.0, 0.0, 0.0], [0.02835, -0.54855, 0.05845, -0.32585], [-0.00415, 0.08475, 0.67275, -0.06235], [-0.18779999999999997, 0.3647, -0.0683, -0.4696]], [[1.0, 0.0, 0.0, 0.0], [0.025, -0.4509, 0.037, -0.3086], [-0.00355, 0.06995, 0.54185, -0.03715], [-0.18609999999999993, 0.3029, -0.07, -0.4508]], [[1.0, 0.0, 0.0, 0.0], [0.02025, -0.32335, 0.00944999999999996, -0.27895], [-0.00275, 0.05015, 0.37375, -0.00885], [-0.18354999999999994, 0.21675, -0.07305, -0.42265]], [[1.0, 0.0, 0.0, 0.0], [0.01505, -0.20075, -0.01535, -0.23715], [-0.00185, 0.03075, 0.21655, 0.01095], [-0.18064999999999998, 0.12605, -0.07805, -0.38915]], [[1.0, 0.0, 0.0, 0.0], [0.01025, -0.10655, -0.03055, -0.18585], [-0.00105, 0.01545, 0.10215, 0.01735], [-0.17779999999999996, 0.0488999999999999, -0.0851000000000001, -0.3548]], [[1.0, 0.0, 0.0, 0.0], [0.0463, -0.451, 0.0552, -0.2393], [-0.00745, 0.06935, 0.56105, -0.06185], [-0.2728999999999999, 0.296, -0.0475, -0.3428]], [[1.0, 0.0, 0.0, 0.0], [0.04115, -0.36995, 0.03745, -0.22725], [-0.0064, 0.0572, 0.4517, -0.0394], [-0.27075000000000005, 0.24675, -0.04855, -0.32845]], [[1.0, 0.0, 0.0, 0.0], [0.03385, -0.26445, 0.01445, -0.20625], [-0.0049, 0.041, 0.3112, -0.0137], [-0.26764999999999994, 0.17805, -0.05055, -0.30685]], [[1.0, 0.0, 0.0, 0.0], [0.02575, -0.16325, -0.00664999999999999, -0.17605], [-0.0033, 0.0251, 0.18, 0.0049], [-0.26394999999999996, 0.10545, 
-0.05405, -0.28125]], [[1.0, 0.0, 0.0, 0.0], [0.0181, -0.086, -0.0202, -0.1385], [-0.00195, 0.01275, 0.08475, 0.01205], [-0.2603500000000001, 0.04365, -0.05905, -0.25505]]]
set_28 = [[[1.0, 0.0, 0.0, 0.0], [0.00470000000000001, -0.6807, 0.5656, 0.2411], [0.00509999999999999, -0.5835, -0.4381, -0.5786], [-0.019500000000000017, -0.2218, -0.5866, 0.6965]], [[1.0, 0.0, 0.0, 0.0], [0.00455, -0.56415, 0.46845, 0.16755], [0.00454999999999994, -0.51395, -0.35185, -0.52135], [-0.01934999999999998, -0.15855, -0.52385, 0.63715]], [[1.0, 0.0, 0.0, 0.0], [0.00420000000000001, -0.4124, 0.3386, 0.0767], [0.00375000000000003, -0.41315, -0.24125, -0.43515], [-0.01915, -0.07175, -0.43085, 0.55255]], [[1.0, 0.0, 0.0, 0.0], [0.00365, -0.26665, 0.20945, -0.00095], [0.00280000000000002, -0.2995, -0.1381, -0.3324], [-0.018899999999999972, 0.0175, -0.3223, 0.4596]], [[1.0, 0.0, 0.0, 0.0], [0.003, -0.154, 0.1071, -0.0462], [0.00184999999999999, -0.19185, -0.06365, -0.22745], [-0.018699999999999994, 0.0905, -0.2147, 0.3745]], [[1.0, 0.0, 0.0, 0.0], [0.0183, -0.6381, 0.5298, 0.2344], [0.0194, -0.5384, -0.4135, -0.5309], [-0.07549999999999996, -0.2139, -0.5395, 0.6372]], [[1.0, 0.0, 0.0, 0.0], [0.0176, -0.5282, 0.4389, 0.1646], [0.01735, -0.47425, -0.33215, -0.47845], [-0.07505000000000006, -0.15485, -0.48185, 0.58225]], [[1.0, 0.0, 0.0, 0.0], [0.01625, -0.38535, 0.31715, 0.07815], [0.0143, -0.3812, -0.2277, -0.3994], [-0.07430000000000003, -0.0739, -0.3963, 0.5039]], [[1.0, 0.0, 0.0, 0.0], [0.01425, -0.24835, 0.19615, 0.00395], [0.0107, -0.2763, -0.1304, -0.3051], [-0.07355, 0.00955, -0.29645, 0.41785]], [[1.0, 0.0, 0.0, 0.0], [0.01165, -0.14275, 0.10035, -0.03985], [0.00714999999999999, -0.17695, -0.06005, -0.20885], [-0.07279999999999998, 0.0777, -0.1974, 0.3391]], [[1.0, 0.0, 0.0, 0.0], [0.03925, -0.57265, 0.47475, 0.22265], [0.0403, -0.4705, -0.3754, -0.4594], [-0.16159999999999997, -0.2001, -0.4686, 0.549]], [[1.0, 0.0, 0.0, 0.0], [0.0377, -0.4732, 0.3933, 0.1587], [0.03595, -0.41425, -0.30145, -0.41405], [-0.16064999999999996, -0.14765, -0.41855, 0.50075]], [[1.0, 0.0, 0.0, 0.0], [0.03475, -0.34405, 0.28425, 0.07925], [0.02955, -0.33295, -0.20665, 
-0.34575], [-0.15925000000000006, -0.0756500000000001, -0.34425, 0.43185]], [[1.0, 0.0, 0.0, 0.0], [0.03045, -0.22055, 0.17575, 0.01055], [0.02215, -0.24125, -0.11825, -0.26425], [-0.15765000000000007, -0.00135000000000002, -0.25755, 0.35625]], [[1.0, 0.0, 0.0, 0.0], [0.0248, -0.1257, 0.0899, -0.0307], [0.01475, -0.15445, -0.05445, -0.18095], [-0.15615, 0.05955, -0.17165, 0.28715]], [[1.0, 0.0, 0.0, 0.0], [0.0653, -0.4919, 0.4064, 0.2054], [0.0639, -0.3885, -0.3272, -0.3742], [-0.26839999999999997, -0.1802, -0.3838, 0.4451]], [[1.0, 0.0, 0.0, 0.0], [0.06255, -0.40545, 0.33665, 0.14895], [0.05705, -0.34215, -0.26275, -0.33735], [-0.26695, -0.13595, -0.34285, 0.40485]], [[1.0, 0.0, 0.0, 0.0], [0.05765, -0.29345, 0.24325, 0.07855], [0.0469, -0.2749, -0.1801, -0.2818], [-0.26485000000000003, -0.07495, -0.28205, 0.34755]], [[1.0, 0.0, 0.0, 0.0], [0.05035, -0.18665, 0.15045, 0.01725], [0.0351, -0.1991, -0.103, -0.2155], [-0.26249999999999996, -0.012, -0.2111, 0.2846]], [[1.0, 0.0, 0.0, 0.0], [0.04095, -0.10515, 0.07695, -0.02045], [0.0234, -0.1273, -0.0474, -0.1477], [-0.2603, 0.0399, -0.1406, 0.2271]], [[1.0, 0.0, 0.0, 0.0], [0.09385, -0.40395, 0.33155, 0.18275], [0.0866, -0.3026, -0.2735, -0.286], [-0.38549999999999995, -0.155, -0.2955, 0.339]], [[1.0, 0.0, 0.0, 0.0], [0.08975, -0.33195, 0.27465, 0.13485], [0.07725, -0.26635, -0.21955, -0.25795], [-0.38369999999999993, -0.1195, -0.264, 0.3073]], [[1.0, 0.0, 0.0, 0.0], [0.0825, -0.2389, 0.1985, 0.075], [0.0635, -0.2139, -0.1504, -0.2156], [-0.3811, -0.0705, -0.2172, 0.2622]], [[1.0, 0.0, 0.0, 0.0], [0.0719, -0.1505, 0.1227, 0.0223], [0.0475, -0.1548, -0.086, -0.165], [-0.37820000000000004, -0.0198, -0.1626, 0.2127]], [[1.0, 0.0, 0.0, 0.0], [0.0583, -0.0836, 0.0628, -0.0108], [0.03165, -0.09895, -0.03955, -0.11315], [-0.37549999999999994, 0.0221, -0.1084, 0.1675]]]
set_29 = [[[1.0, 0.0, 0.0, 0.0], [-0.00849999999999995, 0.6019, -0.296, -0.6328], [-0.000700000000000034, 0.5308, -0.3582, 0.6811], [-0.013800000000000034, -0.4432, -0.8143, -0.0527]], [[1.0, 0.0, 0.0, 0.0], [-0.00724999999999998, 0.49105, -0.25565, -0.53755], [-0.00109999999999999, 0.4454, -0.3467, 0.6072], [-0.013349999999999973, -0.34895, -0.72155, -0.02555]], [[1.0, 0.0, 0.0, 0.0], [-0.00549999999999995, 0.3459, -0.1971, -0.406], [-0.00175000000000003, 0.33345, -0.31915, 0.49895], [-0.012750000000000039, -0.22475, -0.59025, 0.01115]], [[1.0, 0.0, 0.0, 0.0], [-0.00364999999999999, 0.20635, -0.13245, -0.26855], [-0.00229999999999997, 0.2239, -0.2715, 0.3744], [-0.01204999999999995, -0.10515, -0.44665, 0.04705]], [[1.0, 0.0, 0.0, 0.0], [-0.00209999999999999, 0.1008, -0.0745, -0.1515], [-0.00275000000000003, 0.13635, -0.20665, 0.25165], [-0.01144999999999996, -0.01675, -0.31355, 0.07155]], [[1.0, 0.0, 0.0, 0.0], [-0.03225, 0.56605, -0.27435, -0.58915], [-0.00380000000000003, 0.4961, -0.3223, 0.6267], [-0.05404999999999999, -0.41985, -0.75075, -0.05375]], [[1.0, 0.0, 0.0, 0.0], [-0.02755, 0.46175, -0.23705, -0.50045], [-0.00539999999999996, 0.4158, -0.3129, 0.5586], [-0.05244999999999994, -0.33125, -0.66455, -0.02845]], [[1.0, 0.0, 0.0, 0.0], [-0.02095, 0.32515, -0.18305, -0.37795], [-0.0076, 0.3105, -0.2893, 0.4589], [-0.05010000000000003, -0.2146, -0.5429, 0.0059]], [[1.0, 0.0, 0.0, 0.0], [-0.014, 0.194, -0.1231, -0.2499], [-0.00969999999999999, 0.2078, -0.247, 0.3442], [-0.04750000000000004, -0.1021, -0.41, 0.0397]], [[1.0, 0.0, 0.0, 0.0], [-0.00794999999999998, 0.09475, -0.06935, -0.14095], [-0.0111, 0.1259, -0.1886, 0.2313], [-0.04519999999999996, -0.0187, -0.287, 0.0631]], [[1.0, 0.0, 0.0, 0.0], [-0.0666, 0.5105, -0.2413, -0.5226], [-0.0118999999999999, 0.4431, -0.2693, 0.545], [-0.11769999999999997, -0.3829, -0.6552, -0.0544]], [[1.0, 0.0, 0.0, 0.0], [-0.0568, 0.4164, -0.2089, -0.4438], [-0.0151000000000001, 0.3706, -0.263, 0.4856], [-0.11434999999999995, 
-0.30325, -0.57925, -0.03185]], [[1.0, 0.0, 0.0, 0.0], [-0.04325, 0.29315, -0.16155, -0.33505], [-0.0193, 0.2756, -0.245, 0.3987], [-0.10959999999999998, -0.1981, -0.472, -0.0011]], [[1.0, 0.0, 0.0, 0.0], [-0.02885, 0.17475, -0.10895, -0.22145], [-0.02325, 0.18335, -0.21055, 0.29885], [-0.10424999999999995, -0.09655, -0.35515, 0.02935]], [[1.0, 0.0, 0.0, 0.0], [-0.01645, 0.08535, -0.06155, -0.12485], [-0.0256, 0.1102, -0.1616, 0.2008], [-0.09935000000000005, -0.02105, -0.24755, 0.05095]], [[1.0, 0.0, 0.0, 0.0], [-0.1048, 0.4411, -0.201, -0.4409], [-0.0283, 0.3781, -0.2076, 0.4472], [-0.19985000000000003, -0.33545, -0.54075, -0.05325]], [[1.0, 0.0, 0.0, 0.0], [-0.08945, 0.35965, -0.17435, -0.37435], [-0.0329, 0.3152, -0.2048, 0.3982], [-0.19470000000000004, -0.2668, -0.477, -0.0342]], [[1.0, 0.0, 0.0, 0.0], [-0.068, 0.2531, -0.1353, -0.2825], [-0.03885, 0.23325, -0.19305, 0.32665], [-0.18714999999999998, -0.17615, -0.38735, -0.00805]], [[1.0, 0.0, 0.0, 0.0], [-0.04535, 0.15075, -0.09155, -0.18665], [-0.04405, 0.15375, -0.16775, 0.24465], [-0.17874999999999996, -0.08845, -0.28995, 0.01815]], [[1.0, 0.0, 0.0, 0.0], [-0.02585, 0.07355, -0.05185, -0.10515], [-0.0467, 0.0915, -0.1298, 0.1643], [-0.17094999999999994, -0.02285, -0.20075, 0.03725]], [[1.0, 0.0, 0.0, 0.0], [-0.13975, 0.36435, -0.15795, -0.35305], [-0.05555, 0.30795, -0.14605, 0.34545], [-0.2945, -0.281, -0.4213, -0.0496]], [[1.0, 0.0, 0.0, 0.0], [-0.1192, 0.297, -0.1374, -0.2997], [-0.06085, 0.25575, -0.14645, 0.30735], [-0.2876000000000001, -0.2247, -0.3707, -0.0343]], [[1.0, 0.0, 0.0, 0.0], [-0.09065, 0.20895, -0.10705, -0.22605], [-0.0675, 0.1879, -0.1406, 0.2518], [-0.27760000000000007, -0.1501, -0.2997, -0.0132]], [[1.0, 0.0, 0.0, 0.0], [-0.06045, 0.12435, -0.07285, -0.14925], [-0.07285, 0.12265, -0.12415, 0.18835], [-0.2663, -0.0778, -0.2229, 0.00820000000000001]], [[1.0, 0.0, 0.0, 0.0], [-0.0344, 0.0606, -0.0415, -0.084], [-0.0743, 0.0718, -0.0973, 0.1264], [-0.2558500000000001, -0.02335, -0.15295, 
0.02435]]]
set_30 = [[[1.0, 0.0, 0.0, 0.0], [-0.01115, 0.56605, -0.02945, -0.73355], [0.0, 0.0, 0.9106, -0.0366], [0.0015999999999998793, 0.7548, 0.023, 0.5721]], [[1.0, 0.0, 0.0, 0.0], [-0.00949999999999995, 0.494, -0.0249, -0.6188], [0.0, 0.0, 0.7258, -0.0292], [0.0011499999999999844, 0.71255, 0.02095, 0.52335]], [[1.0, 0.0, 0.0, 0.0], [-0.00724999999999998, 0.39165, -0.01845, -0.46055], [0.0, 0.0, 0.4908, -0.0197], [0.00039999999999995595, 0.6444, 0.0181, 0.4498]], [[1.0, 0.0, 0.0, 0.0], [-0.00479999999999997, 0.2795, -0.0119, -0.2962], [0.0, 0.0, 0.275, -0.011], [-0.00039999999999995595, 0.5539, 0.0145, 0.3611]], [[1.0, 0.0, 0.0, 0.0], [-0.00260000000000002, 0.1769, -0.00639999999999999, -0.1584], [0.0, 0.0, 0.1226, -0.0049], [-0.0012499999999999734, 0.44685, 0.01075, 0.26755]], [[1.0, 0.0, 0.0, 0.0], [-0.04235, 0.52355, -0.02745, -0.68405], [0.0, 0.0, 0.8612, -0.0346], [0.005199999999999927, 0.6846, 0.021, 0.5231]], [[1.0, 0.0, 0.0, 0.0], [-0.0361499999999999, 0.45675, -0.0232500000000001, -0.57715], [0.0, 0.0, 0.6864, -0.0276], [0.003400000000000014, 0.6464, 0.0192, 0.4784]], [[1.0, 0.0, 0.0, 0.0], [-0.0275, 0.362, -0.0172, -0.4297], [0.0, 0.0, 0.4641, -0.0186], [0.0006999999999999229, 0.5848, 0.0165, 0.411]], [[1.0, 0.0, 0.0, 0.0], [-0.0181, 0.2581, -0.0111, -0.2765], [0.0, 0.0, 0.2601, -0.0104], [-0.0023999999999999577, 0.5028, 0.0131999999999999, 0.3298]], [[1.0, 0.0, 0.0, 0.0], [-0.00989999999999999, 0.1633, -0.00590000000000002, -0.148], [0.0, 0.0, 0.116, -0.0047], [-0.005500000000000005, 0.4057, 0.0098, 0.2443]], [[1.0, 0.0, 0.0, 0.0], [-0.08755, 0.45935, -0.02445, -0.60825], [0.0, 0.0, 0.7841, -0.0315], [0.007600000000000051, 0.5811, 0.0181, 0.4501]], [[1.0, 0.0, 0.0, 0.0], [-0.07475, 0.40055, -0.02065, -0.51335], [0.0, 0.0, 0.625, -0.0251], [0.003950000000000065, 0.54875, 0.01655, 0.41145]], [[1.0, 0.0, 0.0, 0.0], [-0.0567, 0.3172, -0.0154, -0.3824], [0.0, 0.0, 0.4226, -0.017], [-0.0014500000000000068, 0.49655, 0.01415, 0.35325]], [[1.0, 0.0, 0.0, 0.0], 
[-0.03735, 0.22595, -0.00985, -0.24625], [0.0, 0.0, 0.2368, -0.0095], [-0.007800000000000029, 0.4272, 0.0114, 0.2832]], [[1.0, 0.0, 0.0, 0.0], [-0.02025, 0.14265, -0.00535000000000002, -0.13205], [0.0, 0.0, 0.1056, -0.0042], [-0.014100000000000001, 0.3448, 0.00839999999999999, 0.2096]], [[1.0, 0.0, 0.0, 0.0], [-0.13805, 0.38165, -0.02075, -0.51485], [0.0, 0.0, 0.6864, -0.0276], [0.004550000000000054, 0.46035, 0.01465, 0.36385]], [[1.0, 0.0, 0.0, 0.0], [-0.11775, 0.33265, -0.01745, -0.43465], [0.0, 0.0, 0.5472, -0.022], [-0.001100000000000101, 0.435, 0.0133, 0.3324]], [[1.0, 0.0, 0.0, 0.0], [-0.0892, 0.2631, -0.013, -0.324], [0.0, 0.0, 0.37, -0.0149], [-0.009500000000000064, 0.3939, 0.0115, 0.2851]], [[1.0, 0.0, 0.0, 0.0], [-0.0585, 0.187, -0.00840000000000002, -0.2089], [0.0, 0.0, 0.2073, -0.0083], [-0.01924999999999999, 0.33905, 0.00915000000000002, 0.22825]], [[1.0, 0.0, 0.0, 0.0], [-0.0315, 0.1178, -0.0045, -0.1122], [0.0, 0.0, 0.0924, -0.0037], [-0.02889999999999998, 0.2739, 0.0068, 0.1688]], [[1.0, 0.0, 0.0, 0.0], [-0.1843, 0.2997, -0.0166, -0.4139], [0.0, 0.0, 0.5768, -0.0232], [-0.008399999999999963, 0.3395, 0.0110999999999999, 0.2756]], [[1.0, 0.0, 0.0, 0.0], [-0.15705, 0.26105, -0.01405, -0.34955], [0.0, 0.0, 0.4598, -0.0185], [-0.015749999999999986, 0.32085, 0.01015, 0.25155]], [[1.0, 0.0, 0.0, 0.0], [-0.1187, 0.2061, -0.0104, -0.2608], [0.0, 0.0, 0.3109, -0.0125], [-0.026650000000000007, 0.29075, 0.00875000000000001, 0.21545]], [[1.0, 0.0, 0.0, 0.0], [-0.07745, 0.14625, -0.00674999999999998, -0.16845], [0.0, 0.0, 0.1742, -0.007], [-0.039299999999999946, 0.2505, 0.00689999999999999, 0.1722]], [[1.0, 0.0, 0.0, 0.0], [-0.0411, 0.0918, -0.00370000000000001, -0.0907], [0.0, 0.0, 0.0777, -0.0031], [-0.05190000000000006, 0.2026, 0.00509999999999999, 0.1271]]]
set_31 = [[[1.0, 0.0, 0.0, 0.0], [-0.00585000000000002, -0.20525, 4.99999999999945e-05, 0.91025], [-0.01305, 0.59155, -0.69785, 0.13235], [-0.012699999999999989, 0.6883, 0.6031, 0.154]], [[1.0, 0.0, 0.0, 0.0], [-0.00509999999999999, -0.1815, 0.0, 0.7982], [-0.01225, 0.50935, -0.58105, 0.11135], [-0.01204999999999995, 0.59905, 0.50805, 0.13095]], [[1.0, 0.0, 0.0, 0.0], [-0.00405, -0.14415, 4.99999999999945e-05, 0.63715], [-0.01095, 0.39415, -0.42335, 0.08025], [-0.011100000000000054, 0.4717, 0.3783, 0.096]], [[1.0, 0.0, 0.0, 0.0], [-0.00284999999999996, -0.09895, -5.000000000005e-05, 0.45775], [-0.0094, 0.2708, -0.2646, 0.0445], [-0.00990000000000002, 0.3312, 0.245, 0.0544]], [[1.0, 0.0, 0.0, 0.0], [-0.00174999999999997, -0.05525, -5.000000000005e-05, 0.29035], [-0.00765, 0.16145, -0.13715, 0.01045], [-0.008650000000000047, 0.20205, 0.13415, 0.01315]], [[1.0, 0.0, 0.0, 0.0], [-0.0216, -0.1889, 0.0, 0.8409], [-0.05075, 0.54895, -0.65285, 0.12335], [-0.049449999999999994, 0.63695, 0.56265, 0.14315]], [[1.0, 0.0, 0.0, 0.0], [-0.0188, -0.1674, 0.0, 0.7373], [-0.0476, 0.4726, -0.5436, 0.104], [-0.047099999999999975, 0.5544, 0.474, 0.122]], [[1.0, 0.0, 0.0, 0.0], [-0.0148, -0.1334, 0.0, 0.5884], [-0.0427, 0.3656, -0.3962, 0.0754], [-0.04344999999999999, 0.43655, 0.35285, 0.09005]], [[1.0, 0.0, 0.0, 0.0], [-0.0105, -0.0919000000000001, -5.55111512312578e-17, 0.4226], [-0.03665, 0.25105, -0.24765, 0.04245], [-0.03895000000000004, 0.30655, 0.22845, 0.05185]], [[1.0, 0.0, 0.0, 0.0], [-0.00655, -0.05155, 4.99999999999945e-05, 0.26795], [-0.02995, 0.14965, -0.12825, 0.01105], [-0.03394999999999998, 0.18705, 0.12505, 0.01385]], [[1.0, 0.0, 0.0, 0.0], [-0.04245, -0.16425, 4.99999999999945e-05, 0.73615], [-0.1089, 0.4843, -0.5837, 0.1094], [-0.10644999999999999, 0.55925, 0.50075, 0.12635]], [[1.0, 0.0, 0.0, 0.0], [-0.0368999999999999, -0.1461, -1.11022302462516e-16, 0.6453], [-0.10225, 0.41675, -0.48615, 0.09265], [-0.10159999999999997, 0.4867, 0.4218, 0.1082]], [[1.0, 0.0, 0.0, 
0.0], [-0.0291, -0.117, 0.0, 0.5148], [-0.092, 0.3223, -0.3543, 0.0678], [-0.09410000000000002, 0.3832, 0.3139, 0.0806]], [[1.0, 0.0, 0.0, 0.0], [-0.0205, -0.0811, 0.0, 0.3695], [-0.07915, 0.22115, -0.22155, 0.03905], [-0.08475000000000005, 0.26905, 0.20315, 0.04755]], [[1.0, 0.0, 0.0, 0.0], [-0.0127, -0.0459, 0.0, 0.2341], [-0.0648, 0.1317, -0.1148, 0.0116], [-0.07440000000000002, 0.1642, 0.1112, 0.0145]], [[1.0, 0.0, 0.0, 0.0], [-0.0619999999999999, -0.1342, 0.0, 0.6096], [-0.1812, 0.4055, -0.498, 0.0921], [-0.17809999999999998, 0.4651, 0.4245, 0.1056]], [[1.0, 0.0, 0.0, 0.0], [-0.0538, -0.1201, 0.0, 0.5343], [-0.1704, 0.3488, -0.4148, 0.0784], [-0.17054999999999998, 0.40475, 0.35755, 0.09095]], [[1.0, 0.0, 0.0, 0.0], [-0.0422, -0.0968999999999999, 0.0, 0.426], [-0.1537, 0.2695, -0.3024, 0.058], [-0.15874999999999995, 0.31855, 0.26595, 0.06855]], [[1.0, 0.0, 0.0, 0.0], [-0.0296, -0.0679, 0.0, 0.3055], [-0.1326, 0.1847, -0.1892, 0.0344], [-0.14399999999999996, 0.2237, 0.172, 0.0417]], [[1.0, 0.0, 0.0, 0.0], [-0.01825, -0.03875, 5.00000000000222e-05, 0.19335], [-0.109, 0.1099, -0.0981, 0.0117], [-0.12755000000000005, 0.13655, 0.09405, 0.01455]], [[1.0, 0.0, 0.0, 0.0], [-0.0743, -0.1024, -5.55111512312578e-17, 0.4765], [-0.26, 0.3216, -0.4046, 0.0732], [-0.25784999999999997, 0.36545, 0.34205, 0.08315]], [[1.0, 0.0, 0.0, 0.0], [-0.0642, -0.0924, 0.0, 0.4175], [-0.245, 0.2764, -0.3371, 0.0627], [-0.24784999999999996, 0.31795, 0.28805, 0.07205]], [[1.0, 0.0, 0.0, 0.0], [-0.05005, -0.07555, -4.99999999999945e-05, 0.33265], [-0.22165, 0.21335, -0.24575, 0.04705], [-0.23219999999999996, 0.2502, 0.2142, 0.0551]], [[1.0, 0.0, 0.0, 0.0], [-0.0348, -0.0536, 2.77555756156289e-17, 0.2383], [-0.19205, 0.14605, -0.15375, 0.02875], [-0.21249999999999997, 0.1757, 0.1385, 0.0346]], [[1.0, 0.0, 0.0, 0.0], [-0.0212, -0.0311, 2.77555756156289e-17, 0.1506], [-0.15865, 0.08675, -0.07975, 0.01115], [-0.19035000000000002, 0.10725, 0.07565, 0.01375]]]
set_32 = [[[1.0, 0.0, 0.0, 0.0], [-0.00434999999999997, 0.02405, 0.41795, -0.81515], [-0.00919999999999999, 0.0745, 0.833, 0.4345], [-0.00495000000000001, 0.92155, -0.09645, -0.03045]], [[1.0, 0.0, 0.0, 0.0], [-0.00364999999999993, 0.03065, 0.33315, -0.66805], [-0.00874999999999998, 0.02125, 0.76245, 0.40735], [-0.004650000000000043, 0.78485, -0.11985, -0.06045]], [[1.0, 0.0, 0.0, 0.0], [-0.00259999999999999, 0.0348, 0.2252, -0.4742], [-0.00805, -0.04405, 0.65685, 0.36875], [-0.0042500000000000315, 0.60035, -0.14775, -0.09775]], [[1.0, 0.0, 0.0, 0.0], [-0.00159999999999999, 0.0322, 0.1262, -0.286], [-0.00719999999999998, -0.0984, 0.5307, 0.3246], [-0.0038000000000000256, 0.4142, -0.1688, -0.1302]], [[1.0, 0.0, 0.0, 0.0], [-0.000799999999999995, 0.0232, 0.0562, -0.1416], [-0.00619999999999998, -0.1274, 0.3981, 0.2782], [-0.003449999999999953, 0.26365, -0.17425, -0.15015]], [[1.0, 0.0, 0.0, 0.0], [-0.0165500000000001, 0.0194500000000001, 0.39525, -0.76565], [-0.0361, 0.0803, 0.7617, 0.3952], [-0.019500000000000017, 0.8579, -0.0802, -0.0196]], [[1.0, 0.0, 0.0, 0.0], [-0.0137, 0.0263, 0.315, -0.6275], [-0.03435, 0.02965, 0.69675, 0.36995], [-0.018399999999999972, 0.7301, -0.1025, -0.048]], [[1.0, 0.0, 0.0, 0.0], [-0.00984999999999997, 0.03095, 0.21295, -0.44535], [-0.03165, -0.03255, 0.59995, 0.33425], [-0.01685000000000003, 0.55745, -0.12925, -0.08345]], [[1.0, 0.0, 0.0, 0.0], [-0.00609999999999999, 0.0292, 0.1194, -0.2685], [-0.02825, -0.08475, 0.48435, 0.29365], [-0.015200000000000047, 0.3834, -0.1498, -0.1144]], [[1.0, 0.0, 0.0, 0.0], [-0.00314999999999999, 0.02135, 0.05325, -0.13295], [-0.02425, -0.11315, 0.36305, 0.25135], [-0.013700000000000045, 0.2426, -0.156, -0.1336]], [[1.0, 0.0, 0.0, 0.0], [-0.0341, 0.0128, 0.3599, -0.6892], [-0.07875, 0.0871500000000001, 0.65545, 0.33735], [-0.04269999999999996, 0.761, -0.0574, -0.0048]], [[1.0, 0.0, 0.0, 0.0], [-0.0282, 0.0198, 0.2868, -0.5648], [-0.075, 0.0407, 0.5992, 0.315], [-0.04045000000000004, 0.64665, -0.07795, 
-0.03075]], [[1.0, 0.0, 0.0, 0.0], [-0.02035, 0.02525, 0.19395, -0.40075], [-0.06915, -0.01665, 0.51535, 0.28355], [-0.037349999999999994, 0.49245, -0.10285, -0.06325]], [[1.0, 0.0, 0.0, 0.0], [-0.01255, 0.02475, 0.10865, -0.24155], [-0.0617, -0.0653, 0.4155, 0.2483], [-0.03394999999999998, 0.33685, -0.12245, -0.09175]], [[1.0, 0.0, 0.0, 0.0], [-0.0064, 0.0185, 0.0484, -0.1196], [-0.05305, -0.09245, 0.31105, 0.21195], [-0.030999999999999972, 0.2111, -0.1295, -0.1096]], [[1.0, 0.0, 0.0, 0.0], [-0.0534, 0.00509999999999999, 0.315, -0.5938], [-0.13375, 0.09215, 0.53005, 0.26995], [-0.07359999999999994, 0.6426, -0.0328, 0.0104]], [[1.0, 0.0, 0.0, 0.0], [-0.0442, 0.0123, 0.2511, -0.4865], [-0.1275, 0.051, 0.484, 0.2511], [-0.07014999999999999, 0.54505, -0.05095, -0.01245]], [[1.0, 0.0, 0.0, 0.0], [-0.03195, 0.01855, 0.16985, -0.34515], [-0.11765, 5.00000000000222e-05, 0.41545, 0.22485], [-0.06535000000000002, 0.41355, -0.07335, -0.04115]], [[1.0, 0.0, 0.0, 0.0], [-0.01975, 0.01955, 0.09515, -0.20795], [-0.10515, -0.04365, 0.33435, 0.19585], [-0.06020000000000003, 0.2809, -0.0915, -0.0665]], [[1.0, 0.0, 0.0, 0.0], [-0.01005, 0.01505, 0.04245, -0.10285], [-0.0906, -0.069, 0.2499, 0.1665], [-0.05560000000000004, 0.1737, -0.0992, -0.0826]], [[1.0, 0.0, 0.0, 0.0], [-0.0709, -0.00210000000000005, 0.2648, -0.4887], [-0.19665, 0.09265, 0.40195, 0.20255], [-0.11125000000000002, 0.51575, -0.01105, 0.02255]], [[1.0, 0.0, 0.0, 0.0], [-0.0587, 0.00510000000000005, 0.211, -0.4003], [-0.18765, 0.05775, 0.36645, 0.18735], [-0.10685000000000006, 0.43665, -0.02645, 0.00325]], [[1.0, 0.0, 0.0, 0.0], [-0.04235, 0.01165, 0.14265, -0.28385], [-0.17345, 0.01415, 0.31385, 0.16645], [-0.10064999999999996, 0.32975, -0.04565, -0.02105]], [[1.0, 0.0, 0.0, 0.0], [-0.02615, 0.01395, 0.07995, -0.17095], [-0.15525, -0.02365, 0.25185, 0.14375], [-0.09400000000000003, 0.2221, -0.0618, -0.0427]], [[1.0, 0.0, 0.0, 0.0], [-0.0133, 0.0114, 0.0356, -0.0845], [-0.1339, -0.0465, 0.1877, 0.1215], 
[-0.08814999999999995, 0.13515, -0.06965, -0.05675]]]
set_33 = [[[1.0, 0.0, 0.0, 0.0], [0.00545000000000007, -0.45945, -5.00000000001055e-05, 0.79595], [-0.0015, 0.4806, -0.729, 0.2809], [-0.026699999999999946, 0.6552, 0.5704, 0.383]], [[1.0, 0.0, 0.0, 0.0], [0.00449999999999995, -0.3719, 0.0, 0.6607], [-0.00135000000000002, 0.39885, -0.58965, 0.23925], [-0.026550000000000018, 0.61985, 0.52605, 0.37185]], [[1.0, 0.0, 0.0, 0.0], [0.00325000000000003, -0.25845, -5.000000000005e-05, 0.47905], [-0.00105, 0.28915, -0.40885, 0.18155], [-0.02629999999999999, 0.5632, 0.4574, 0.3536]], [[1.0, 0.0, 0.0, 0.0], [0.002, -0.1511, 0.0, 0.2974], [-0.000899999999999998, 0.18, -0.2372, 0.1212], [-0.025949999999999973, 0.48875, 0.37185, 0.32915]], [[1.0, 0.0, 0.0, 0.0], [0.00105000000000002, -0.07175, -5.00000000000222e-05, 0.15285], [-0.000750000000000001, 0.09355, -0.11035, 0.06985], [-0.025400000000000034, 0.4012, 0.2779, 0.2996]], [[1.0, 0.0, 0.0, 0.0], [0.0205500000000001, -0.43285, 4.99999999998835e-05, 0.74525], [-0.00579999999999997, 0.4501, -0.687, 0.2614], [-0.10305000000000003, 0.59405, 0.52045, 0.34505]], [[1.0, 0.0, 0.0, 0.0], [0.01705, -0.35045, -4.99999999999945e-05, 0.61865], [-0.00510000000000002, 0.3735, -0.5557, 0.2226], [-0.10240000000000005, 0.562, 0.4799, 0.335]], [[1.0, 0.0, 0.0, 0.0], [0.01225, -0.24345, 4.99999999999945e-05, 0.44855], [-0.00414999999999999, 0.27075, -0.38525, 0.16885], [-0.10139999999999999, 0.5107, 0.4174, 0.3185]], [[1.0, 0.0, 0.0, 0.0], [0.00760000000000005, -0.1423, -5.55111512312578e-17, 0.2785], [-0.00335000000000001, 0.16845, -0.22365, 0.11265], [-0.09994999999999998, 0.44305, 0.33925, 0.29635]], [[1.0, 0.0, 0.0, 0.0], [0.00384999999999999, -0.06755, 4.99999999999945e-05, 0.14315], [-0.00284999999999999, 0.08755, -0.10405, 0.06485], [-0.09814999999999996, 0.36385, 0.25375, 0.26965]], [[1.0, 0.0, 0.0, 0.0], [0.04235, -0.39165, 4.99999999999945e-05, 0.66725], [-0.0122, 0.403, -0.6217, 0.2316], [-0.21819999999999995, 0.5039, 0.4462, 0.2896]], [[1.0, 0.0, 0.0, 0.0], [0.0350999999999999, 
-0.317, 1.11022302462516e-16, 0.5539], [-0.0107, 0.3344, -0.5029, 0.1972], [-0.21690000000000004, 0.4767, 0.4114, 0.2811]], [[1.0, 0.0, 0.0, 0.0], [0.0253, -0.2202, 0.0, 0.4016], [-0.0088, 0.2424, -0.3487, 0.1496], [-0.21479999999999994, 0.4332, 0.3578, 0.2672]], [[1.0, 0.0, 0.0, 0.0], [0.0156, -0.1286, 0.0, 0.2494], [-0.00710000000000001, 0.1508, -0.2025, 0.0997], [-0.21185000000000004, 0.37585, 0.29085, 0.24855]], [[1.0, 0.0, 0.0, 0.0], [0.00794999999999998, -0.06105, 5.00000000000222e-05, 0.12825], [-0.00604999999999999, 0.07825, -0.09435, 0.05725], [-0.20809999999999995, 0.3086, 0.2176, 0.2259]], [[1.0, 0.0, 0.0, 0.0], [0.06645, -0.33995, -4.99999999999945e-05, 0.57035], [-0.0199, 0.3445, -0.5396, 0.1951], [-0.3567, 0.3989, 0.3586, 0.2259]], [[1.0, 0.0, 0.0, 0.0], [0.055, -0.2751, 0.0, 0.4735], [-0.0175, 0.286, -0.4365, 0.1661], [-0.35475000000000007, 0.37745, 0.33065, 0.21925]], [[1.0, 0.0, 0.0, 0.0], [0.0397, -0.191, 5.55111512312578e-17, 0.3434], [-0.0144, 0.2073, -0.3027, 0.1259], [-0.3514999999999999, 0.343, 0.2876, 0.2083]], [[1.0, 0.0, 0.0, 0.0], [0.0245, -0.1115, -2.77555756156289e-17, 0.2133], [-0.01165, 0.12885, -0.17585, 0.08385], [-0.3469, 0.2976, 0.2338, 0.1936]], [[1.0, 0.0, 0.0, 0.0], [0.0125, -0.0529, -1.38777878078145e-17, 0.1097], [-0.01, 0.0667, -0.0821, 0.048], [-0.3409499999999999, 0.24425, 0.17495, 0.17585]], [[1.0, 0.0, 0.0, 0.0], [0.08815, -0.28245, -4.9999999999939e-05, 0.46455], [-0.02765, 0.28065, -0.44845, 0.15595], [-0.5018500000000001, 0.29385, 0.26945, 0.16325]], [[1.0, 0.0, 0.0, 0.0], [0.073, -0.2285, 0.0, 0.3857], [-0.0244, 0.233, -0.3627, 0.1327], [-0.4993000000000001, 0.2781, 0.2485, 0.1584]], [[1.0, 0.0, 0.0, 0.0], [0.05265, -0.15855, 5.000000000005e-05, 0.27975], [-0.02015, 0.16885, -0.25155, 0.10055], [-0.49495, 0.25275, 0.21605, 0.15045]], [[1.0, 0.0, 0.0, 0.0], [0.0325, -0.0926, 0.0, 0.1738], [-0.01645, 0.10495, -0.14625, 0.06685], [-0.48885, 0.21935, 0.17565, 0.13975]], [[1.0, 0.0, 0.0, 0.0], [0.0166, -0.0439, 
1.38777878078145e-17, 0.0894], [-0.01425, 0.05425, -0.06835, 0.03815], [-0.4809999999999999, 0.18, 0.1315, 0.1267]]]
# All 33 calibration sets gathered into one list; Predicted_R below averages
# over these when predicting an R-matrix at a given noise point.
training_set = [set_1, set_2, set_3, set_4, set_5, set_6, set_7, set_8, set_9, set_10, set_11, set_12, set_13, set_14, set_15, set_16, set_17, set_18, set_19, set_20, set_21, set_22, set_23, set_24, set_25, set_26, set_27, set_28, set_29, set_30, set_31, set_32, set_33]
def Predicted_R(set_num, noise_ad, noise_dephasing):
    """Predict the R-matrix of ``set_num`` at the given noise levels by a
    weighted average over the training sets, record the fidelity of that
    prediction in the module-level ``predictions`` list, and return it.

    Parameters:
        set_num: one training set; set_num[0] is its noiseless R-matrix.
        noise_ad: amplitude-damping noise level (expected on the 0.05..0.25 grid).
        noise_dephasing: dephasing noise level (same grid).
    """
    # Noiseless R-matrix of the queried set.
    matrix = set_num[0]
    # Accumulator for the predicted 4x4 matrix.
    # NOTE(review): this single accumulator is shared by every call to the
    # inner predict(); predict() is invoked twice below (once for the
    # fidelity record, once for the return value), so the second call keeps
    # accumulating into the same matrix -- confirm this is intended.
    new_matrix = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    # Shadows the module-level training_set with an identical list.
    training_set = [set_1, set_2, set_3, set_4, set_5, set_6, set_7, set_8, set_9, set_10, set_11, set_12, set_13, set_14, set_15, set_16, set_17, set_18, set_19, set_20, set_21, set_22, set_23, set_24, set_25, set_26, set_27, set_28, set_29, set_30, set_31, set_32, set_33]
    def index(noise_ad, noise_dephasing):
        # Map the (noise_ad, noise_dephasing) pair to a flat index on what is
        # presumably a 5x5 noise grid with 0.05 spacing: amplitude damping
        # selects the row (stride 5), dephasing the column (stride 1).
        x = noise_ad * 20
        x -= 1
        x *= 5
        y = noise_dephasing * 20
        y -= 1
        index = int(round(x+y))
        return index
    def predict(matrix, noise_ad, noise_dephasing):
        # Entry-by-entry weighted average over the training sets' matrices at
        # the requested noise point.
        for i in range(len(matrix)):
            for j in range(len(matrix)):
                # Total weight for entry (i, j); shadows the builtin `sum`.
                sum = 0
                for k in training_set:
                    # Weight favours sets whose noiseless entry is close to
                    # ours (2 minus the signed difference).
                    weight = np.abs(2 - (matrix[i][j]-k[0][i][j]))
                    sum += weight**1
                    new_matrix[i][j] += weight**1 * k[index(noise_ad, noise_dephasing)][i][j]
                # Normalise the accumulated entry by the total weight.
                new_matrix[i][j] *= 1/sum
        #print(new_matrix)
        return new_matrix
    #print("fidelity", calc_fidelity(predict(matrix, noise_ad, noise_dephasing), set_num[index(noise_ad, noise_dephasing)]))
    # Record the prediction fidelity against the stored truth; `predictions`
    # and `calc_fidelity` are defined elsewhere in this file.
    predictions.append(calc_fidelity(predict(matrix, noise_ad, noise_dephasing), set_num[index(noise_ad, noise_dephasing)]))
    return predict(matrix, noise_ad, noise_dephasing)
#Predicted_R(set_4, 0.15, 0.15)
# Amplitude-damping and dephasing noise values at which the predictor is
# evaluated (the same 0.05..0.25 grid the training sets were generated on).
noise_val_1 = [0.05, 0.1, 0.15, 0.2, 0.25]
noise_val_2 = [0.05, 0.1, 0.15, 0.2, 0.25]
# Evaluate every training set at every (amplitude-damping, dephasing) pair;
# each call appends one fidelity to the module-level `predictions` list.
for i in training_set:
    for j in noise_val_1:
        for k in noise_val_2:
            Predicted_R(i, j, k)
print("average fidelity", np.average(predictions))
|
https://github.com/primaryobjects/oracle
|
primaryobjects
|
from lib.util import execute
from lib.grover import grover
from lib.oracles.entangle import oracle
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
#
# Superimposed Emotions
#
# Generates an emoticon from two ASCII binary strings that contain nearly identical bits except for a few.
# The differing bits get applied entanglement, resulting in a random selection of either emoticon from the quantum circuit.
# The final output simultaneously represents both emoticons. Measuring the output collapses the state, resulting in a single emotion to display.
#
# Motivated by https://medium.com/qiskit/making-a-quantum-computer-smile-cee86a6fc1de and https://github.com/quantumjim/quantum_emoticon/blob/master/quantum_emoticon.ipynb
# See also the IBM Quantum Composer version at https://quantum-computing.ibm.com/composer/files/d363d10d96ff5f1b9000b64c56a9856a3fbe01a5dd0bb91288a12a47c22a76a8
#
# The two candidate emoticons; their 16-bit ASCII encodings differ in only
# two bit positions, which is what lets the circuit entangle just those bits.
# NOTE(review): main() below hardcodes these same strings rather than reading
# this list -- confirm the list is intended as documentation only.
emoticons = [
    ';)', # 00111011 00101001
    '8)'  # 00111000 00101001
    # ^^----- all bits are the same, except for these two.
]
def decode_binary_string(s):
    """Decode a bit string into ASCII text, one byte (8 bits) per character.

    Any trailing bits that do not fill a whole byte are ignored.
    """
    chars = []
    for start in range(0, 8 * (len(s) // 8), 8):
        chars.append(chr(int(s[start:start + 8], 2)))
    return ''.join(chars)
def main():
    """
    Return a random emoticon from ;) or 8) by using entangled qubits and bit
    strings to represent ASCII characters.
    """
    first = ';)'
    second = '8)'

    # Encode each emoticon as one byte (8 bits) per character.
    bits_a = ''.join(format(ord(ch), '08b') for ch in first)
    bits_b = ''.join(format(ord(ch), '08b') for ch in second)

    # One qubit per bit.
    n = len(bits_a)

    # Quantum register for the bits plus a classical register for results.
    var = QuantumRegister(n, 'var')
    cr = ClassicalRegister(n, 'c')
    qc = QuantumCircuit(var, cr)

    # The oracle encodes both bit strings and entangles the differing bits.
    qc.append(oracle(bits_a, bits_b, [[8, 9]]), range(n))

    # Measuring collapses the superposition into one concrete emoticon.
    qc.measure(var, cr)
    print(qc.draw())

    # Re-run the circuit many times to show the random outcomes.
    results = []
    for run in range(1000):
        # Execute the quantum circuit and collect the hit counts.
        counts = execute(qc).get_counts()
        if run == 0:
            print(counts)

        # Most frequent bit string for this run.
        key = max(counts, key=counts.get)

        # Decode it back into ASCII (8 bits per character) and collect it.
        results += decode_binary_string(key)

        # Emit a row of emoticons every 80 runs.
        if run > 0 and run % 80 == 0:
            print(''.join(results))
            results.clear()
main()
|
https://github.com/primaryobjects/oracle
|
primaryobjects
|
"""Python implementation of Grovers algorithm through use of the Qiskit library to find the value 3 (|11>)
out of four possible values."""
#import numpy and plot library
import matplotlib.pyplot as plt
import numpy as np
# importing Qiskit
from qiskit import IBMQ, Aer, QuantumCircuit, ClassicalRegister, QuantumRegister, execute
from qiskit.providers.ibmq import least_busy
from qiskit.quantum_info import Statevector
# import basic plot tools
from qiskit.visualization import plot_histogram
# define variables, 1) initialize qubits to zero
n = 2
grover_circuit = QuantumCircuit(n)
#define initialization function
def initialize_s(qc, qubits):
    """Put every qubit listed in `qubits` into superposition with an H gate."""
    for target in qubits:
        qc.h(target)
    return qc
### begin grovers circuit ###
#2) Put qubits in equal state of superposition
grover_circuit = initialize_s(grover_circuit, [0,1])
# 3) Apply oracle reflection to marked instance x_0 = 3, (|11>)
grover_circuit.cz(0,1)
# Inspect the state after the oracle. BUG FIX: the original read
# job_sim.result().get_statevector(), but no `job_sim` exists in this
# script (it was copied from a notebook); build the statevector
# directly from the circuit instead.
statevec = Statevector.from_instruction(grover_circuit)
from qiskit_textbook.tools import vector2latex
vector2latex(statevec, pretext="|\\psi\\rangle =")
# 4) apply additional reflection (diffusion operator)
grover_circuit.h([0,1])
grover_circuit.z([0,1])
grover_circuit.cz(0,1)
grover_circuit.h([0,1])
# 5) measure the qubits
grover_circuit.measure_all()
# Load IBM Q account and get the least busy backend device
provider = IBMQ.load_account()
device = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 3 and
                                      not x.configuration().simulator and x.status().operational==True))
print("Running on current least busy device: ", device)
from qiskit.tools.monitor import job_monitor
# Run on hardware with full optimization; monitor until done.
job = execute(grover_circuit, backend=device, shots=1024, optimization_level=3)
job_monitor(job, interval = 2)
results = job.result()
answer = results.get_counts(grover_circuit)
plot_histogram(answer)
#highest amplitude should correspond with marked value x_0 (|11>)
|
https://github.com/primaryobjects/oracle
|
primaryobjects
|
import json
import logging
import numpy as np
import warnings
from functools import wraps
from typing import Any, Callable, Optional, Tuple, Union
from qiskit import IBMQ, QuantumCircuit, assemble
from qiskit.circuit import Barrier, Gate, Instruction, Measure
from qiskit.circuit.library import UGate, U3Gate, CXGate
from qiskit.providers.ibmq import AccountProvider, IBMQProviderError
from qiskit.providers.ibmq.job import IBMQJob
def get_provider() -> AccountProvider:
    """Return the active IBMQ provider, loading the stored account if needed.

    IBMQ warnings and log output are temporarily silenced while the
    provider is resolved.
    """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        ibmq_logger = logging.getLogger('qiskit.providers.ibmq')
        previous_level = ibmq_logger.level
        ibmq_logger.setLevel(logging.ERROR)
        try:
            provider = IBMQ.get_provider()
        except IBMQProviderError:
            # No provider active yet -- load the stored account.
            provider = IBMQ.load_account()
        # Restore the logger to its prior verbosity.
        ibmq_logger.setLevel(previous_level)
    return provider
def get_job(job_id: str) -> Optional[IBMQJob]:
    """Retrieve a job by id; best-effort, returns None on any failure."""
    try:
        return get_provider().backends.retrieve_job(job_id)
    except Exception:
        # Deliberate swallow: a missing/unreachable job reads as "not found".
        return None
def circuit_to_json(qc: QuantumCircuit) -> str:
    """Serialize the circuit's assembled Qobj dictionary to a JSON string.

    Uses a custom encoder so numpy arrays become nested lists and complex
    numbers become (real, imag) pairs.
    """
    class _QobjEncoder(json.encoder.JSONEncoder):
        def default(self, obj: Any) -> Any:
            if isinstance(obj, np.ndarray):
                return obj.tolist()
            if isinstance(obj, complex):
                return (obj.real, obj.imag)
            # Anything else falls back to the default (raises TypeError).
            return json.JSONEncoder.default(self, obj)
    return json.dumps(circuit_to_dict(qc), cls=_QobjEncoder)
def circuit_to_dict(qc: QuantumCircuit) -> dict:
    """Assemble the circuit into a Qobj and return its dictionary form."""
    return assemble(qc).to_dict()
def get_job_urls(job: Union[str, IBMQJob]) -> Tuple[Optional[str], Optional[str]]:
    """Return (download_url, result_url) for a job or job id.

    Fix: the annotation previously promised ``Tuple[bool, Optional[str],
    Optional[str]]`` although the function has always returned a 2-tuple of
    URLs, or (None, None) on failure.
    """
    try:
        # Accept either an IBMQJob instance or a raw job-id string.
        job_id = job.job_id() if isinstance(job, IBMQJob) else job
        download_url = get_provider()._api_client.account_api.job(job_id).download_url()['url']
        result_url = get_provider()._api_client.account_api.job(job_id).result_url()['url']
        return download_url, result_url
    except Exception:
        # Best effort: any API/auth failure yields (None, None).
        return None, None
def cached(key_function: Callable) -> Callable:
    """Decorator factory that memoizes a function.

    The cache key is ``key_function(*args, **kwargs)`` and the (unbounded)
    cache is stored on the wrapped function as ``f.__cache``.

    Fix: the inner wrapper was annotated ``-> int`` although it returns
    whatever the wrapped function returns; corrected to ``-> Any``.
    """
    def _decorator(f: Any) -> Callable:
        f.__cache = {}
        @wraps(f)
        def _decorated(*args: Any, **kwargs: Any) -> Any:
            key = key_function(*args, **kwargs)
            if key not in f.__cache:
                # First time for this key: compute and remember the result.
                f.__cache[key] = f(*args, **kwargs)
            return f.__cache[key]
        return _decorated
    return _decorator
def gate_key(gate: Gate) -> Tuple[str, int]:
    """Memoization key for a gate: its name paired with its qubit count."""
    return (gate.name, gate.num_qubits)
@cached(gate_key)
def gate_cost(gate: Gate) -> int:
    """Recursive transpilation cost of a gate.

    U/U3 gates cost 1, CX costs 10, measurements and barriers cost 0; any
    other gate costs the sum of the costs of the gates in its definition.
    Results are memoized per (name, num_qubits) via @cached.
    """
    if isinstance(gate, (UGate, U3Gate)):
        return 1
    elif isinstance(gate, CXGate):
        return 10
    elif isinstance(gate, (Measure, Barrier)):
        return 0
    # Composite gate: recurse into its definition. NOTE(review): relies on
    # the legacy (instruction, qargs, cargs) tuple layout of .data entries.
    return sum(map(gate_cost, (g for g, _, _ in gate.definition.data)))
def compute_cost(circuit: Union[Instruction, QuantumCircuit]) -> int:
    """Total cost of a circuit or instruction: the sum of gate_cost over
    its (recursively priced) operations."""
    print('Computing cost...')
    if isinstance(circuit, QuantumCircuit):
        circuit_data = circuit.data
    elif isinstance(circuit, Instruction):
        circuit_data = circuit.definition.data
    else:
        raise Exception(f'Unable to obtain circuit data from {type(circuit)}')
    return sum(gate_cost(operation) for operation, _, _ in circuit_data)
def uses_multiqubit_gate(circuit: QuantumCircuit) -> bool:
    """Return True when the circuit contains a gate acting on more than one
    qubit, searching composite (non-Gate) instructions recursively.

    Barriers and measurements are ignored; plain single-qubit Gates are not
    descended into.
    """
    if isinstance(circuit, QuantumCircuit):
        circuit_data = circuit.data
    elif isinstance(circuit, Instruction) and circuit.definition is not None:
        circuit_data = circuit.definition.data
    else:
        raise Exception(f'Unable to obtain circuit data from {type(circuit)}')
    for operation, _, _ in circuit_data:
        if isinstance(operation, (Barrier, Measure)):
            continue
        if isinstance(operation, Gate):
            if operation.num_qubits > 1:
                return True
            # Single-qubit Gate: nothing further to inspect.
            continue
        if isinstance(operation, (QuantumCircuit, Instruction)) and uses_multiqubit_gate(operation):
            return True
    return False
|
https://github.com/primaryobjects/oracle
|
primaryobjects
|
from qiskit import QuantumCircuit
def oracle(b1, b2, entanglements):
    """Build a gate that encodes both bit strings b1 and b2 at once.

    Qubits where both strings hold '1' are flipped to |1>; each
    [control, target] pair in `entanglements` is placed in an entangled
    superposition, so measuring the result forces an outcome of b1 or b2.

    Parameters:
        b1, b2: bit strings (for example, 16 bit ASCII character)
        entanglements: list of [control, target] index pairs to entangle.
    """
    # Reverse to match Qiskit's right-to-left qubit ordering.
    b1 = b1[::-1]
    b2 = b2[::-1]
    # One qubit per bit.
    n = len(b1)
    qc = QuantumCircuit(n)
    # Set the qubits on which both strings agree on a 1.
    for pos in range(n):
        if b1[pos] == '1' and b2[pos] == '1':
            qc.x(pos)
    # Entangle each requested [control, target] pair.
    for control, target in entanglements:
        # Control into superposition, then spread its state to the target.
        qc.h(control)
        qc.cx(control, target)
    print(qc.draw())
    # Package the circuit as a reusable gate.
    gate = qc.to_gate()
    gate.name = "oracle"
    return gate
|
https://github.com/primaryobjects/oracle
|
primaryobjects
|
from qiskit import QuantumCircuit
from qiskit.circuit.library.standard_gates.z import ZGate
def oracle(logic, n):
    """Build a gate that marks even numbers by checking that qubit 0 is 0.

    All other qubits are ignored.

    Parameters:
        logic: unused (kept for interface compatibility; pass None).
        n: the number of qubits in the circuit (one output qubit is added).
    """
    # n data qubits plus one output qubit.
    qc = QuantumCircuit(n + 1)
    # Invert qubit 0 so the controlled-Z fires when it was originally 0 (even).
    qc.x(0)
    # Controlled-Z from qubit 0, broadcast over every remaining qubit,
    # setting the phase of the marked states.
    qc.append(ZGate().control(1), [0, range(1, n + 1)])
    # Restore qubit 0.
    qc.x(0)
    print(qc.draw())
    # Package the circuit as a reusable gate.
    gate = qc.to_gate()
    gate.name = "oracle"
    return gate
|
https://github.com/primaryobjects/oracle
|
primaryobjects
|
from qiskit import QuantumCircuit
from qiskit.circuit.classicalfunction.classicalfunction import ClassicalFunction
def oracle(logic, n):
    """Synthesize a quantum gate from a classical boolean function.

    Parameters:
        logic: Python source in the ClassicalFunction format, e.g.
            def oracle_func(x1: Int1, x2: Int1, x3: Int1) -> Int1:
                return (x1 and not x2 and not x3)
        n: the number of qubits in the circuit (one output qubit is added).
    """
    # Synthesize the classical logic into a circuit...
    synthesized = ClassicalFunction(logic).synth()
    # ...and embed it in an (n+1)-qubit circuit that includes the output qubit.
    qc = QuantumCircuit(n + 1)
    qc.compose(synthesized, inplace=True)
    print(qc.draw())
    # Package the circuit as a reusable gate.
    gate = qc.to_gate()
    gate.name = "oracle"
    return gate
|
https://github.com/primaryobjects/oracle
|
primaryobjects
|
from qiskit import QuantumCircuit
from qiskit.circuit.library.standard_gates.z import ZGate
def oracle(i, n):
    """Build a gate that phase-marks the single basis state |i>.

    Parameters:
        i: the target value to be recognized, an integer between 0 and 2^n-1.
        n: the number of qubits in the circuit (one output qubit is added).
    """
    # n data qubits plus one output qubit.
    qc = QuantumCircuit(n + 1)
    # Binary representation of i, zero-padded to n bits.
    bits = bin(i)[2:].zfill(n)
    print('Encoding ' + bits)
    # Qiskit orders qubits right-to-left.
    bits = bits[::-1]
    # Surround the multi-controlled Z with X gates on every 0-bit so the
    # controls fire exactly on |i>.
    zero_positions = [pos for pos, bit in enumerate(bits) if bit == '0']
    for pos in zero_positions:
        qc.x(pos)
    qc.append(ZGate().control(n), range(n + 1))
    for pos in zero_positions:
        qc.x(pos)
    print(qc.draw())
    # Package the circuit as a reusable gate.
    gate = qc.to_gate()
    gate.name = "oracle"
    return gate
|
https://github.com/primaryobjects/oracle
|
primaryobjects
|
from qiskit import QuantumCircuit
from qiskit.circuit.library.standard_gates.z import ZGate
def oracle(logic, n):
    """Build a gate that marks odd numbers by checking that qubit 0 is 1.

    All other qubits are ignored.

    Parameters:
        logic: unused (kept for interface compatibility; pass None).
        n: the number of qubits in the circuit (one output qubit is added).
    """
    # n data qubits plus one output qubit.
    qc = QuantumCircuit(n + 1)
    # Controlled-Z from qubit 0, broadcast over every remaining qubit; it
    # fires when qubit 0 is 1, i.e. for odd values, setting the phase.
    qc.append(ZGate().control(1), [0, range(1, n + 1)])
    print(qc.draw())
    # Package the circuit as a reusable gate.
    gate = qc.to_gate()
    gate.name = "oracle"
    return gate
|
https://github.com/esloho/Qrangen
|
esloho
|
from math import sqrt, pi
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
import oracle_simple
import composed_gates
def get_circuit(n, oracles):
    """
    Build the circuit composed by the oracle black box and the other quantum gates.
    :param n: The number of qubits (not including the ancillas)
    :param oracles: A list of black box (quantum) oracles; each of them selects a specific state
    :returns: The proper quantum circuit and the total number of qubits used
    :rtype: (qiskit.QuantumCircuit, int)
    """
    cr = ClassicalRegister(n)
    if n > 3:
        # n qubits for the real number plus n - 1 ancilla qubits required by
        # the n-controlled-Z decomposition.
        qr = QuantumRegister(n + n - 1)
    else:
        # We don't need ancillas
        qr = QuantumRegister(n)
    qc = QuantumCircuit(qr, cr)
    print("Number of qubits is {0}".format(len(qr)))
    print(qr)
    # Initial superposition over the n data qubits.
    for j in range(n):
        qc.h(qr[j])
    # The length of the oracles list, or, in other words, how many roots of the function do we have
    m = len(oracles)
    # Grover's algorithm is a repetition of an oracle box and a diffusion box.
    # The number of repetitions is given by the following formula.
    print("n is ", n)
    r = int(round((pi / 2 * sqrt((2**n) / m) - 1) / 2))
    print("Repetition of ORACLE+DIFFUSION boxes required: {0}".format(r))
    # Fix: removed two OracleSimple instances (oracle_t1/oracle_t2) that were
    # constructed here but never used.
    for _ in range(r):
        for oracle in oracles:
            oracle.get_circuit(qr, qc)
        diffusion(n, qr, qc)
    # Measure the n data qubits into the classical register.
    for j in range(n):
        qc.measure(qr[j], cr[j])
    return qc, len(qr)
def diffusion(n, qr, qc):
    """
    The Grover diffusion operator.
    Adds the inversion-about-the-mean step for the first n qubits of the
    qiskit QuantumRegister qr to the qiskit QuantumCircuit qc, using the
    ancilla qubits qr[n:] when n > 3.
    """
    # H layer on all data qubits.
    for q in range(n):
        qc.h(qr[q])
    # X layer: the controlled-Z then flips only the |00..0> state
    # (instead of flipping all the others).
    for q in range(n):
        qc.x(qr[q])
    # Qubits 0..n-2 act as controls, n-1 is the target; n.. are ancillas.
    controls = [qr[q] for q in range(n - 1)]
    if n > 3:
        ancillas = [qr[q] for q in range(n, n + n - 1)]
        composed_gates.n_controlled_Z_circuit(qc, controls, qr[n - 1], ancillas)
    else:
        composed_gates.n_controlled_Z_circuit(qc, controls, qr[n - 1], None)
    # Undo the X and H layers.
    for q in range(n):
        qc.x(qr[q])
    for q in range(n):
        qc.h(qr[q])
|
https://github.com/esloho/Qrangen
|
esloho
|
# Copyright (C) 2024 qBraid
#
# This file is part of the qBraid-SDK
#
# The qBraid-SDK is free software released under the GNU General Public License v3
# or later. You can redistribute and/or modify it under the terms of the GPL v3.
# See the LICENSE file in the project root or <https://www.gnu.org/licenses/gpl-3.0.html>.
#
# THERE IS NO WARRANTY for the qBraid-SDK, as per Section 15 of the GPL v3.
"""
Module defining QiskitBackend Class
"""
from typing import TYPE_CHECKING, Optional, Union
from qiskit_ibm_runtime import QiskitRuntimeService
from qbraid.programs import load_program
from qbraid.runtime.device import QuantumDevice
from qbraid.runtime.enums import DeviceStatus, DeviceType
from .job import QiskitJob
if TYPE_CHECKING:
import qiskit
import qiskit_ibm_runtime
import qbraid.runtime.qiskit
class QiskitBackend(QuantumDevice):
    """Wrapper class for IBM Qiskit ``Backend`` objects."""

    def __init__(
        self,
        profile: "qbraid.runtime.TargetProfile",
        service: "Optional[qiskit_ibm_runtime.QiskitRuntimeService]" = None,
    ):
        """Create a QiskitBackend."""
        super().__init__(profile=profile)
        # Fall back to a default runtime service when none is supplied.
        self._service = service or QiskitRuntimeService()
        # Resolve the concrete backend; `instance` may be absent from the profile.
        self._backend = self._service.backend(self.id, instance=self.profile.get("instance"))

    def __str__(self):
        """Official string representation of QuantumDevice object."""
        return f"{self.__class__.__name__}('{self.id}')"

    def status(self):
        """Return the status of this Device.
        Returns:
            str: The status of this Device
        """
        # Local simulators are always considered available.
        if self.device_type == DeviceType.LOCAL_SIMULATOR:
            return DeviceStatus.ONLINE
        status = self._backend.status()
        if status.operational:
            if status.status_msg == "active":
                return DeviceStatus.ONLINE
            # Operational but not actively accepting jobs.
            return DeviceStatus.UNAVAILABLE
        return DeviceStatus.OFFLINE

    def queue_depth(self) -> int:
        """Return the number of jobs in the queue for the ibm backend"""
        # Local simulators have no queue.
        if self.device_type == DeviceType.LOCAL_SIMULATOR:
            return 0
        return self._backend.status().pending_jobs

    def transform(self, run_input: "qiskit.QuantumCircuit") -> "qiskit.QuantumCircuit":
        """Transpile a circuit for the device."""
        program = load_program(run_input)
        # transform() mutates the wrapped program in place for this device.
        program.transform(self)
        return program.program

    def submit(
        self,
        run_input: "Union[qiskit.QuantumCircuit, list[qiskit.QuantumCircuit]]",
        *args,
        **kwargs,
    ) -> "qbraid.runtime.qiskit.QiskitJob":
        """Runs circuit(s) on qiskit backend via :meth:`~qiskit.execute`
        Uses the :meth:`~qiskit.execute` method to create a :class:`~qiskit.providers.QuantumJob`
        object, applies a :class:`~qbraid.runtime.qiskit.QiskitJob`, and return the result.
        Args:
            run_input: A circuit object to run on the IBM device.
        Keyword Args:
            shots (int): The number of times to run the task on the device. Default is 1024.
        Returns:
            qbraid.runtime.qiskit.QiskitJob: The job like object for the run.
        """
        backend = self._backend
        # Default the shot count to the backend's own configured default.
        shots = kwargs.pop("shots", backend.options.get("shots"))
        memory = kwargs.pop("memory", True)  # Needed to get measurements
        job = backend.run(run_input, *args, shots=shots, memory=memory, **kwargs)
        return QiskitJob(job.job_id(), job=job, device=self)
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
# importing the libraries
from qiskit import *
from qiskit import IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib import style
from PIL import Image
style.use('bmh')
# A square grayscale image represented as a numpy array; pixel values are
# scaled from [0, 255] into [0, 1).
temp = Image.open('star64.jpg').convert('L')
image = np.asarray(temp).astype('float64')
image /= 256
#print(image)
# Function for plotting the image using matplotlib
def plot_image(img, title: str):
    """Display a 2-D grayscale image with matplotlib under the given title."""
    plt.title(title)
    plt.imshow(img, extent=[0, img.shape[0], img.shape[1], 0], cmap = 'Greys')
    plt.show()
plot_image(image, 'Original Image')
# Convert the raw pixel values to probability amplitudes
def amplitude_encode(img_data):
    """Convert raw pixel values to probability amplitudes.

    Divides every pixel by the root-sum-square of the whole image so the
    flattened vector has unit L2 norm, as required for state preparation.

    Parameters:
        img_data: 2-D numpy array of pixel values.
    Returns:
        1-D numpy array of amplitudes (row-major flattening).
    """
    # Root-sum-square over all pixels; vectorized replacement for the
    # original per-element Python loop (identical values, O(n) numpy ops).
    rms = np.sqrt(np.sum(img_data**2))
    return (img_data / rms).ravel()
# Get the amplitude ancoded pixel values
# Horizontal: Original image
image_norm_h = amplitude_encode(image)
# Vertical: Transpose of Original image
image_norm_v = amplitude_encode(image.T)
# Initialize some global variable for number of qubits
data_qb = math.ceil(np.log(image.size) / np.log(2))
anc_qb = 1
total_qb = data_qb + anc_qb
# Initialize the decrement unitary
D2n_1 = np.roll(np.identity(2**total_qb), 1, axis=1)
# Create the circuit for horizontal scan
qc_h = QuantumCircuit(total_qb)
qc_h.initialize(image_norm_h, range(1, total_qb))
qc_h.h(0)
qc_h.unitary(D2n_1, range(total_qb))
qc_h.h(0)
display(qc_h.draw('mpl', fold=-1))
# Create the circuit for vertical scan
qc_v = QuantumCircuit(total_qb)
qc_v.initialize(image_norm_v, range(1, total_qb))
qc_v.h(0)
qc_v.unitary(D2n_1, range(total_qb))
qc_v.h(0)
display(qc_v.draw('mpl', fold=-1))
# Combine both circuits into a single list
circ_list = [qc_h, qc_v]
# Simulating the cirucits
back = Aer.get_backend('statevector_simulator')
results = execute(circ_list, backend=back).result()
sv_h = results.get_statevector(qc_h)
sv_v = results.get_statevector(qc_v)
from qiskit.visualization import array_to_latex
print('Horizontal scan statevector:')
display(array_to_latex(sv_h[:30], max_size=30))
print()
print('Vertical scan statevector:')
display(array_to_latex(sv_v[:30], max_size=30))
# Classical postprocessing for plotting the output
def threshold(amp):
    """Binary edge predicate: True when |amp| exceeds the 0.002 cutoff."""
    # PEP 8: a named def instead of a lambda bound to a name; the predicate
    # (amp > 0.002 or amp < -0.002) simplifies to a single abs comparison.
    return abs(amp) > 0.002
# Selecting odd states from the raw statevector and
# reshaping column vector of size 64 to an 8x8 matrix
edge_scan_h = np.abs(np.array([1 if threshold(sv_h[2*i+1].real) else 0 for i in range(2**data_qb)])).reshape(image.shape[0], image.shape[1])
edge_scan_v = np.abs(np.array([1 if threshold(sv_v[2*i+1].real) else 0 for i in range(2**data_qb)])).reshape(image.shape[0], image.shape[1]).T
# Plotting the Horizontal and vertical scans
plot_image(edge_scan_h, 'Horizontal scan output')
plot_image(edge_scan_v, 'Vertical scan output')
# Combining the horizontal and vertical component of the result
edge_scan_sim = edge_scan_h | edge_scan_v
# Plotting the original and edge-detected images
plot_image(image, 'Original image')
plot_image(edge_scan_sim, 'Edge Detected image')
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
from qiskit import QuantumCircuit, Aer, transpile, assemble
from qiskit.visualization import plot_histogram, plot_bloch_multivector
from numpy.random import randint
import numpy as np
print("Imports Successful")
qc = QuantumCircuit(1,1)
# Alice prepares qubit in state |+>
qc.h(0)
qc.barrier()
# Alice now sends the qubit to Bob
# who measures it in the X-basis
qc.h(0)
qc.measure(0,0)
# Draw and simulate circuit
display(qc.draw())
aer_sim = Aer.get_backend('aer_simulator')
job = aer_sim.run(assemble(qc))
plot_histogram(job.result().get_counts())
qc = QuantumCircuit(1,1)
# Alice prepares qubit in state |+>
qc.h(0)
# Alice now sends the qubit to Bob
# but Eve intercepts and tries to read it
qc.measure(0, 0)
qc.barrier()
# Eve then passes this on to Bob
# who measures it in the X-basis
qc.h(0)
qc.measure(0,0)
# Draw and simulate circuit
display(qc.draw())
aer_sim = Aer.get_backend('aer_simulator')
job = aer_sim.run(assemble(qc))
plot_histogram(job.result().get_counts())
np.random.seed(seed=0)
n = 100
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
print(alice_bits)
np.random.seed(seed=0)
n = 100
## Step 1
#Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
print(alice_bases)
def encode_message(bits, bases):
    """Encode each classical bit into a one-qubit circuit in the given basis.

    Parameters:
        bits: sequence of 0/1 values to transmit.
        bases: sequence of 0/1 basis choices (0 = Z-basis, 1 = X-basis).
    Returns:
        list of QuantumCircuit, one per bit.
    Fix: iterates over the inputs themselves instead of relying on the
    notebook-global `n`, so the function works for any message length.
    """
    message = []
    for bit, basis in zip(bits, bases):
        qc = QuantumCircuit(1,1)
        if basis == 0:
            # Prepare qubit in Z-basis: |0> or |1>.
            if bit == 1:
                qc.x(0)
        else:
            # Prepare qubit in X-basis: |+> or |->.
            if bit == 1:
                qc.x(0)
            qc.h(0)
        qc.barrier()
        message.append(qc)
    return message
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
print('bit = %i' % alice_bits[0])
print('basis = %i' % alice_bases[0])
message[0].draw()
print('bit = %i' % alice_bits[4])
print('basis = %i' % alice_bases[4])
message[4].draw()
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Step 3
# Decide which basis to measure in:
bob_bases = randint(2, size=n)
print(bob_bases)
def measure_message(message, bases):
    """Measure each one-qubit circuit in the chosen basis; return the bits.

    Parameters:
        message: list of QuantumCircuit (as built by encode_message).
        bases: sequence of 0/1 basis choices (0 = Z-basis, 1 = X-basis).
    Returns:
        list of 0/1 measurement results, one per circuit.
    Fixes: iterates over the message length instead of the notebook-global
    `n`; the simulator backend is fetched once outside the loop (the
    original also bound an unused `backend` variable).
    """
    aer_sim = Aer.get_backend('aer_simulator')
    measurements = []
    for q in range(len(message)):
        if bases[q] == 0: # measuring in Z-basis
            message[q].measure(0,0)
        if bases[q] == 1: # measuring in X-basis
            message[q].h(0)
            message[q].measure(0,0)
        # Single-shot run; memory=True exposes the individual outcome bit.
        qobj = assemble(message[q], shots=1, memory=True)
        result = aer_sim.run(qobj).result()
        measured_bit = int(result.get_memory()[0])
        measurements.append(measured_bit)
    return measurements
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Step 3
# Decide which basis to measure in:
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
message[0].draw()
message[6].draw()
print(bob_results)
def remove_garbage(a_bases, b_bases, bits):
    """Sift the key: keep only bits where both basis choices agree.

    Parameters:
        a_bases, b_bases: the two parties' 0/1 basis choices.
        bits: the bit values to sift.
    Returns:
        list of bits at positions where a_bases[i] == b_bases[i].
    Fix: iterates the inputs directly rather than the notebook-global `n`.
    """
    return [bit for a, b, bit in zip(a_bases, b_bases, bits) if a == b]
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Step 3
# Decide which basis to measure in:
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
## Step 4
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
print(alice_key)
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Step 3
# Decide which basis to measure in:
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
## Step 4
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
bob_key = remove_garbage(alice_bases, bob_bases, bob_results)
print(bob_key)
def sample_bits(bits, selection):
    """Pop and return a sample of bits from `bits` at the given indices.

    Note: `bits` is mutated in place -- each sampled bit is removed so it
    cannot later be used as key material. Indices are wrapped with np.mod
    into the *current* list length, which shrinks after every pop.
    """
    sample = []
    for raw_index in selection:
        # Wrap into the current length; the list gets shorter each pop.
        wrapped_index = np.mod(raw_index, len(bits))
        sample.append(bits.pop(wrapped_index))
    return sample
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Step 3
# Decide which basis to measure in:
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
## Step 4
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
bob_key = remove_garbage(alice_bases, bob_bases, bob_results)
## Step 5
sample_size = 15
bit_selection = randint(n, size=sample_size)
bob_sample = sample_bits(bob_key, bit_selection)
print(" bob_sample = " + str(bob_sample))
alice_sample = sample_bits(alice_key, bit_selection)
print("alice_sample = "+ str(alice_sample))
bob_sample == alice_sample
print(bob_key)
print(alice_key)
print("key length = %i" % len(alice_key))
np.random.seed(seed=3)
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
print(alice_bits)
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
## Step 2
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
print(alice_bases)
message[0].draw()
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
## Step 2
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Interception!!
eve_bases = randint(2, size=n)
intercepted_message = measure_message(message, eve_bases)
print(intercepted_message)
message[0].draw()
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
## Step 2
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Interception!!
eve_bases = randint(2, size=n)
intercepted_message = measure_message(message, eve_bases)
## Step 3
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
message[0].draw()
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
## Step 2
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Interception!!
eve_bases = randint(2, size=n)
intercepted_message = measure_message(message, eve_bases)
## Step 3
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
## Step 4
bob_key = remove_garbage(alice_bases, bob_bases, bob_results)
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
## Step 2
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Interception!!
eve_bases = randint(2, size=n)
intercepted_message = measure_message(message, eve_bases)
## Step 3
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
## Step 4
bob_key = remove_garbage(alice_bases, bob_bases, bob_results)
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
## Step 5
sample_size = 15
bit_selection = randint(n, size=sample_size)
bob_sample = sample_bits(bob_key, bit_selection)
print(" bob_sample = " + str(bob_sample))
alice_sample = sample_bits(alice_key, bit_selection)
print("alice_sample = "+ str(alice_sample))
bob_sample == alice_sample
n = 100
# Step 1
alice_bits = randint(2, size=n)
alice_bases = randint(2, size=n)
# Step 2
message = encode_message(alice_bits, alice_bases)
# Interception!
eve_bases = randint(2, size=n)
intercepted_message = measure_message(message, eve_bases)
# Step 3
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
# Step 4
bob_key = remove_garbage(alice_bases, bob_bases, bob_results)
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
# Step 5
sample_size = 15 # Change this to something lower and see if
# Eve can intercept the message without Alice
# and Bob finding out
bit_selection = randint(n, size=sample_size)
bob_sample = sample_bits(bob_key, bit_selection)
alice_sample = sample_bits(alice_key, bit_selection)
if bob_sample != alice_sample:
print("Eve's interference was detected.")
else:
print("Eve went undetected!")
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
import numpy as np
a = -1
b = 2.3
z = 2 + 3j
# type of a variable
print(type(a))
print(type(b))
print(type(z))
# real and imaginary parts
print(np.real(z))
print(np.imag(z))
# conjugation
print(z)
print(np.conjugate(z))
# plot the conjugates
import matplotlib.pyplot as plt
# make figure
fig = plt.figure()
# get the axes
ax = plt.axes()
# here we use "scatter" to plot individual points on the 2D plane
ax.scatter(0, 0, marker='o', color='gray')
ax.scatter(np.real(z), np.imag(z), marker='s', label="$z$")
ax.scatter(np.real(np.conjugate(z)), np.imag(np.conjugate(z)), marker='*', label="$z^*$")
# fix the axis limit
ax.set_xlim([-4, 4])
ax.set_ylim([-4, 4])
# show the grid lines
ax.grid()
# show the labels
ax.legend()
# show the plot
plt.show()
# multiplication
print(np.sqrt(np.conjugate(z)*z))
# norm / absolute value
print(np.sqrt(2.0*2.0 + 3*3))
print(np.absolute(z))
# in radians
print(np.angle(z))
# in angles
print(np.angle(z, deg=True))
c = -1
z = 0
# define an array
max_iteration = 1000
threshold = 10
A = np.zeros(max_iteration, dtype=np.complex128)
r = np.linspace(-2, 2, 201)
i = np.linspace(-2, 2, 201)
def convergence(z):
    """Return True if iterating f(z) = z**2 + c appears to stay bounded.

    Iterates at most `max_iteration` times and declares divergence as soon
    as |z| exceeds `threshold`. Relies on the notebook globals
    `max_iteration`, `threshold` and `c` defined in the cells above.
    """
    for index in range(max_iteration):
        f = z**2 + c
        # A[index] = f
        z = f
        # Escape check: once the magnitude passes the threshold the orbit
        # is treated as divergent.
        if np.absolute(z) > threshold:
            return False
    return True
# Evaluate convergence over the FULL 201x201 grid of complex starting
# points. BUG FIX: the original looped over range(101) and therefore
# filled in only a quarter of the image.
convergence_array = np.zeros((201, 201))
for index_real in range(len(r)):
    for index_imag in range(len(i)):
        z = r[index_real] + 1j*i[index_imag]
        if convergence(z):
            convergence_array[index_real, index_imag] = np.abs(z)
# make figure
fig = plt.figure()
# get the axes
ax = plt.axes()
# ax.scatter(np.real(A), np.imag(A))
ax.imshow(convergence_array)
plt.show()
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
import numpy as np
import matplotlib.pyplot as plt
def error_rate_3bit(p):
    """Failure probability of majority voting on a 3-bit repetition code.

    Decoding fails when at least two of the three copies flip: either all
    three flip (p**3) or exactly two do (3 * p**2 * (1 - p)).
    """
    three_flips = p**3
    two_flips = 3 * p**2 * (1 - p)
    return three_flips + two_flips
p_vals = np.linspace(0, 1, num=100)
plt.plot(p_vals, 1 - error_rate_3bit(p_vals), label='3bit no error')
# plt.plot(p_vals, error_rate_3bit(p_vals), label='3bit error')
plt.plot(p_vals, 1 - p_vals, label='1bit no error')
# plt.plot(p_vals, p_vals, label='1bit error')
plt.xlabel("single bit error p")
plt.legend()
from qiskit import *
def bit_flip_correction(num_qubits):
    """Encode a superposition across all qubits for the bit-flip code.

    Puts qubit 0 into |+> with a Hadamard, then copies it down a chain of
    CNOTs so all `num_qubits` qubits are entangled.
    """
    circ = QuantumCircuit(num_qubits, num_qubits)
    circ.h(0)
    for control, target in zip(range(num_qubits - 1), range(1, num_qubits)):
        circ.cx(control, target)
    return circ
circuit = bit_flip_correction(3)
circuit.draw()
backend = Aer.get_backend('statevector_simulator')
job = execute(circuit, backend)
results = job.result()
psi = results.get_statevector(circuit)
print(psi)
def bit_flip_correction_noisy(num_qubits, p):
    """Bit-flip encoder with a probabilistic X error injected on qubit 0.

    A single Bernoulli(p) draw decides whether an X (bit-flip) gate is
    appended after the encoding CNOT chain.
    """
    error_occurs = np.random.binomial(1, p)  # 1 with probability p
    circ = QuantumCircuit(num_qubits, num_qubits)
    circ.h(0)
    for control in range(num_qubits - 1):
        circ.cx(control, control + 1)
    if error_occurs == 1:
        # Inject the simulated bit-flip error on the first qubit.
        circ.x(0)
    return circ
circuit = bit_flip_correction_noisy(3, 0.25)
circuit.draw()
def phase_flip_circuit(num_qubits):
    """CNOT chain followed by a layer of Hadamards.

    The Hadamards rotate into the X-basis, turning the bit-flip code into
    a phase-flip code.
    """
    circ = QuantumCircuit(num_qubits, num_qubits)
    for control in range(num_qubits - 1):
        circ.cx(control, control + 1)
    circ.barrier()
    for qubit in range(num_qubits):
        circ.h(qubit)
    return circ
circ = phase_flip_circuit(3)
circ.draw()
import numpy as np
Cx = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
I = np.array([[1, 0], [0, 1]])
M = np.kron(Cx, I)
print(M)
W = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0]])
print(W)
U = np.matmul(W, M)
print(U)
psi = np.array([0.3, 0.9])
zero = np.array([1.0, 0.0])
v = np.kron(psi, zero)
v = np.kron(v, zero)
def print_state(v):
    """Print every amplitude of state vector `v`, labelled by binary index."""
    for index, amplitude in enumerate(v):
        print(f"{bin(index)}: " + str(amplitude))
print_state(v)
v_final = np.matmul(U, v)
print_state(v_final)
from qiskit import *
n = 5
qc = QuantumCircuit(n, n)
# initialize an equal superposition on the first qubit
qc.h(0)
# entangle the first three qubits
qc.cx(0, 1)
qc.cx(0, 2)
# apply noise/error
qc.barrier()
idx = np.random.randint(0, 3)
qc.x(idx)
qc.barrier()
# prepare syndrome qubits
qc.cx(0, 3)
qc.cx(1, 3)
qc.cx(0, 4)
qc.cx(2, 4)
# add measurement to the syndrome qubits
qc.measure(3, 3)
qc.measure(4, 4)
qc.draw()
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
import numpy as np
# import numpy
# this is a comment
pi_number = np.pi
print(pi_number)
e_number = np.e
print(e_number)
# float with specific number of decimals
print("pie is {0:.5f}, {1}".format(pi_number, " test"))
print(f"pi is {pi_number}")
print("The Euler number is {0:.2f}".format(e_number))
# scientific format with a given number of decimals
x = 3074.1e4
print("x is {0:.2e}".format(x))
# define an array manually
A = np.array([0, 3, 7, -2])
# print(A)
print("A is " + str(A))
# define an array with linearly spaced components
B = np.linspace(0, 1, num=11, endpoint=True)
print("B is " + str(B))
# print the size of the array
print(np.size(A))
print(np.size(B))
# square
x = 2.0
y = np.square(x)
print("y is " + str(y))
# square root
a = 36
b = np.sqrt(a)
print("b is " + str(b))
theta = np.pi/4
f = np.cos(theta)
print(f)
print(np.sqrt(2)/2)
# custom functions
def my_function(x):
    """Cube the input; works elementwise on NumPy arrays as well."""
    cubed = x ** 3
    return cubed
print(my_function(2))
print(my_function(3))
print(my_function(A))
B = np.array([[1, -2, 3, -4], [0, -1.5, 3, 7 + 1j]])
print(B*2)
print(type(B))
C = [1, 2, "test", my_function]
print(C)
M = np.ones((4, 7))
N = np.ones((7, 3))
print(np.matmul(M, N))
def another_function(x):
    """Evaluate the cubic polynomial x**3 - 3*x**2 + 2 (elementwise on arrays)."""
    return (x - 3) * x * x + 2
x = np.linspace(-2, 2, num=101)
y = another_function(x)
print(np.size(x))
print(np.size(y))
import matplotlib.pyplot as plt
# make figure
fig = plt.figure()
# get the axes
ax = plt.axes()
# plot y array versus x array
# ax.plot(x, y)
ax.scatter(x, y, s=1)
# labels
ax.set_xlabel('the x axis')
ax.set_ylabel('y = f(x)')
# show the plot
plt.show()
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
import numpy as np
n = 4
dimension = 2**n
v = np.zeros(dimension, dtype=np.complex128)
print(np.shape(v))
real = np.random.rand(dimension)
imag = np.random.rand(dimension)
v = real + 1j*imag
# calculate norm of v
def norm(v):
    """Euclidean norm of a (possibly complex) vector: sqrt(<v, v>)."""
    inner_product = np.dot(np.conjugate(v), v)
    return np.sqrt(inner_product)
print(norm(v))
v_normalized = v/norm(v)
print(v)
print(v_normalized)
print(norm(v))
print(norm(v_normalized))
def print_probabilities(v):
    """Print the Born-rule probability of each basis state of `v`.

    NOTE(review): the "{0:04b}" label hard-codes a 4-qubit register --
    widen the format if you use this with more qubits.
    """
    total = 0  # running probability; renamed from `sum` to avoid shadowing the builtin
    for i in range(len(v)):
        # Born rule: p_i = |v_i|^2 = conj(v_i) * v_i
        p = abs(np.conjugate(v[i])*v[i])
        total = total + p
        print("state: {0:04b}".format(i) + ", p = {0:.4f}".format(p))
    print("total probability = {0:.4f}".format(total))
print_probabilities(v_normalized)
import matplotlib.pyplot as plt
states = []
for i in range(len(v)):
s = "{0:04b}".format(i)
states.append(s)
prob = list(np.real(np.conjugate(v_normalized)*v_normalized))
plt.bar(states, v)
plt.xticks(rotation=45)
v = np.zeros(dimension, dtype=np.complex128)
v[0] = 1/np.sqrt(2)
v[-1] = 1/np.sqrt(2)
import matplotlib.pyplot as plt
v_normalized = v
states = []
for i in range(len(v)):
s = "{0:04b}".format(i)
states.append(s)
prob = list(np.real(np.conjugate(v_normalized)*v_normalized))
plt.bar(states, prob)
plt.xticks(rotation=45)
def tensor_product(u, v):
    """Kronecker (tensor) product of two 1-D vectors.

    Returns a vector of length len(u)*len(v) whose i-th block equals
    u[i] * v (same result as np.kron for 1-D inputs).
    """
    u = np.asarray(u)
    v = np.asarray(v)
    # Allocate with the promoted dtype so complex (or integer) inputs are
    # preserved instead of being forced into np.zeros' float64 default.
    prod = np.zeros(len(u)*len(v), dtype=np.result_type(u, v))
    for i in range(len(u)):
        prod[i*len(v):(i+1)*len(v)] = u[i]*v
    return prod
u = np.array([1, -1, 2])
v = np.array([0.6, 0.7, 0.8])
print(tensor_product(u, v))
def tensor_product(A, B):
    """Kronecker product of two 2-D matrices (same result as np.kron).

    Block (i, j) of the (m*r) x (n*p) result is A[i, j] * B.
    """
    A = np.asarray(A)
    B = np.asarray(B)
    m = np.shape(A)[0]
    n = np.shape(A)[1]
    r = np.shape(B)[0]
    p = np.shape(B)[1]
    # Promote the dtype so complex/integer inputs survive; np.zeros would
    # otherwise silently cast everything to float64.
    prod = np.zeros((m*r, n*p), dtype=np.result_type(A, B))
    for i in range(m):
        for j in range(n):
            prod[i*r:(i+1)*r, j*p:(j+1)*p] = A[i, j]*B
    return prod
X = np.array([[0, 1], [1, 0]])
Z = np.array([[1, 0], [0, -1]])
print(tensor_product(X, Z))
import numpy as np
def print_pauli(A):
    """Print the decomposition of a 2x2 matrix A in the Pauli basis.

    Writes A = c1*X + c2*Y + c3*Z + c4*I with c_P = Tr(P @ A) / 2.
    BUG FIX: the original swapped the Z and I coefficients and used the
    wrong sign for the Y coefficient.
    """
    c1 = (A[0, 1] + A[1, 0])/2          # X coefficient
    c2 = (A[1, 0] - A[0, 1])/(2j)       # Y coefficient (sign corrected)
    c3 = (A[0, 0] - A[1, 1])/2          # Z coefficient
    c4 = (A[0, 0] + A[1, 1])/2          # I coefficient
    print(f"{c1}X + {c2}Y + {c3}Z + {c4}I")
M = np.array([[1, 1], [-1, 3]])
print_pauli(M)
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
import numpy as np
v = np.array([1, 1])
v2 = np.array([[1], [1]])
u = np.array([0, -1, 2, 3, -0.3])
r = np.zeros(5)
r2 = np.zeros((5, 1))
print(v)
print(v2)
print(u)
print(r)
print(r2)
print(u[3])
u2 = u[:3]
print(u2)
u3 = u[3:]
print(u3)
def norm(x):
    """Euclidean length of vector `x`: square root of the sum of squares."""
    total = 0
    for component in x:
        total += component**2
    return np.sqrt(total)
v = np.array([3, -1, 0, 1, 2, -1])
v_norm = norm(v)
print(v_norm)
def dot_product(x, y):
    """Real dot product: the sum over x[i] * y[i]."""
    total = 0
    for index, value in enumerate(x):
        total += value * y[index]
    return total
v = np.array([3, -1, 0, 1, 2, -1])
u = np.array([1, 2, 3, -4, 0.2, 1])
print(dot_product(u, v))
v = np.array([1, -1, 3])
print(norm(v))
print(dot_product(v, v))
print(np.sqrt(dot_product(v, v)))
X = np.array([[0, 1], [1, 0]])
print(X)
Y = np.array([[1, 3, 1], [0, -0.4, 5]])
print(Y)
Z = np.array([[2, -1, 2, 7],[-1.1, 3.3, 4, 0.7],[9, -8, 2, 0]])
print(Z)
print(np.shape(Y))
def mult(A, B):
    """Matrix product of A (m x k) and B (k x n) via repeated dot products.

    Raises:
        ValueError: if A's column count does not match B's row count.
    """
    # Guard clause: validate dimensions BEFORE allocating the result
    # (the original built C first and only then checked the shapes).
    if np.shape(A)[1] != np.shape(B)[0]:
        raise ValueError("dimensions do not match!")
    C = np.zeros((np.shape(A)[0], np.shape(B)[1]))
    for i in range(np.shape(A)[0]):
        row = A[i, :]
        for j in range(np.shape(B)[1]):
            col = B[:, j]
            # Entry (i, j) is the dot product of row i of A with column j of B.
            C[i, j] = dot_product(row, col)
    return C
X = np.array([[0, 1, 3], [2, 4, 8]])
Y = np.array([[0], [2], [-5]])
print(mult(X, Y))
def equal(A, B):
    """Return True when matrices A and B agree in shape and every entry."""
    rows_a, cols_a = np.shape(A)[0], np.shape(A)[1]
    if (rows_a, cols_a) != (np.shape(B)[0], np.shape(B)[1]):
        return False
    for row in range(rows_a):
        for col in range(cols_a):
            if A[row, col] != B[row, col]:
                return False
    return True
print(equal(X, X))
def transpose(A):
    """Return the transpose of A, stored as a complex matrix."""
    rows, cols = np.shape(A)[0], np.shape(A)[1]
    A_T = np.zeros((cols, rows), dtype=np.complex128)
    for row in range(rows):
        for col in range(cols):
            A_T[col, row] = A[row, col]
    return A_T
print(transpose(np.array([[1, 3, 0], [-1, 2, 4]], dtype=np.complex128)))
def conjugate(A):
    """Elementwise complex conjugate of matrix A."""
    A_C = np.zeros_like(A, dtype=np.complex128)
    rows, cols = np.shape(A)[0], np.shape(A)[1]
    for row in range(rows):
        for col in range(cols):
            A_C[row, col] = np.conjugate(A[row, col])
    return A_C
A = np.array([[1, 3j, 0], [-1j + 1, 2, 4]], dtype=np.complex128)
print(conjugate(transpose(A)))
def is_hermitian(A):
    """True when A equals its own conjugate transpose (A = A^dagger)."""
    return equal(A, conjugate(transpose(A)))
print(is_hermitian(A))
B = np.array([[2, 1 + 1j], [1 - 1j, 3]])
print(is_hermitian(B))
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
# import numpy library
import numpy as np
# import pyplot library
import matplotlib.pyplot as plt
# generate a random number between 0 and 1
np.random.randint(0, 5)
np.random.randint(0, 2, size=10)
# 1000 coin flips
A = np.random.randint(0, 2, size=1000)
# make figure
fig = plt.figure()
# get the axes
ax = plt.axes()
# plot a histogram of the outcomes
plt.hist(A)
# calculate how many people share the same birthday
def calculate_matches(n):
    """Count pairs of people sharing a birthday in a random party of `n`.

    Draws `n` uniform birthdays in 1..365 and returns the number of
    unordered pairs (i, j) with the same birthday.
    """
    from collections import Counter
    outcome = np.random.randint(1, 366, size=n)
    # Count duplicates per birthday in O(n) instead of the O(n^2)
    # pairwise scan: a value appearing c times contributes C(c, 2) pairs.
    birthday_counts = Counter(outcome.tolist())
    return sum(c * (c - 1) // 2 for c in birthday_counts.values())
party_size = np.arange(10, 2000, 100)
matches = np.zeros_like(party_size)
# calculate for all different party sizes
for i in range(len(party_size)):
matches[i] = calculate_matches(party_size[i])
print(party_size)
print(matches)
plt.plot(party_size, matches)
# plot that number vs N
# a coin flip with p = 0.8
np.random.choice([0, 1], p=[0.8, 0.2])
# 1000 biased coin flips with p = 0.8
B = np.random.choice([0, 1], size=1000, p=[0.8, 0.2])
plt.hist(B)
# 10 coin flips each with success probability p = 0.5
# the output is the number of heads out of 10 total coin flips
np.random.binomial(10, 0.5)
# we can run the whole binomial experiment for multiple times using the "size" argument
# this means that if we flip 10 coins for 5 different iterations
# the output would be the number of heads in each iteration
np.random.binomial(10, 0.5, size=5)
# change the number of iterations (size), what do you observe as you increase the size?
C = np.random.binomial(10, 0.5, size=100)
plt.hist(C)
max_trials = 1000
num_trials = np.arange(max_trials)
avg_array = np.zeros_like(num_trials, dtype=np.double)
for i in range(max_trials):
random_event = np.array(np.random.choice([1, 2, 3, 4, 5, 6], size=i+1))
avg_array[i] = np.average(random_event)
plt.plot(num_trials, avg_array)
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
import numpy as np
from qiskit import *
# Create a Quantum Circuit acting on a quantum register of two qubits
circ = QuantumCircuit(2, 2)
circ = QuantumCircuit(4, 4)
# Add a H gate on qubit 0, putting this qubit in superposition.
circ.x(0)
circ.x(1)
circ.h(0)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1, putting
# the qubits in a Bell state.
circ.cx(0, 1)
circ.cz(1, 0)
circ.swap(0, 1)
circ.barrier()
circ.y(2)
circ.measure(0, 0)
# circ.measure(1, 1)
circ.draw()
# Create a Quantum Circuit acting on a quantum register of three qubits
circ = QuantumCircuit(3)
# Add a H gate on qubit 0, putting this qubit in superposition.
circ.h(0)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1, putting
# the qubits in a Bell state.
circ.cx(0, 1)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 2, putting
# the qubits in a GHZ state.
circ.cx(0, 2)
circ.draw('mpl')
# Import Aer
from qiskit import Aer
# Run the quantum circuit on a statevector simulator backend
backend = Aer.get_backend('statevector_simulator')
# Create a Quantum Program for execution
job = execute(circ, backend)
result = job.result()
outputstate = result.get_statevector(circ, decimals=3)
print(outputstate)
from qiskit.visualization import plot_state_city
plot_state_city(outputstate)
# Run the quantum circuit on a unitary simulator backend
backend = Aer.get_backend('unitary_simulator')
job = execute(circ, backend)
result = job.result()
# Show the results
print(result.get_unitary(circ, decimals=3))
# Create a Quantum Circuit
meas = QuantumCircuit(2, 2)
meas.barrier(range(2))
# map the quantum measurement to the classical bits
meas.measure(range(2), range(2))
# The Qiskit circuit object supports composition using
# the addition operator.
qc = circ + meas
#drawing the circuit
qc.draw()
# Use Aer's qasm_simulator
backend_sim = Aer.get_backend('qasm_simulator')
# Execute the circuit on the qasm simulator.
# We've set the number of repeats of the circuit
# to be 1024, which is the default.
circ.measure(0, 0)
circ.measure(1, 1)
job_sim = execute(circ, backend_sim, shots=1024)
# Grab the results from the job.
result_sim = job_sim.result()
counts = result_sim.get_counts(circ)
print(counts)
from qiskit.visualization import plot_histogram
plot_histogram(counts)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
from qiskit import QuantumCircuit, Aer, transpile, assemble
from qiskit.visualization import plot_histogram, plot_bloch_multivector
from numpy.random import randint
import numpy as np
print("Imports Successful")
qc = QuantumCircuit(1,1)
# Alice prepares qubit in state |+>
qc.h(0)
qc.barrier()
# Alice now sends the qubit to Bob
# who measures it in the X-basis
qc.h(0)
qc.measure(0,0)
# Draw and simulate circuit
display(qc.draw())
aer_sim = Aer.get_backend('aer_simulator')
job = aer_sim.run(assemble(qc))
plot_histogram(job.result().get_counts())
qc = QuantumCircuit(1,1)
# Alice prepares qubit in state |+>
qc.h(0)
# Alice now sends the qubit to Bob
# but Eve intercepts and tries to read it
qc.measure(0, 0)
qc.barrier()
# Eve then passes this on to Bob
# who measures it in the X-basis
qc.h(0)
qc.measure(0,0)
# Draw and simulate circuit
display(qc.draw())
aer_sim = Aer.get_backend('aer_simulator')
job = aer_sim.run(assemble(qc))
plot_histogram(job.result().get_counts())
np.random.seed(seed=0)
n = 100
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
print(alice_bits)
np.random.seed(seed=0)
n = 100
## Step 1
#Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
print(alice_bases)
def encode_message(bits, bases):
    """Encode each classical bit into a one-qubit circuit in the given basis.

    Args:
        bits: sequence of 0/1 values Alice wants to send.
        bases: sequence of basis choices (0 = Z-basis, 1 = X-basis).

    Returns:
        List of one-qubit QuantumCircuits, one per bit.
    """
    message = []
    # Iterate over the actual message length instead of the notebook-global
    # `n`, so the encoder works for any number of bits.
    for i in range(len(bits)):
        qc = QuantumCircuit(1,1)
        if bases[i] == 0: # Prepare qubit in Z-basis
            if bits[i] == 1:
                qc.x(0)  # |1>; bit 0 leaves the qubit in |0>
        else: # Prepare qubit in X-basis
            if bits[i] == 1:
                qc.x(0)
            qc.h(0)  # rotate into the X-basis: |+> for bit 0, |-> for bit 1
        qc.barrier()
        message.append(qc)
    return message
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
print('bit = %i' % alice_bits[0])
print('basis = %i' % alice_bases[0])
message[0].draw()
print('bit = %i' % alice_bits[4])
print('basis = %i' % alice_bases[4])
message[4].draw()
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Step 3
# Decide which basis to measure in:
bob_bases = randint(2, size=n)
print(bob_bases)
def measure_message(message, bases):
    """Measure each one-qubit circuit in the chosen basis and collect the bits.

    Args:
        message: list of one-qubit QuantumCircuits (mutated in place --
            measurement gates, and an H for X-basis readout, are appended).
        bases: sequence of basis choices (0 = Z-basis, 1 = X-basis).

    Returns:
        List of measured bits (ints), one per circuit.
    """
    # Create the simulator once instead of once per qubit, and drop the
    # unused `backend` local the original assigned.
    aer_sim = Aer.get_backend('aer_simulator')
    measurements = []
    for q in range(len(bases)):
        if bases[q] == 0: # measuring in Z-basis
            message[q].measure(0,0)
        if bases[q] == 1: # measuring in X-basis
            message[q].h(0)  # rotate the X-basis into the Z-basis first
            message[q].measure(0,0)
        qobj = assemble(message[q], shots=1, memory=True)
        result = aer_sim.run(qobj).result()
        measured_bit = int(result.get_memory()[0])
        measurements.append(measured_bit)
    return measurements
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Step 3
# Decide which basis to measure in:
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
message[0].draw()
message[6].draw()
print(bob_results)
def remove_garbage(a_bases, b_bases, bits):
    """Sift a raw BB84 key: keep only bits where both bases agree.

    Args:
        a_bases: Alice's basis choices (0 = Z-basis, 1 = X-basis).
        b_bases: Bob's basis choices, same length.
        bits: the raw bit values to sift.

    Returns:
        List of the bits at positions where Alice and Bob used the
        same basis (only those results are reliable).
    """
    # Iterate over the inputs directly instead of relying on the
    # notebook-global `n`, so the function works for any key length.
    good_bits = []
    for a_basis, b_basis, bit_value in zip(a_bases, b_bases, bits):
        if a_basis == b_basis:
            # Same basis on both ends: the bit survives sifting.
            good_bits.append(bit_value)
    return good_bits
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Step 3
# Decide which basis to measure in:
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
## Step 4
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
print(alice_key)
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Step 3
# Decide which basis to measure in:
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
## Step 4
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
bob_key = remove_garbage(alice_bases, bob_bases, bob_results)
print(bob_key)
def sample_bits(bits, selection):
    """Pop and return a sample of bits from `bits` at the given indices.

    Note: `bits` is mutated in place -- sampled bits are removed so they
    cannot be reused as key material. Indices wrap (np.mod) into the
    *current* list length, which shrinks after each pop.
    """
    sample = []
    for raw_index in selection:
        wrapped_index = np.mod(raw_index, len(bits))
        sample.append(bits.pop(wrapped_index))
    return sample
np.random.seed(seed=0)
n = 100
## Step 1
# Alice generates bits
alice_bits = randint(2, size=n)
## Step 2
# Create an array to tell us which qubits
# are encoded in which bases
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Step 3
# Decide which basis to measure in:
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
## Step 4
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
bob_key = remove_garbage(alice_bases, bob_bases, bob_results)
## Step 5
sample_size = 15
bit_selection = randint(n, size=sample_size)
bob_sample = sample_bits(bob_key, bit_selection)
print(" bob_sample = " + str(bob_sample))
alice_sample = sample_bits(alice_key, bit_selection)
print("alice_sample = "+ str(alice_sample))
bob_sample == alice_sample
print(bob_key)
print(alice_key)
print("key length = %i" % len(alice_key))
np.random.seed(seed=3)
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
print(alice_bits)
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
## Step 2
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
print(alice_bases)
message[0].draw()
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
## Step 2
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Interception!!
eve_bases = randint(2, size=n)
intercepted_message = measure_message(message, eve_bases)
print(intercepted_message)
message[0].draw()
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
## Step 2
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Interception!!
eve_bases = randint(2, size=n)
intercepted_message = measure_message(message, eve_bases)
## Step 3
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
message[0].draw()
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
## Step 2
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Interception!!
eve_bases = randint(2, size=n)
intercepted_message = measure_message(message, eve_bases)
## Step 3
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
## Step 4
bob_key = remove_garbage(alice_bases, bob_bases, bob_results)
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
np.random.seed(seed=3)
## Step 1
alice_bits = randint(2, size=n)
## Step 2
alice_bases = randint(2, size=n)
message = encode_message(alice_bits, alice_bases)
## Interception!!
eve_bases = randint(2, size=n)
intercepted_message = measure_message(message, eve_bases)
## Step 3
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
## Step 4
bob_key = remove_garbage(alice_bases, bob_bases, bob_results)
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
## Step 5
sample_size = 15
bit_selection = randint(n, size=sample_size)
bob_sample = sample_bits(bob_key, bit_selection)
print(" bob_sample = " + str(bob_sample))
alice_sample = sample_bits(alice_key, bit_selection)
print("alice_sample = "+ str(alice_sample))
bob_sample == alice_sample
n = 100
# Step 1
alice_bits = randint(2, size=n)
alice_bases = randint(2, size=n)
# Step 2
message = encode_message(alice_bits, alice_bases)
# Interception!
eve_bases = randint(2, size=n)
intercepted_message = measure_message(message, eve_bases)
# Step 3
bob_bases = randint(2, size=n)
bob_results = measure_message(message, bob_bases)
# Step 4
bob_key = remove_garbage(alice_bases, bob_bases, bob_results)
alice_key = remove_garbage(alice_bases, bob_bases, alice_bits)
# Step 5
sample_size = 15 # Change this to something lower and see if
# Eve can intercept the message without Alice
# and Bob finding out
bit_selection = randint(n, size=sample_size)
bob_sample = sample_bits(bob_key, bit_selection)
alice_sample = sample_bits(alice_key, bit_selection)
if bob_sample != alice_sample:
print("Eve's interference was detected.")
else:
print("Eve went undetected!")
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
import numpy as np
def gcd(a, b):
    """Greatest common divisor of `a` and `b` via the Euclidean algorithm.

    Replaces (a, b) with (b, a mod b) until the remainder is zero.
    Fixes the original's crash (ZeroDivisionError) when b == 0 and drops
    the redundant `r = 1` / `break` dance.
    """
    while b != 0:
        a, b = b, a % b
    return a
print(gcd(630, 56))
N = 20
a = 7
r_vals = np.arange(2, N)
a_power_r = np.power(a, r_vals)
print(a_power_r)
a_r_mod_N = a_power_r%N
import matplotlib.pyplot as plt
plt.plot(r_vals, a_r_mod_N)
def modulo(a, r, N):
    """Compute a**r mod N by fast modular exponentiation.

    Uses Python's three-argument pow(), which never forms the huge
    intermediate power. Also fixes the original loop, which returned `a`
    unreduced (not taken mod N) whenever r <= 1.
    """
    return pow(a, r, N)
print(modulo(7, 4, 15))
def period(a, N):
    """Smallest exponent i >= 2 with a**i = 1 (mod N).

    Returns None implicitly if no such exponent below N exists (e.g. when
    gcd(a, N) != 1).
    """
    power = a
    for exponent in range(2, N):
        power = (power * a) % N
        if power == 1:
            return exponent
print(period(11, 31))
from qiskit import *
n = 4
circ = QuantumCircuit(n, n)
def swap(circuit, i, j):
    """Append a SWAP of qubits i and j, decomposed into three CNOTs."""
    for control, target in ((i, j), (j, i), (i, j)):
        circuit.cx(control, target)
    return circuit
def unitary_7(circuit):
    """Append one layer of the order-finding unitary built from X gates and swaps.

    NOTE(review): given the surrounding notebook, this presumably implements
    multiplication by 7 modulo 15 on the register -- confirm against the
    accompanying text. Relies on the notebook-global qubit count `n` and the
    `swap` helper defined above.
    """
    # Flip every qubit, then permute the wires with three SWAPs.
    for i in range(n):
        circuit.x(i)
    circuit = swap(circuit, 1, 2)
    circuit = swap(circuit, 2, 3)
    circuit = swap(circuit, 0, 3)
    circuit.barrier()
    return circuit
# circ.x(0)
# circ.x(1)
# circ.barrier()
r = 4
for j in range(r):
circ = unitary_7(circ)
circ.draw()
backend = Aer.get_backend('statevector_simulator')
job = execute(circ, backend)
results = job.result()
psi = results.get_statevector(circ)
for i in range(len(psi)):
print(f"{bin(i)} :" + str(abs(psi[i]**2)))
|
https://github.com/ShabaniLab/q-camp
|
ShabaniLab
|
import numpy as np
from qiskit import *
print("successful!")
# define quantum circuit
N = 15
qc = QuantumCircuit(N, N)
# add quantum gates
qc.h(0)
for i in range(N - 1):
qc.cnot(i, i + 1)
# add barrier
qc.barrier(range(N))
# add measurement gauge/device
qc.measure(range(N), range(N))
qc.draw('mpl')
# run the circuit
# get the backend
backend = Aer.get_backend('qasm_simulator')
# do the measurements/execution
job = execute(qc, backend, shots=10000)
# get result
result = job.result()
counts = result.get_counts()
print(counts)
from qiskit.visualization import plot_histogram
plot_histogram(counts)
|
https://github.com/e-eight/vqe
|
e-eight
|
import numpy as np
import networkx as nx
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute, Aer, assemble
from qiskit.quantum_info import Statevector
from qiskit.aqua.algorithms import NumPyEigensolver
from qiskit.quantum_info import Pauli
from qiskit.aqua.operators import op_converter
from qiskit.aqua.operators import WeightedPauliOperator
from qiskit.visualization import plot_histogram
from qiskit.providers.aer.extensions.snapshot_statevector import *
from thirdParty.classical import rand_graph, classical, bitstring_to_path, calc_cost
from utils import mapeo_grafo
from collections import defaultdict
from operator import itemgetter
from scipy.optimize import minimize
import matplotlib.pyplot as plt
LAMBDA = 10
SEED = 10
SHOTS = 10000
# returns the bit index for an alpha and j
def bit(i_city, l_time, num_cities):
    """Map a (city, time-slot) pair to its flat qubit index (row-major)."""
    flat_index = num_cities * i_city + l_time
    return flat_index
# e^(cZZ)
def append_zz_term(qc, q_i, q_j, gamma, constant_term):
    """Append exp(-i * gamma * constant_term * Z_i Z_j) using the CX-RZ-CX decomposition."""
    qc.cx(q_i, q_j)
    qc.rz(2*gamma*constant_term,q_j)
    qc.cx(q_i, q_j)
# e^(cZ)
def append_z_term(qc, q_i, gamma, constant_term):
    """Append exp(-i * gamma * constant_term * Z_i) as a single RZ rotation."""
    qc.rz(2*gamma*constant_term, q_i)
# e^(cX)
def append_x_term(qc,qi,beta):
    """Append exp(i * beta * X_qi) as an RX rotation (QAOA mixer term)."""
    qc.rx(-2*beta, qi)
def get_not_edge_in(G):
    """Return every ordered pair (i, j), i != j, that is NOT an edge of G.

    Both orientations are reported, e.g. a missing edge between 0 and 2
    yields both (0, 2) and (2, 0). Nodes are assumed to be 0..N-1.
    """
    N = G.number_of_nodes()
    # Normalize each undirected edge into a set once, so each candidate
    # pair is checked in O(1) instead of scanning the whole edge list
    # (the original was O(N^2 * E)).
    edge_set = set()
    for edge_i, edge_j in G.edges():
        edge_set.add((edge_i, edge_j))
        edge_set.add((edge_j, edge_i))
    not_edge = []
    for i in range(N):
        for j in range(N):
            if i != j and (i, j) not in edge_set:
                not_edge.append((i, j))
    return not_edge
def get_classical_simplified_z_term(G, _lambda):
    """Accumulate the single-Z coefficients of the TSP QUBO Hamiltonian.

    Walks every term of the cost formula over graph G and sums the
    coefficient attached to each Z operator into a flat list indexed by
    bit(city, time, N). `_lambda` is the penalty weight for the
    constraint terms.

    Returns:
        list of length N**2 with the accumulated Z coefficients.
    """
    N = G.number_of_nodes()
    E = G.edges()
    # z term #
    z_classic_term = [0] * N**2
    # first term: one -lambda contribution per (city, time) variable
    for l in range(N):
        for i in range(N):
            z_il_index = bit(i, l, N)
            z_classic_term[z_il_index] += -1 * _lambda
    # second term: "one city per time slot" penalty (pairs i < j at time l)
    for l in range(N):
        for j in range(N):
            for i in range(N):
                if i < j:
                    # z_il
                    z_il_index = bit(i, l, N)
                    z_classic_term[z_il_index] += _lambda / 2
                    # z_jl
                    z_jl_index = bit(j, l, N)
                    z_classic_term[z_jl_index] += _lambda / 2
    # third term: "one time slot per city" penalty (slots l < j for city i)
    for i in range(N):
        for l in range(N):
            for j in range(N):
                if l < j:
                    # z_il
                    z_il_index = bit(i, l, N)
                    z_classic_term[z_il_index] += _lambda / 2
                    # z_ij
                    z_ij_index = bit(i, j, N)
                    z_classic_term[z_ij_index] += _lambda / 2
    # fourth term: penalize consecutive visits between non-adjacent cities
    not_edge = get_not_edge_in(G) # includes ordered tuples, e.g. both (1,0) and (0,1)
    for edge in not_edge:
        for l in range(N):
            i = edge[0]
            j = edge[1]
            # z_il
            z_il_index = bit(i, l, N)
            z_classic_term[z_il_index] += _lambda / 4
            # z_j(l+1)
            l_plus = (l+1) % N
            z_jlplus_index = bit(j, l_plus, N)
            z_classic_term[z_jlplus_index] += _lambda / 4
    # fifth term: tour-length contribution from each weighted edge
    weights = nx.get_edge_attributes(G,'weight')
    for edge_i, edge_j in G.edges():
        weight_ij = weights.get((edge_i,edge_j))
        weight_ji = weight_ij  # undirected graph: same weight both ways
        for l in range(N):
            # z_il
            z_il_index = bit(edge_i, l, N)
            z_classic_term[z_il_index] += weight_ij / 4
            # z_jlplus
            l_plus = (l+1) % N
            z_jlplus_index = bit(edge_j, l_plus, N)
            z_classic_term[z_jlplus_index] += weight_ij / 4
            # add the reversed-orientation term because G.edges() does not
            # include ordered tuples #
            # z_i'l
            z_il_index = bit(edge_j, l, N)
            z_classic_term[z_il_index] += weight_ji / 4
            # z_j'lplus
            l_plus = (l+1) % N
            z_jlplus_index = bit(edge_i, l_plus, N)
            z_classic_term[z_jlplus_index] += weight_ji / 4
    return z_classic_term
def tsp_obj_2(x, G,_lambda):
    """Evaluate the classical TSP cost-plus-penalty objective at bitstring x.

    Args:
        x: bitstring (indexable of '0'/'1') of length N**2, where index
           bit(i, l, N) encodes "city i visited at tour position l".
        G: networkx graph of the TSP instance with 'weight' edge attributes.
        _lambda: penalty strength for the constraint terms.

    Returns:
        Total cost: tour length plus lambda-weighted constraint violations.
    """
    not_edge = get_not_edge_in(G)
    N = G.number_of_nodes()
    tsp_cost=0
    # Distance: weighted edges traversed between consecutive tour positions
    weights = nx.get_edge_attributes(G,'weight')
    for edge_i, edge_j in G.edges():
        weight_ij = weights.get((edge_i,edge_j))
        weight_ji = weight_ij  # symmetric TSP: same weight in both directions
        for l in range(N):
            # x_il
            x_il_index = bit(edge_i, l, N)
            # x_jlplus
            l_plus = (l+1) % N
            x_jlplus_index = bit(edge_j, l_plus, N)
            tsp_cost+= int(x[x_il_index]) * int(x[x_jlplus_index]) * weight_ij
            # add order term because G.edges() do not include order tuples #
            # x_i'l
            x_il_index = bit(edge_j, l, N)
            # x_j'lplus
            x_jlplus_index = bit(edge_i, l_plus, N)
            tsp_cost += int(x[x_il_index]) * int(x[x_jlplus_index]) * weight_ji
    # Constraint 1: each tour position l holds exactly one city
    for l in range(N):
        penal1 = 1
        for i in range(N):
            x_il_index = bit(i, l, N)
            penal1 -= int(x[x_il_index])
        tsp_cost += _lambda * penal1**2
    # Constraint 2: each city i appears at exactly one tour position
    for i in range(N):
        penal2 = 1
        for l in range(N):
            x_il_index = bit(i, l, N)
            penal2 -= int(x[x_il_index])
        tsp_cost += _lambda*penal2**2
    # Constraint 3: forbid consecutive positions for non-adjacent city pairs
    for edge in not_edge:
        for l in range(N):
            i = edge[0]
            j = edge[1]
            # x_il
            x_il_index = bit(i, l, N)
            # x_j(l+1)
            l_plus = (l+1) % N
            x_jlplus_index = bit(j, l_plus, N)
            tsp_cost += int(x[x_il_index]) * int(x[x_jlplus_index]) * _lambda
    return tsp_cost
def get_classical_simplified_zz_term(G, _lambda):
    """Accumulate the quadratic (ZZ) coefficients of the TSP QUBO Hamiltonian.

    Args:
        G: networkx graph with 'weight' edge attributes (assumed undirected).
        _lambda: penalty strength multiplying the TSP constraints.

    Returns:
        list of lists: N**2 x N**2 table; entry [a][b] is the coefficient of
        Z_a Z_b, with qubit indices given by bit(city, position, N).
    """
    N = G.number_of_nodes()
    E = G.edges()
    # zz term #
    zz_classic_term = [[0] * N**2 for i in range(N**2) ]
    # first term: "one city per position" constraint, city pairs i < j at position l
    for l in range(N):
        for j in range(N):
            for i in range(N):
                if i < j:
                    # z_il
                    z_il_index = bit(i, l, N)
                    # z_jl
                    z_jl_index = bit(j, l, N)
                    zz_classic_term[z_il_index][z_jl_index] += _lambda / 2
    # second term: "one position per city" constraint, position pairs l < j for city i
    for i in range(N):
        for l in range(N):
            for j in range(N):
                if l < j:
                    # z_il
                    z_il_index = bit(i, l, N)
                    # z_ij
                    z_ij_index = bit(i, j, N)
                    zz_classic_term[z_il_index][z_ij_index] += _lambda / 2
    # third term: penalize consecutive tour positions for non-adjacent city pairs
    not_edge = get_not_edge_in(G)
    for edge in not_edge:
        for l in range(N):
            i = edge[0]
            j = edge[1]
            # z_il
            z_il_index = bit(i, l, N)
            # z_j(l+1)
            l_plus = (l+1) % N
            z_jlplus_index = bit(j, l_plus, N)
            zz_classic_term[z_il_index][z_jlplus_index] += _lambda / 4
    # fourth term: tour-length contribution from graph edge weights
    weights = nx.get_edge_attributes(G,'weight')
    for edge_i, edge_j in G.edges():
        weight_ij = weights.get((edge_i,edge_j))
        weight_ji = weight_ij  # symmetric TSP: same weight in both directions
        for l in range(N):
            # z_il
            z_il_index = bit(edge_i, l, N)
            # z_jlplus
            l_plus = (l+1) % N
            z_jlplus_index = bit(edge_j, l_plus, N)
            zz_classic_term[z_il_index][z_jlplus_index] += weight_ij / 4
            # add order term because G.edges() do not include order tuples #
            # z_i'l
            z_il_index = bit(edge_j, l, N)
            # z_j'lplus
            l_plus = (l+1) % N
            z_jlplus_index = bit(edge_i, l_plus, N)
            zz_classic_term[z_il_index][z_jlplus_index] += weight_ji / 4
    return zz_classic_term
def get_classical_simplified_hamiltonian(G, _lambda):
    """Return the (linear Z, quadratic ZZ) coefficient tables of the TSP Hamiltonian."""
    return (
        get_classical_simplified_z_term(G, _lambda),
        get_classical_simplified_zz_term(G, _lambda),
    )
def get_cost_circuit(G, gamma, _lambda):
    """Build the QAOA cost unitary exp(-i*gamma*H_C) for the TSP Hamiltonian.

    Applies a Z rotation for every non-zero linear coefficient and a ZZ
    rotation for every non-zero quadratic coefficient.
    """
    n_qubits = G.number_of_nodes() ** 2
    qc = QuantumCircuit(n_qubits, n_qubits)
    z_coeffs, zz_coeffs = get_classical_simplified_hamiltonian(G, _lambda)
    # Single-qubit rotations from the linear terms.
    for qubit, coeff in enumerate(z_coeffs):
        if coeff != 0:
            append_z_term(qc, qubit, gamma, coeff)
    # Two-qubit rotations from the quadratic terms.
    for qubit_a, row in enumerate(zz_coeffs):
        for qubit_b, coeff in enumerate(row):
            if coeff != 0:
                append_zz_term(qc, qubit_a, qubit_b, gamma, coeff)
    return qc
def get_mixer_operator(G, beta):
    """Build the QAOA mixer unitary: an X rotation by beta on every qubit."""
    num_qubits = G.number_of_nodes() ** 2
    qc = QuantumCircuit(num_qubits, num_qubits)
    for qubit in range(num_qubits):
        append_x_term(qc, qubit, beta)
    return qc
def get_QAOA_circuit(G, beta, gamma, _lambda):
    """Assemble the depth-p QAOA ansatz: H layer, then p (cost, mixer) layers,
    a statevector snapshot, and measurement of every qubit."""
    assert len(beta) == len(gamma)
    num_qubits = G.number_of_nodes() ** 2
    qc = QuantumCircuit(num_qubits, num_qubits)
    # Start from the uniform superposition.
    qc.h(range(num_qubits))
    # Alternate cost and mixer unitaries, one pair per QAOA layer.
    for beta_i, gamma_i in zip(beta, gamma):
        qc = qc.compose(get_cost_circuit(G, gamma_i, _lambda))
        qc = qc.compose(get_mixer_operator(G, beta_i))
    qc.barrier(range(num_qubits))
    # Record the pre-measurement statevector for the objective function.
    qc.snapshot_statevector("final_state")
    qc.measure(range(num_qubits), range(num_qubits))
    return qc
def invert_counts(counts):
    """Reverse every bitstring key of a counts dict (flip qubit-index order)."""
    inverted = {}
    for bitstring, hits in counts.items():
        inverted[bitstring[::-1]] = hits
    return inverted
# Sample expectation value
def compute_tsp_energy_2(counts, G):
    """Return the shot-weighted mean of the TSP objective over measured states.

    Args:
        counts: dict mapping measured bitstring -> number of shots
            (bitstrings already reversed via invert_counts).
        G: networkx graph of the TSP instance.

    Returns:
        float: sample-mean of tsp_obj_2 (using the global LAMBDA penalty).
    """
    # Fixed: removed the unused local `get_counts` from the original.
    energy = 0
    total_counts = 0
    for meas, meas_count in counts.items():
        # Classical objective (tour cost + penalties) for this bitstring.
        obj_for_meas = tsp_obj_2(meas, G, LAMBDA)
        energy += obj_for_meas * meas_count
        total_counts += meas_count
    return energy / total_counts
def get_black_box_objective_2(G, p):
    """Return the objective f(theta) to be minimized by the classical optimizer.

    theta packs p mixer angles (beta) followed by p cost angles (gamma).
    The closure simulates the depth-p QAOA circuit and returns the
    shot-averaged classical TSP energy.

    Fixes over the original: removed the unused 'aer_simulator' backend and
    the dead statevector/expected-value computation whose result was discarded.
    """
    backend = Aer.get_backend('qasm_simulator')

    def f(theta):
        # Split the flat parameter vector into mixer and cost angles.
        beta = theta[:p]
        gamma = theta[p:]
        # Build and simulate the ansatz (seeded for reproducibility).
        qc = get_QAOA_circuit(G, beta, gamma, LAMBDA)
        result = execute(qc, backend, seed_simulator=SEED, shots=SHOTS).result()
        # Average the classical objective over the measured samples; bitstrings
        # are reversed to match the qubit-index convention used by tsp_obj_2.
        counts = result.get_counts()
        return compute_tsp_energy_2(invert_counts(counts), G)
    return f
def crear_grafo(cantidad_ciudades):
    """Create a random TSP instance with a known classical optimum.

    Keeps sampling random graphs until the classical solver returns a
    non-empty best tour, then maps the instance to a networkx graph.

    Returns:
        (G, best_cost, best_path) for the accepted instance.
    """
    while True:
        pesos, conexiones = rand_graph(cantidad_ciudades)
        mejor_costo, mejor_camino = classical(pesos, conexiones, loop=False)
        if mejor_camino:
            break
    G = mapeo_grafo(conexiones, pesos)
    return G, mejor_costo, mejor_camino
def run_QAOA(p, ciudades, grafo):
    """Run depth-p QAOA for a TSP instance.

    Args:
        p: number of QAOA layers.
        ciudades: number of cities for a randomly generated instance
            (used only when grafo is None).
        grafo: optional pre-built networkx graph; when None a random
            instance is generated and its classical optimum printed.

    Fixes over the original: `grafo == None` -> `grafo is None`, and the
    misspelled local `intial_random` renamed.
    """
    if grafo is None:
        G, mejor_costo, mejor_camino = crear_grafo(ciudades)
        print("Mejor Costo")
        print(mejor_costo)
        print("Mejor Camino")
        print(mejor_camino)
        print("Bordes del grafo")
        print(G.edges())
        print("Nodos")
        print(G.nodes())
        print("Pesos")
        labels = nx.get_edge_attributes(G, 'weight')
        print(labels)
    else:
        G = grafo
    initial_random = []
    # beta, mixer Hamiltonian: angles in [0, pi)
    for _ in range(p):
        initial_random.append(np.random.uniform(0, np.pi))
    # gamma, cost Hamiltonian: angles in [0, 2*pi)
    for _ in range(p):
        initial_random.append(np.random.uniform(0, 2 * np.pi))
    init_point = np.array(initial_random)
    obj = get_black_box_objective_2(G, p)
    # COBYLA: gradient-free optimizer, suited to the noisy sampled objective.
    res_sample = minimize(obj, init_point, method="COBYLA",
                          options={"maxiter": 2500, "disp": True})
    print(res_sample)
if __name__ == '__main__':
    # Run QAOA with parameters: depth p, number of cities, optional graph.
    run_QAOA(5, 3, None)
|
https://github.com/e-eight/vqe
|
e-eight
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Variational Quantum Eigensolver algorithm.
See https://arxiv.org/abs/1304.3061
"""
import logging
import functools
import numpy as np
from qiskit import ClassicalRegister, QuantumCircuit
from qiskit.aqua.algorithms.adaptive.vq_algorithm import VQAlgorithm
from qiskit.aqua import AquaError, Pluggable, PluggableType, get_pluggable_class
from qiskit.aqua.utils.backend_utils import is_aer_statevector_backend
from qiskit.aqua.utils import find_regs_by_name
logger = logging.getLogger(__name__)
class VQE(VQAlgorithm):
    """
    The Variational Quantum Eigensolver algorithm.
    See https://arxiv.org/abs/1304.3061
    """
    # Pluggable metadata consumed by the Aqua framework: JSON schema of the
    # user-configurable options plus default optimizer/variational-form plugins.
    CONFIGURATION = {
        'name': 'VQE',
        'description': 'VQE Algorithm',
        'input_schema': {
            '$schema': 'http://json-schema.org/schema#',
            'id': 'vqe_schema',
            'type': 'object',
            'properties': {
                'operator_mode': {
                    'type': 'string',
                    'default': 'matrix',
                    'oneOf': [
                        {'enum': ['matrix', 'paulis', 'grouped_paulis']}
                    ]
                },
                'initial_point': {
                    'type': ['array', 'null'],
                    "items": {
                        "type": "number"
                    },
                    'default': None
                },
                'max_evals_grouped': {
                    'type': 'integer',
                    'default': 1
                }
            },
            'additionalProperties': False
        },
        'problems': ['energy', 'ising'],
        'depends': [
            {'pluggable_type': 'optimizer',
             'default': {
                     'name': 'L_BFGS_B'
                }
            },
            {'pluggable_type': 'variational_form',
             'default': {
                     'name': 'RYRZ'
                }
            },
        ],
    }
    def __init__(self, operator, var_form, optimizer, operator_mode='matrix',
                 initial_point=None, max_evals_grouped=1, aux_operators=None, callback=None):
        """Constructor.
        Args:
            operator (Operator): Qubit operator
            operator_mode (str): operator mode, used for eval of operator
            var_form (VariationalForm): parametrized variational form.
            optimizer (Optimizer): the classical optimization algorithm.
            initial_point (numpy.ndarray): optimizer initial point.
            max_evals_grouped (int): max number of evaluations performed simultaneously
            aux_operators (list of Operator): Auxiliary operators to be evaluated at each eigenvalue
            callback (Callable): a callback that can access the intermediate data during the optimization.
                                 Internally, four arguments are provided as follows
                                 the index of evaluation, parameters of variational form,
                                 evaluated mean, evaluated standard deviation.
        """
        self.validate(locals())
        super().__init__(var_form=var_form,
                         optimizer=optimizer,
                         cost_fn=self._energy_evaluation,
                         initial_point=initial_point)
        self._optimizer.set_max_evals_grouped(max_evals_grouped)
        self._callback = callback
        # Fall back to the variational form's preferred starting parameters
        # when no explicit initial point was supplied.
        if initial_point is None:
            self._initial_point = var_form.preferred_init_points
        self._operator = operator
        self._operator_mode = operator_mode
        self._eval_count = 0
        # Normalize aux_operators to a (possibly empty) list.
        if aux_operators is None:
            self._aux_operators = []
        else:
            self._aux_operators = [aux_operators] if not isinstance(aux_operators, list) else aux_operators
        logger.info(self.print_settings())
    @classmethod
    def init_params(cls, params, algo_input):
        """
        Initialize via parameters dictionary and algorithm input instance.
        Args:
            params (dict): parameters dictionary
            algo_input (EnergyInput): EnergyInput instance
        Returns:
            VQE: vqe object
        """
        if algo_input is None:
            raise AquaError("EnergyInput instance is required.")
        operator = algo_input.qubit_op
        vqe_params = params.get(Pluggable.SECTION_KEY_ALGORITHM)
        operator_mode = vqe_params.get('operator_mode')
        initial_point = vqe_params.get('initial_point')
        max_evals_grouped = vqe_params.get('max_evals_grouped')
        # Set up variational form, we need to add computed num qubits
        # Pass all parameters so that Variational Form can create its dependents
        var_form_params = params.get(Pluggable.SECTION_KEY_VAR_FORM)
        var_form_params['num_qubits'] = operator.num_qubits
        var_form = get_pluggable_class(PluggableType.VARIATIONAL_FORM,
                                       var_form_params['name']).init_params(params)
        # Set up optimizer
        opt_params = params.get(Pluggable.SECTION_KEY_OPTIMIZER)
        optimizer = get_pluggable_class(PluggableType.OPTIMIZER,
                                        opt_params['name']).init_params(params)
        return cls(operator, var_form, optimizer, operator_mode=operator_mode,
                   initial_point=initial_point, max_evals_grouped=max_evals_grouped,
                   aux_operators=algo_input.aux_ops)
    @property
    def setting(self):
        """Prepare the setting of VQE as a string."""
        ret = "Algorithm: {}\n".format(self._configuration['name'])
        params = ""
        # Report every private attribute except the plugin configuration;
        # an unset initial point means the optimizer starts from a random seed.
        for key, value in self.__dict__.items():
            if key != "_configuration" and key[0] == "_":
                if "initial_point" in key and value is None:
                    params += "-- {}: {}\n".format(key[1:], "Random seed")
                else:
                    params += "-- {}: {}\n".format(key[1:], value)
        ret += "{}".format(params)
        return ret
    def print_settings(self):
        """
        Preparing the setting of VQE into a string.
        Returns:
            str: the formatted setting of VQE
        """
        ret = "\n"
        ret += "==================== Setting of {} ============================\n".format(self.configuration['name'])
        ret += "{}".format(self.setting)
        ret += "===============================================================\n"
        ret += "{}".format(self._var_form.setting)
        ret += "===============================================================\n"
        ret += "{}".format(self._optimizer.setting)
        ret += "===============================================================\n"
        return ret
    def construct_circuit(self, parameter, backend=None, use_simulator_operator_mode=False):
        """Generate the circuits.
        Args:
            parameter (numpy.ndarray): parameters for variational form.
            backend (qiskit.BaseBackend): backend object.
            use_simulator_operator_mode (bool): is backend from AerProvider, if True and mode is paulis,
                                                single circuit is generated.
        Returns:
            [QuantumCircuit]: the generated circuits with Hamiltonian.
        """
        input_circuit = self._var_form.construct_circuit(parameter)
        # Without an explicit backend, pick a BasicAer simulator matching the
        # operator mode ('matrix' needs a statevector backend).
        if backend is None:
            warning_msg = "Circuits used in VQE depends on the backend type, "
            from qiskit import BasicAer
            if self._operator_mode == 'matrix':
                temp_backend_name = 'statevector_simulator'
            else:
                temp_backend_name = 'qasm_simulator'
            backend = BasicAer.get_backend(temp_backend_name)
            warning_msg += "since operator_mode is '{}', '{}' backend is used.".format(
                self._operator_mode, temp_backend_name)
            logger.warning(warning_msg)
        circuit = self._operator.construct_evaluation_circuit(self._operator_mode,
                                                              input_circuit, backend, use_simulator_operator_mode)
        return circuit
    def _eval_aux_ops(self, threshold=1e-12, params=None):
        """Evaluate the auxiliary operators at the given (default: optimal)
        parameters; results smaller than `threshold` in magnitude are clamped
        to zero. The (mean, std) pairs are stored in self._ret['aux_ops']."""
        if params is None:
            params = self.optimal_params
        wavefn_circuit = self._var_form.construct_circuit(params)
        circuits = []
        values = []
        # NOTE(review): `params` is reused here to collect aer_paulis for
        # simulator-operator mode — distinct from the method argument above.
        params = []
        for operator in self._aux_operators:
            if not operator.is_empty():
                temp_circuit = QuantumCircuit() + wavefn_circuit
                circuit = operator.construct_evaluation_circuit(self._operator_mode, temp_circuit,
                                                                self._quantum_instance.backend,
                                                                self._use_simulator_operator_mode)
                params.append(operator.aer_paulis)
            else:
                circuit = None
            circuits.append(circuit)
        if len(circuits) > 0:
            # Concatenate all non-empty evaluation circuits into one submission.
            to_be_simulated_circuits = functools.reduce(lambda x, y: x + y, [c for c in circuits if c is not None])
            if self._use_simulator_operator_mode:
                extra_args = {'expectation': {
                    'params': params,
                    'num_qubits': self._operator.num_qubits}
                }
            else:
                extra_args = {}
            result = self._quantum_instance.execute(to_be_simulated_circuits, **extra_args)
            for operator, circuit in zip(self._aux_operators, circuits):
                if circuit is None:
                    # Empty operator: report zero mean and deviation.
                    mean, std = 0.0, 0.0
                else:
                    mean, std = operator.evaluate_with_result(self._operator_mode,
                                                              circuit, self._quantum_instance.backend,
                                                              result, self._use_simulator_operator_mode)
                mean = mean.real if abs(mean.real) > threshold else 0.0
                std = std.real if abs(std.real) > threshold else 0.0
                values.append((mean, std))
        if len(values) > 0:
            aux_op_vals = np.empty([1, len(self._aux_operators), 2])
            aux_op_vals[0, :] = np.asarray(values)
            self._ret['aux_ops'] = aux_op_vals
    def _run(self):
        """
        Run the algorithm to compute the minimum eigenvalue.
        Returns:
            Dictionary of results
        """
        # 'matrix' mode requires a statevector backend; fall back to 'paulis'.
        if not self._quantum_instance.is_statevector and self._operator_mode == 'matrix':
            logger.warning('Qasm simulation does not work on {} mode, changing '
                           'the operator_mode to "paulis"'.format(self._operator_mode))
            self._operator_mode = 'paulis'
        self._use_simulator_operator_mode = \
            is_aer_statevector_backend(self._quantum_instance.backend) \
            and self._operator_mode != 'matrix'
        self._quantum_instance.circuit_summary = True
        self._eval_count = 0
        self._ret = self.find_minimum(initial_point=self.initial_point,
                                      var_form=self.var_form,
                                      cost_fn=self._energy_evaluation,
                                      optimizer=self.optimizer)
        # Prefer the optimizer's own evaluation count when it reports one.
        if self._ret['num_optimizer_evals'] is not None and self._eval_count >= self._ret['num_optimizer_evals']:
            self._eval_count = self._ret['num_optimizer_evals']
        self._eval_time = self._ret['eval_time']
        logger.info('Optimization complete in {} seconds.\nFound opt_params {} in {} evals'.format(
            self._eval_time, self._ret['opt_params'], self._eval_count))
        self._ret['eval_count'] = self._eval_count
        self._ret['energy'] = self.get_optimal_cost()
        self._ret['eigvals'] = np.asarray([self.get_optimal_cost()])
        self._ret['eigvecs'] = np.asarray([self.get_optimal_vector()])
        self._eval_aux_ops()
        return self._ret
    # This is the objective function to be passed to the optimizer for evaluation.
    def _energy_evaluation(self, parameters):
        """
        Evaluate energy at given parameters for the variational form.
        Args:
            parameters (numpy.ndarray): parameters for variational form.
        Returns:
            float or list of float: energy of the hamiltonian of each parameter.
        """
        # The optimizer may batch several parameter sets into one flat vector
        # (see max_evals_grouped); split and evaluate them in one submission.
        num_parameter_sets = len(parameters) // self._var_form.num_parameters
        circuits = []
        parameter_sets = np.split(parameters, num_parameter_sets)
        mean_energy = []
        std_energy = []
        for idx in range(len(parameter_sets)):
            parameter = parameter_sets[idx]
            circuit = self.construct_circuit(parameter, self._quantum_instance.backend, self._use_simulator_operator_mode)
            circuits.append(circuit)
        to_be_simulated_circuits = functools.reduce(lambda x, y: x + y, circuits)
        if self._use_simulator_operator_mode:
            extra_args = {'expectation': {
                'params': [self._operator.aer_paulis],
                'num_qubits': self._operator.num_qubits}
            }
        else:
            extra_args = {}
        result = self._quantum_instance.execute(to_be_simulated_circuits, **extra_args)
        for idx in range(len(parameter_sets)):
            mean, std = self._operator.evaluate_with_result(
                self._operator_mode, circuits[idx], self._quantum_instance.backend, result, self._use_simulator_operator_mode)
            mean_energy.append(np.real(mean))
            std_energy.append(np.real(std))
            self._eval_count += 1
            if self._callback is not None:
                self._callback(self._eval_count, parameter_sets[idx], np.real(mean), np.real(std))
            logger.info('Energy evaluation {} returned {}'.format(self._eval_count, np.real(mean)))
        return mean_energy if len(mean_energy) > 1 else mean_energy[0]
    def get_optimal_cost(self):
        """Return the minimum eigenvalue found by the optimization."""
        if 'opt_params' not in self._ret:
            raise AquaError("Cannot return optimal cost before running the algorithm to find optimal params.")
        return self._ret['min_val']
    def get_optimal_circuit(self):
        """Return the variational-form circuit bound to the optimal parameters."""
        if 'opt_params' not in self._ret:
            raise AquaError("Cannot find optimal circuit before running the algorithm to find optimal params.")
        return self._var_form.construct_circuit(self._ret['opt_params'])
    def get_optimal_vector(self):
        """Return the optimal state: a statevector on statevector backends,
        otherwise a counts dict obtained by measuring the optimal circuit."""
        qc = self.get_optimal_circuit()
        if self._quantum_instance.is_statevector:
            ret = self._quantum_instance.execute(qc)
            self._ret['min_vector'] = ret.get_statevector(qc, decimals=16)
        else:
            # Qasm backend: append measurements and sample the distribution.
            c = ClassicalRegister(qc.width(), name='c')
            q = find_regs_by_name(qc, 'q')
            qc.add_register(c)
            qc.barrier(q)
            qc.measure(q, c)
            ret = self._quantum_instance.execute(qc)
            self._ret['min_vector'] = ret.get_counts(qc)
        return self._ret['min_vector']
    @property
    def optimal_params(self):
        """Return the optimal variational parameters found by the optimizer."""
        if 'opt_params' not in self._ret:
            raise AquaError("Cannot find optimal params before running the algorithm.")
        return self._ret['opt_params']
|
https://github.com/icepolarizer/qwgen
|
icepolarizer
|
#!/usr/bin/env python3
# Dependency: qiskit, pyperclip
def warn(*args, **kwargs):
    """No-op stand-in for warnings.warn: accepts anything, does nothing."""
    return None
import warnings
# Monkeypatch: silence all warning output (e.g. qiskit deprecation notices).
warnings.warn = warn
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--clipboard", action="store_true",
                    help="paste to the clipboard")
parser.add_argument("-l", "--length", type=int,
                    help="password length")
args = parser.parse_args()
import string, math
from qiskit import *
import pyperclip
# 62-symbol alphabet A-Z, a-z, 0-9 — indexed 0..61 by rand_int().
table = string.ascii_uppercase + string.ascii_lowercase + string.digits
# 6 qubits in uniform superposition: one measured shot yields 6 random bits.
circ = QuantumCircuit(6)
for q in range(6):
    circ.h(q)
circ.measure_all()
backend_sim = Aer.get_backend('qasm_simulator')
def rand_int():
    """Sample a uniform integer in [0, 61] from quantum measurement.

    Runs the 6-qubit circuit for a single shot and rejection-samples:
    the 6 measured bits give a value in [0, 63]; 62 and 63 are discarded
    so the result indexes `table` uniformly.
    """
    while True:
        job_sim = execute(circ, backend_sim, shots=1)
        counts = job_sim.result().get_counts(circ)
        bits = max(counts, key=lambda key: counts[key])[:6]
        value = int(bits, 2)
        if value <= 61:
            return value
# Default password length, overridable via --length.
pwlen = 8
if args.length:
    pwlen = args.length
if args.clipboard:
    # Copy one password to the clipboard instead of printing candidates.
    pyperclip.copy(''.join(table[rand_int()] for _ in range(pwlen)))
    print("Random password copied to clipboard")
else:
    # Print a 20x8 grid of candidate passwords.
    for i in range(20):
        for j in range(8):
            pw = ''.join(table[rand_int()] for _ in range(pwlen))
            print(pw+' ', end='')
        print('\n', end='')
|
https://github.com/soumya-s3/qiskit_developer_test_notebook
|
soumya-s3
|
import numpy as np
from qiskit import QuantumCircuit
# Building the circuit
# Create a Quantum Circuit acting on a quantum register of three qubits
circ = QuantumCircuit(3)
# Add a H gate on qubit 0, putting this qubit in superposition.
circ.h(0)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1, putting
# the qubits in a Bell state.
circ.cx(0, 1)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 2, putting
# the qubits in a GHZ state.
circ.cx(0, 2)
# We can visualise the circuit using QuantumCircuit.draw()
# (the 'mpl' renderer requires matplotlib).
circ.draw('mpl')
from qiskit.quantum_info import Statevector
# Set the initial state of the simulator to the ground state using from_int
# (dimension 2**3 = 8 for the three-qubit system).
state = Statevector.from_int(0, 2**3)
# Evolve the state by the quantum circuit
state = state.evolve(circ)
# Draw the resulting GHZ state using the LaTeX renderer.
state.draw('latex')
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
# Build
#------
# Create a Quantum Circuit acting on the q register
# (2 qubits, 2 classical bits for the measurement results).
circuit = QuantumCircuit(2, 2)
# Add a H gate on qubit 0
circuit.h(0)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1
circuit.cx(0, 1)
# Map the quantum measurement to the classical bits
circuit.measure([0,1], [0,1])
# END
# Execute
#--------
# Use Aer's qasm_simulator
simulator = Aer.get_backend('qasm_simulator')
# Execute the circuit on the qasm simulator
job = execute(circuit, simulator, shots=1000)
# Grab results from the job
result = job.result()
# Return counts: a dict mapping measured bitstrings to shot counts
# (a Bell state should yield only '00' and '11', roughly 50/50).
counts = result.get_counts(circuit)
print("\nTotal count for 00 and 11 are:",counts)
# END
# Visualize
#----------
# Using QuantumCircuit.draw(), as in previous example
circuit.draw('mpl')
# Analyze
#--------
# Plot a histogram of the measurement counts
plot_histogram(counts)
# END
<h3 style="color: rgb(0, 125, 65)"><i>References</i></h3>
https://qiskit.org/documentation/tutorials.html
https://www.youtube.com/watch?v=P5cGeDKOIP0
https://qiskit.org/documentation/tutorials/circuits/01_circuit_basics.html
https://docs.quantum-computing.ibm.com/lab/first-circuit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.