repo
stringclasses 900
values | file
stringclasses 754
values | content
stringlengths 4
215k
|
|---|---|---|
https://github.com/alvinli04/Quantum-Steganography
|
alvinli04
|
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, AncillaRegister
from qiskit import execute
from qiskit import Aer
from qiskit import IBMQ
from qiskit.compiler import transpile
import neqr
import random
import steganography
def arraynxn(n):
    """Return an n x n grid of random grayscale pixel values in [0, 255]."""
    rows = []
    for _ in range(n):
        rows.append([random.randint(0, 255) for _ in range(n)])
    return rows
'''
NEQR Unit Tests
'''
def convert_to_bits_test():
    """Print a random 2x2 image and its bit representation from neqr.convert_to_bits."""
    pixels = [[random.randint(0, 255) for _ in range(2)] for _ in range(2)]
    print(pixels)
    print(neqr.convert_to_bits(pixels))
def neqr_test():
    # Smoke test for the NEQR image encoding: encode a random 4x4 grayscale
    # image and print every nonzero basis state of the resulting statevector.
    testarr = arraynxn(4)
    print('test array:')
    print(testarr)
    flattened_array = neqr.convert_to_bits(testarr)
    print([''.join([str(b) for i,b in enumerate(a)]) for a in flattened_array])
    # 4 index qubits (16 positions) + 8 intensity qubits (grayscale byte).
    idx = QuantumRegister(4)
    intensity = QuantumRegister(8)
    result_circuit = QuantumCircuit(intensity, idx)
    neqr.neqr(flattened_array, result_circuit, idx, intensity)
    backend = Aer.get_backend('statevector_simulator')
    job = execute(result_circuit, backend=backend, shots=1, memory=True)
    job_result = job.result()
    statevec = job_result.get_statevector(result_circuit)
    # Dump only the basis states with nonzero amplitude (12-bit labels).
    for i in range(len(statevec)):
        if statevec[i] != 0:
            print(f"{format(i, '012b')}: {statevec[i].real}")
    print(result_circuit)
############################################################################################################################
'''
Steganography Unit Tests
'''
def comparator_test():
    """Exercise steganography.comparator on two deliberately different 4-qubit registers
    and print the measured comparison ancillas (big-endian)."""
    #creating registers and adding them to circuit
    regX = QuantumRegister(4)
    regY = QuantumRegister(4)
    circuit = QuantumCircuit(regX, regY)
    cr = ClassicalRegister(2)
    #changing registers to make them different
    circuit.x(regX[0])
    circuit.x(regX[2])
    circuit.x(regY[1])
    circuit.x(regY[3])
    result = QuantumRegister(2)
    circuit.add_register(result)
    circuit.add_register(cr)
    #comparator returns the circuit
    steganography.comparator(regY, regX, circuit, result)
    #result --> ancillas from function
    circuit.measure(result, cr)
    #measuring
    simulator = Aer.get_backend('aer_simulator')
    simulation = execute(circuit, simulator, shots=1, memory=True)
    simResult = simulation.result()
    counts = simResult.get_counts(circuit)
    # Qiskit reports bitstrings little-endian; reverse for readability.
    for(state, count) in counts.items():
        big_endian_state = state[::-1]
        print(big_endian_state)
def coordinate_comparator_test():
    """Exercise steganography.coordinate_comparator on two 2-qubit coordinate
    registers (both |00> by default) and dump nonzero statevector amplitudes."""
    regXY = QuantumRegister(2)
    regAB = QuantumRegister(2)
    result = QuantumRegister(1)
    circuit = QuantumCircuit(regXY, regAB, result)
    #uncomment this to make the registers different
    #regXY is in |00> and regAB in |01>
    #circuit.x(regAB[1])
    #circuit.x(regXY[1])
    steganography.coordinate_comparator(circuit, result, regXY, regAB)
    print(circuit)
    backend = Aer.get_backend('statevector_simulator')
    simulation = execute(circuit, backend=backend, shots=1, memory=True)
    simResult = simulation.result()
    statevec = simResult.get_statevector(circuit)
    # 5 qubits total -> 5-bit basis-state labels.
    for state in range(len(statevec)):
        if statevec[state] != 0:
            #note: output is in little endian
            print(f"{format(state, '05b')}: {statevec[state].real}")
def difference_test():
    """Exercise steganography.difference on two 4-qubit registers and print the
    measured difference register (big-endian).

    NOTE(review): presumably computes a difference of regY and regX into
    `difference` -- confirm against steganography.difference; both inputs are
    left in |0000> here, so the expected output is all zeros.
    """
    regY = QuantumRegister(4, "regY")
    regX = QuantumRegister(4, "regX")
    difference = QuantumRegister(4, 'difference')
    cr = ClassicalRegister(4, 'measurement')
    circuit = QuantumCircuit(regY, regX, difference, cr)
    circuit.barrier()
    steganography.difference(circuit, regY, regX, difference)
    #print(circuit.draw())
    circuit.measure(difference, cr)
    simulator = Aer.get_backend('aer_simulator')
    simulation = execute(circuit, simulator, shots=1)
    result = simulation.result()
    counts = result.get_counts(circuit)
    # Qiskit reports bitstrings little-endian; reverse for readability.
    for(state, count) in counts.items():
        big_endian_state = state[::-1]
        print(big_endian_state)
def get_secret_image_test():
    """Build 5 random 4x4 binary images and print them alongside the secret
    image derived by steganography.get_secret_image."""
    binary_images = [
        [[random.randint(0, 1) for _ in range(4)] for _ in range(4)]
        for _ in range(5)
    ]
    secret = steganography.get_secret_image(5, binary_images)
    for image in binary_images:
        print(image)
    print(f'result:\n {secret}')
def invert_test():
    # Encode a random 4x4 image with NEQR, apply steganography.invert to the
    # intensity register, and dump nonzero statevector amplitudes.
    test_arr = arraynxn(4)
    print(neqr.convert_to_bits(test_arr))
    idx = QuantumRegister(4)
    intensity = QuantumRegister(8)
    inverse = QuantumRegister(8)
    circuit = QuantumCircuit(inverse, intensity, idx)
    neqr.neqr(neqr.convert_to_bits(test_arr), circuit, idx, intensity)
    steganography.invert(circuit, intensity, inverse)
    backend = Aer.get_backend('statevector_simulator')
    simulation = execute(circuit, backend=backend, shots=1, memory=True)
    simResult = simulation.result()
    statevec = simResult.get_statevector(circuit)
    # NOTE(review): the circuit has 20 qubits but states are formatted with
    # only 12 bits -- confirm whether the inverse register was meant to be
    # included in the printout.
    for state in range(len(statevec)):
        if statevec[state] != 0:
            #note: output is in little endian
            #only have to look at first bit
            print(f"{format(state, '012b')}: {statevec[state].real}")
def get_key_test():
    # End-to-end test of key generation: NEQR-encode random 2x2 cover and
    # secret images, invert the secret intensities, derive the key qubits,
    # and measure them on IBM's hosted MPS simulator.
    test_cover = arraynxn(2)
    test_secret = arraynxn(2)
    print(f'cover:\n {test_cover} \nsecret:\n{test_secret}')
    sz = 4
    cover_idx, cover_intensity = QuantumRegister(2), QuantumRegister(8)
    cover = QuantumCircuit(cover_intensity, cover_idx)
    secret_idx, secret_intensity = QuantumRegister(2), QuantumRegister(8)
    secret = QuantumCircuit(secret_intensity, secret_idx)
    neqr.neqr(neqr.convert_to_bits(test_cover), cover, cover_idx, cover_intensity)
    neqr.neqr(neqr.convert_to_bits(test_secret), secret, secret_idx, secret_intensity)
    # Working registers for the key-derivation circuit.
    key_idx, key_result = QuantumRegister(2), QuantumRegister(1)
    inv = QuantumRegister(8)
    diff1 = QuantumRegister(8)
    diff2 = QuantumRegister(8)
    comp_res = QuantumRegister(2)
    key_mes = ClassicalRegister(3)
    circuit = QuantumCircuit(cover_intensity, cover_idx, secret_intensity, secret_idx, key_idx, key_result, inv, diff1, diff2, comp_res, key_mes)
    steganography.invert(circuit, secret_intensity, inv)
    steganography.get_key(circuit, key_idx, key_result, cover_intensity, secret_intensity, inv, diff1, diff2, comp_res, sz)
    circuit.measure(key_result[:] + key_idx[:], key_mes)
    # NOTE(review): requires a saved IBMQ account; runs remotely, not locally.
    provider = IBMQ.load_account()
    simulator = provider.get_backend('simulator_mps')
    simulation = execute(circuit, simulator, shots=1024)
    result = simulation.result()
    counts = result.get_counts(circuit)
    # Qiskit reports bitstrings little-endian; reverse for readability.
    for(state, count) in counts.items():
        big_endian_state = state[::-1]
        print(f"Measured {big_endian_state} {count} times.")
def load_test():
    """Sanity check: put two qubits into superposition with H, measure three
    classical bits, and print each observed state big-endian with its count."""
    data = QuantumRegister(2)
    extra = QuantumRegister(1)
    readout = ClassicalRegister(3)
    circuit = QuantumCircuit(data, extra, readout)
    circuit.h(data)
    circuit.measure(data[:] + extra[:], readout)
    backend = Aer.get_backend("aer_simulator")
    job = execute(circuit, backend, shots=1024)
    counts = job.result().get_counts(circuit)
    for state, count in counts.items():
        # Reverse the little-endian bitstring before printing.
        print(f"Measured {state[::-1]} {count} times.")
def main():
    # Entry point: currently runs only the key-generation test.
    get_key_test()
if __name__ == '__main__':
    main()
|
https://github.com/Aurelien-Pelissier/IBMQ-Quantum-Programming
|
Aurelien-Pelissier
|
from qiskit import*
from qiskit.providers.aer import QasmSimulator
from qiskit.tools.visualization import plot_histogram
from qiskit.tools.monitor import job_monitor
import matplotlib.pyplot as plt
# Build and simulate a 2-qubit Bell-pair circuit (H + CNOT), then plot counts.
quantum_register = QuantumRegister(2) # Circuit wires / number of qubits in the circuit
""" Para medir, em mecanica quรขntica, ao medir um Qbit nรณs destruimos a informaรงรฃo daquele estado
e armazenamos ela em um bit clรกssico """
classic_register = ClassicalRegister(2)
first_circuit = QuantumCircuit(quantum_register, classic_register) # Build the circuit
first_circuit.draw(output = 'mpl') # Draw the circuit; works in the IBM app
first_circuit.h(quantum_register[0]) # Apply the first gate (Hadamard) to the first wire
# Apply the CNOT gate
###
first_circuit.draw(output = 'mpl')
###
first_circuit.cx(quantum_register[0], quantum_register[1]) # CNOT entangles the control qubit
# on wire zero with the target qubit on wire one
###
first_circuit.draw(output = 'mpl')
###
first_circuit.measure(quantum_register, classic_register) # extract the measurement
###
first_circuit.draw(output = 'mpl')
###
simulator = QasmSimulator() # Simulator that performs the computation for us
result = execute(first_circuit, backend= simulator).result()
counts = result.get_counts(first_circuit)
first_circuit.draw(output='mpl')
plot_histogram(counts)
plt.ylabel(counts)
plt.show()
"""
IBMQ.load_account()
host = IBMQ.get_provider('ibm-q')
quantum_computer = host.get_backend('ibmq_belem')
result_qcomputer = execute(first_circuit, backend= quantum_computer)
job_monitor(result_qcomputer)
result = result_qcomputer.result()
plot_histogram(result.get_counts(first_circuit))
plt.ylabel(counts)
plt.show()
"""
|
https://github.com/Aurelien-Pelissier/IBMQ-Quantum-Programming
|
Aurelien-Pelissier
|
import sys
import numpy as np
from matplotlib import pyplot as plt
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute, Aer, visualization
from random import randint
def to_binary(N, n_bit):
    """Return the n_bit-wide big-endian binary representation of N.

    Parameters
    ----------
    N : int
        Non-negative integer to convert.
    n_bit : int
        Width of the output in bits.

    Returns
    -------
    numpy.ndarray of bool, shape (n_bit,)
        Nbin[0] is the most significant bit, matching the original
        loop-based implementation.
    """
    # Extract each bit directly with shifts instead of repeatedly
    # reducing N modulo 2**i as the original did.
    return np.array([(N >> (n_bit - 1 - i)) & 1 for i in range(n_bit)],
                    dtype=bool)
def modular_multiplication(qc,a,N):
    """
    applies the unitary operator that implements
    modular multiplication function x -> a*x(modN)
    Only works for the particular case x -> 7*x(mod15)!
    """
    # NOTE: the `a` and `N` arguments are ignored; the gate sequence below is
    # hard-coded for a = 7, N = 15 (as the docstring warns).
    # Flip qubits 0..2.
    for i in range(0,3):
        qc.x(i)
    # The two cx triplets below each implement a SWAP between their qubits.
    qc.cx(2,1)
    qc.cx(1,2)
    qc.cx(2,1)
    qc.cx(1,0)
    qc.cx(0,1)
    qc.cx(1,0)
    # Final CNOT sequence involving qubit 3; gate order is significant.
    qc.cx(3,0)
    qc.cx(0,1)
    qc.cx(1,0)
def quantum_period(a, N, n_bit):
    """Search for the period r of x -> a*x (mod N) by repeatedly applying the
    modular-multiplication unitary and measuring until the start value recurs.

    Returns the number of applications needed to return to the initial value.
    NOTE(review): gates are appended to the same circuit every loop iteration
    (measure + oracle each pass), so the circuit grows with r -- confirm this
    is intended rather than rebuilding a fresh circuit per step.
    """
    # Quantum part
    print(" Searching the period for N =", N, "and a =", a)
    qr = QuantumRegister(n_bit)
    cr = ClassicalRegister(n_bit)
    qc = QuantumCircuit(qr,cr)
    simulator = Aer.get_backend('qasm_simulator')
    s0 = randint(1, N-1) # Chooses random int
    sbin = to_binary(s0,n_bit) # Turns to binary
    print("\n Starting at \n s =", s0, "=", "{0:b}".format(s0), "(bin)")
    # Quantum register is initialized with s (in binary)
    for i in range(0,n_bit):
        if sbin[n_bit-i-1]:
            qc.x(i)
    s = s0
    r=-1 # makes while loop run at least 2 times
    # Applies modular multiplication transformation until we come back to initial number s
    while s != s0 or r <= 0:
        r+=1
        # sets up circuit structure
        qc.measure(qr, cr)
        modular_multiplication(qc,a,N)
        qc.draw('mpl')
        # runs circuit and processes data
        job = execute(qc,simulator, shots=10)
        result_counts = job.result().get_counts(qc)
        result_histogram_key = list(result_counts)[0] # https://qiskit.org/documentation/stubs/qiskit.result.Result.get_counts.html#qiskit.result.Result.get_counts
        s = int(result_histogram_key, 2)
        print(" ", result_counts)
    plt.show()
    print("\n Found period r =", r)
    return r
if __name__ == '__main__':
    # Demo parameters: the hard-coded oracle supports only a = 7, N = 15.
    a = 7
    N = 15
    n_bit=5
    r = quantum_period(a, N, n_bit)
|
https://github.com/drnickallgood/simonqiskit
|
drnickallgood
|
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.providers.aer import noise
import pprint
# Dump the noise model, coupling map, and calibration properties of a real
# IBMQ device (requires a saved IBMQ account).
# Choose a real device to simulate
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
device = provider.get_backend('ibmq_vigo')
properties = device.properties()
coupling_map = device.configuration().coupling_map
# Generate an Aer noise model for device
# NOTE(review): noise.device.basic_device_noise_model was removed in newer
# qiskit-aer releases (NoiseModel.from_backend replaces it) -- confirm the
# pinned qiskit version before running.
noise_model = noise.device.basic_device_noise_model(properties)
basis_gates = noise_model.basis_gates
test2 = noise_model.to_dict()
print("Noise Model")
pprint.pprint(test2)
print("\nCoupling Map")
print(coupling_map)
print("\nProperties")
test = properties.to_dict()
pprint.pprint(test)
#print(properties)
# Generate a quantum circuit
#qc = QuantumCircuit(2, 2)
#qc.h(0)
#qc.cx(0, 1)
#qc.measure([0, 1], [0, 1])
# Perform noisy simulation
#backend = Aer.get_backend('qasm_simulator')
'''
job_sim = execute(qc, backend,
coupling_map=coupling_map,
noise_model=noise_model,
basis_gates=basis_gates)
'''
#sim_result = job_sim.result()
#print(sim_result.get_counts(qc))
|
https://github.com/drnickallgood/simonqiskit
|
drnickallgood
|
import sys
import logging
import matplotlib.pyplot as plt
import numpy as np
import operator
import itertools
#from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute, IBMQ
from qiskit.providers.ibmq import least_busy
from collections import OrderedDict
# AER is for simulators
from qiskit import Aer
from qiskit import QuantumCircuit
from qiskit import ClassicalRegister
from qiskit import QuantumRegister
from qiskit import execute
from qiskit import IBMQ
#from qiskit.providers.ibmq.managed import IBMQJobManager
from qiskit.tools.monitor import job_monitor
from qiskit.tools.visualization import plot_histogram
from qiskit.tools.visualization import circuit_drawer
from sympy import Matrix, pprint, MatrixSymbol, expand, mod_inverse
from qjob import QJob
def blackbox(period_string):
    #### Blackbox Function #####
    # QP's don't care about this, we do#
    #############################
    # Builds the Simon oracle for `period_string` onto the module-level
    # circuit. NOTE(review): relies on globals `n`, `qr`, and `simonCircuit`;
    # it mutates and returns the current global circuit rather than the one
    # constructed by the caller.
    # Copy first register to second by using CNOT gates
    for i in range(n):
        #simonCircuit.cx(qr[i],qr[n+i])
        simonCircuit.cx(qr[i],qr[n+i])
    # get the small index j such it's "1"
    j = -1
    #reverse the string so that it takes
    s = period_string[::-1]
    for i, c in enumerate(s):
        if c == "1":
            j = i
            break
    # 1-1 and 2-1 mapping with jth qubit
    # x is control to xor 2nd qubit with a
    for i, c in enumerate(s):
        if c == "1" and j >= 0:
            #simonCircuit.x(qr[j])
            simonCircuit.cx(qr[j], qr[n+i]) #the i-th qubit is flipped if s_i is 1
            #simonCircuit.x(qr[j])
    # Random permutation of the output qubits.
    # This part is how we can get by with 1 query of the oracle and better
    # simulates quantum behavior we'd expect
    perm = list(np.random.permutation(n))
    # init position
    init = list(range(n))
    i = 0
    # Apply SWAPs until the output register matches the random permutation.
    while i < n:
        if init[i] != perm[i]:
            k = perm.index(init[i])
            simonCircuit.swap(qr[n+i],qr[n+k]) #swap gate on qubits
            init[i], init[k] = init[k], init[i] # mark the swapped qubits
        else:
            i += 1
    # Randomly flip qubit
    # Seed random numbers for predictability / benchmark
    for i in range(n):
        if np.random.random() > 0.5:
            simonCircuit.x(qr[n+i])
    simonCircuit.barrier()
    ### END OF BLACKBOX FUNCTION
    return simonCircuit
def run_circuit(circuit, backend):
    """Execute `circuit` on `backend` and return the finished job's Result.

    Bug fix: the original ignored the `circuit` argument and always submitted
    the module-level `simonCircuit` instead.
    """
    # Default for this backend seems to be 1024 ibmqx2
    shots = 1024
    job = execute(circuit, backend=backend, shots=shots)
    # Poll the job status every 2 seconds until completion.
    job_monitor(job, interval=2)
    results = job.result()
    return results
'''
counts = results.get_counts()
#print("Getting Results...\n")
#print(qcounts)
#print("")
print("Submitting to IBM Q...\n")
print("\nIBM Q Backend %s: Resulting Values and Probabilities" % qbackend)
print("===============================================\n")
print("Simulated Runs:",shots,"\n")
# period, counts, prob,a0,a1,...,an
#
for key, val in qcounts.items():
prob = val / shots
print("Period:", key, ", Counts:", val, ", Probability:", prob)
print("")
'''
def guass_elim(results):
    # Classical post processing via Guassian elimination for the linear equations
    # Y a = 0
    # k[::-1], we reverse the order of the bitstring
    # Returns human-readable XOR equations satisfied by the hidden period.
    # NOTE(review): depends on the module-level `n` (period-string length).
    equations = list()
    lAnswer = [ (k[::-1],v) for k,v in results.get_counts().items() if k != "0"*n ]
    # Sort basis by probabilities
    lAnswer.sort(key = lambda x: x[1], reverse=True)
    Y = []
    for k, v in lAnswer:
        Y.append( [ int(c) for c in k ] )
    Y = Matrix(Y)
    # Row-reduce over GF(2): entries that are even count as zero.
    Y_transformed = Y.rref(iszerofunc=lambda x: x % 2==0)
    # convert rational and negatives in rref
    def mod(x,modulus):
        numer,denom = x.as_numer_denom()
        return numer*mod_inverse(denom,modulus) % modulus
    # Deal with negative and fractial values
    Y_new = Y_transformed[0].applyfunc(lambda x: mod(x,2))
    #print("\nThe hidden period a0, ... a%d only satisfies these equations:" %(n-1))
    #print("===============================================================\n")
    rows,cols = Y_new.shape
    for r in range(rows):
        Yr = [ "a"+str(i)+"" for i,v in enumerate(list(Y_new[r,:])) if v==1]
        if len(Yr) > 0:
            tStr = " xor ".join(Yr)
            #single value is 0, only xor with perid string 0 to get
            if len(tStr) == 2:
                equations.append("period xor" + " 0 " + " = 0")
            else:
                equations.append("period" + " xor " + tStr + " = 0")
    return equations
def print_list(results):
    """Print result bitstrings from `results` sorted by descending count.

    Bug fix: the original sorted an undefined module-level global `qcounts`
    and ignored the `results` argument; the counts now come from `results`.
    """
    # Sort (bitstring, count) pairs by count, highest first.
    sorted_x = sorted(results.get_counts().items(), key=operator.itemgetter(1), reverse=True)
    print("Sorted list of result strings by counts")
    print("======================================\n")
    for i in sorted_x:
        print(i)
    #print(sorted_x)
    print("")
## easily create period strings
## We want to avoid using anything with all 0's as that gives us false results
## because anything mod2 00 will give results
def create_period_str(strlen):
    """Return every bitstring of length `strlen` that contains at least one '1'.

    The all-zero string is excluded: anything mod-2 dotted with 00...0 is 0,
    which would make it a useless (trivially satisfied) period string.
    """
    candidates = ("".join(map(str, bits))
                  for bits in itertools.product([0, 1], repeat=strlen))
    return [candidate for candidate in candidates if "1" in candidate]
## function to get dot product of result string with the period string to verify, result should be 0
#check the wikipedia for simons formula
# DOT PRODUCT IS MOD 2 !!!!
# Result XOR ?? = 0 -- this is what we're looking for!
# We have to verify the period string with the ouptput using mod_2 addition aka XOR
# Simply xor the period string with the output string
# Simply xor the period string with the output string, result must be 0 or 0b0
def verify_string(ostr, pstr):
    """
    Verify an observed result string against the period string.

    Computes the bitwise dot product of the two bitstrings and reduces it
    mod 2 (Simon's promise: y . s == 0 (mod 2) for every observed y).

    Returns True when the mod-2 dot product is 0, else False.
    Raises ValueError on mismatched lengths (the original numpy
    implementation raised on mismatched shapes as well).
    """
    if len(ostr) != len(pstr):
        raise ValueError("ostr and pstr must have equal length")
    # Sum of pairwise bit products, reduced mod 2 -- no numpy round-trip.
    dot = sum(int(o) * int(p) for o, p in zip(ostr, pstr))
    return dot % 2 == 0
# Module-level experiment setup: generate candidate period strings, connect
# to IBMQ, and collect the hardware/simulator backends used below.
#### START ####
# hidden stringsn
period_strings_5qubit = list()
period_strings_5qubit = create_period_str(2)
period_strings_2bit = list()
period_strings_3bit = list()
period_strings_4bit = list()
period_strings_5bit = list()
period_strings_6bit = list()
period_strings_7bit = list()
# 2-bit strings
period_strings_2bit = create_period_str(2)
# 3-bit strings
period_strings_3bit = create_period_str(3)
# 4-bit strings
period_strings_4bit = create_period_str(4)
# 5-bit strings
period_strings_5bit = create_period_str(5)
# 6-bit strings
period_strings_6bit = create_period_str(6)
# 7-bit strings
period_strings_7bit = create_period_str(7)
# IBM Q stuff..
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
# 14 qubit (broken?)
melbourne = provider.get_backend('ibmq_16_melbourne')
#5 qubit backends
ibmqx2 = provider.get_backend('ibmqx2') # Yorktown
london = provider.get_backend('ibmq_london')
essex = provider.get_backend('ibmq_essex')
burlington = provider.get_backend('ibmq_burlington')
ourense = provider.get_backend('ibmq_ourense')
vigo = provider.get_backend('ibmq_vigo')
# Pick the least busy operational 5-qubit hardware backend.
least = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits == 5 and not x.configuration().simulator and x.status().operational==True))
# Setup logging
# Will fail if file exists already -- because I'm lazy
'''
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
filename='results-2-7bit/' + melbourne.name() + '-2bit-12iter.txt',
filemode='x')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
'''
#Nam comes back as ibmq_backend, get the part after ibmq
#least_name = least.name().split('_')[1]
#print("Least busy backend: " + least_name)
# 32 qubit qasm simulator - IBMQ
ibmq_sim = provider.get_backend('ibmq_qasm_simulator')
# Local Simulator,
local_sim = Aer.get_backend('qasm_simulator')
circuitList = list()
backend_list = dict()
#backend_list['local_sim'] = local_sim
backend_list['ibmqx2'] = ibmqx2
backend_list['london'] = london
backend_list['essex'] = essex
backend_list['burlington'] = burlington
backend_list['ourense'] = ourense
backend_list['melbourne'] = melbourne
backend_list['vigo'] = vigo
#backend14q_list['melbourne'] = melbourne
## DO NOT USE ITERATION FORMULA JUST HERE FOR REF
# Iterations = # of backends tested
# iteration formula = floor(log2(num_backends * num_shots)) = 14 here
# 2-bit period strings
ranJobs = list()
backname = "local_sim"
#2bit = 12 = 36 random functions , min = 35
#3bit = 54 = 37+ random functions, min = 372
#4bit = 26 = 390, min = 384
#5bit = 13 = 403, min = 385
#6bit = 7 = 441, min = 385
#7bit = 4 = 508, min = 385
iterations = 12
#o Jobs total = # of strings * iterations
total_jobs = iterations * len(period_strings_2bit)
job_start_idx = 1
circs = list()
dup_count = 0
# Build `iterations` Simon circuits per 2-bit period string, then scan the
# list for consecutive duplicates (same gate counts) and print everything.
# Idea here is we have are feeding hidden bitstrings and getting back results from the QC
# Create circuits
for period in period_strings_2bit:
    #print(str(period))
    n = len(period)
    # Seed random number
    print("=== Creating Circuit ===")
    #logging.info("=== Creating Circuit: " + str(period) + " ===")
    # This allows us to get consistent random functions generated for f(x)
    np.random.seed(2) ## returns 0 duplicates for 2bit stings, 36 iterations
    #np.random.seed(384) ## returns 21 duplicates for 3bit stings, 54 iterations
    #np.random.seed(227)
    for k in range(iterations):
        # Generate circuit
        qr = QuantumRegister(2*n)
        cr = ClassicalRegister(n)
        simonCircuit = QuantumCircuit(qr,cr)
        # Hadamards prior to oracle
        for i in range(n):
            simonCircuit.h(qr[i])
        simonCircuit.barrier()
        # Oracle query
        # NOTE(review): blackbox() reads the module-level qr/simonCircuit set
        # just above, so this reassignment depends on those globals.
        simonCircuit = blackbox(period)
        # Apply hadamards again
        for i in range(n):
            simonCircuit.h(qr[i])
        simonCircuit.barrier()
        # Measure qubits, maybe change to just first qubit to measure
        simonCircuit.measure(qr[0:n],cr)
        circs.append(simonCircuit)
    #### end iterations loop for debugging
# Check for duplicates
# We compare count_ops() to get the actual operations and order they're in
# count_ops returns OrderedDict
k = 0
while k < len(circs)-1:
    if circs[k].count_ops() == circs[k+1].count_ops():
        #print("\n=== Duplicates Found! ===")
        #print("Index:" + str(k))
        #print("Index:" + str(k+1))
        dup_count = dup_count + 1
        #print(circs[k].count_ops())
        #print(circs[k+1].count_ops())
        #print("=== End Duplcates ===")
        k = k+2
    else:
        k = k+1
print("Total Circuits:" + str(len(circs)))
#logging.info("Total Circuits:" + str(len(circs)))
print("Total Duplicates:" + str(dup_count))
#logging.info("Total Duplicates:" + str(dup_count))
for circ in circs:
    print(circ)
# NOTE(review): execution stops here; everything below this call is dead code.
exit(1)
# Dead code (the exit(1) above halts the script before this point): submit
# each built circuit to the melbourne backend, then tally correct/incorrect
# counts per job via verify_string.
# Run Circuits
logging.info("\n=== Sending data to IBMQ Backend:" + melbourne.name() + " ===\n")
for circ in circs:
    #print("Job: " + str(job_start_idx) + "/" + str(total_jobs))
    logging.info("Job: " + str(job_start_idx) + "/" + str(total_jobs))
    job = execute(circ,backend=melbourne, shots=1024)
    #job = execute(circ,backend=local_sim, shots=1024)
    job_start_idx += 1
    job_monitor(job,interval=3)
    # Store result, including period string
    # NOTE(review): `period` here is whatever value the earlier build loop
    # left behind, not necessarily this circuit's period -- confirm intent.
    qj = QJob(job,circ,melbourne.name(), period)
    ranJobs.append(qj)
# Go through and get correct vs incorrect in jobs
for qjob in ranJobs:
    results = qjob.job.result()
    counts = results.get_counts()
    equations = guass_elim(results)
    # Get period string
    pstr = qjob.getPeriod()
    obsv_strs = list()
    str_cnt = 0
    sorted_str = sorted(results.get_counts().items(), key=operator.itemgetter(1), reverse=True)
    #print("==== RAW RESULTS ====")
    #logging.info("==== RAW RESULTS ====")
    #logging.info("Period String:" + qjob.getPeriod())
    #logging.info(counts)
    # Get just the observed strings
    for string in sorted_str:
        obsv_strs.append(string[0])
    # go through and verify strings
    for o in obsv_strs:
        # Remember to re-reverse string so it's back to normal due to IBMQ Endianness
        if verify_string(o,pstr):
            # Goes through strings and counts
            for string, count in counts.items():
                if string == o:
                    #print("===== SET CORRECT =====")
                    #print("Correct String: " + string)
                    #logging.info("Correct String: " + string)
                    #print("Correct String Counts: " + str(count))
                    qjob.setCorrect(count)
        else:
            # lookup counts based on string
            # counts is a dict()
            for string, count in counts.items():
                if string == o:
                    # Add value to incorrect holder in object
                    #print("===== SET INCORRECT =====")
                    #print("Incorrect String: " + string)
                    #logging.info("Incorrect String: " + string)
                    #print("Incorrect String Counts: " + str(count))
                    qjob.setIncorrect(count)
total_correct = 0
total_incorrect = 0
total_runs = (1024 * iterations) * len(period_strings_2bit)
for qjob in ranJobs:
    total_correct += qjob.getCorrect()
    total_incorrect += qjob.getIncorrect()
logging.info("\n\nTotal Runs: " + str(total_runs))
logging.info("Total Correct: " + str(total_correct))
logging.info("Prob Correct: " + str(float(total_correct) / float(total_runs)))
logging.info("Total Incorrect: " + str(total_incorrect))
logging.info("Prob Incorrect: " + str(float(total_incorrect) / float(total_runs)))
logging.info("Total Duplicates:" + str(dup_count))
exit(1)
# Dead code (unreachable past the exit(1) calls above): build one circuit per
# period string, fan them out to every backend, score results, and bucket the
# finished jobs per backend name.
# Least busy backend, for individual testing
#backend_list[least_name] = least
# Make Circuits for all period strings!
#for p in period_strings_5qubit:
# NOTE(review): `period_strings_14qubit` is never defined anywhere in this
# file -- if this loop were reached it would raise NameError.
for p in period_strings_14qubit:
    # Circuit name = Simon_+ period string
    #circuitName = "Simon-" + p
    circuitName = p
    n = len(p)
    # For simons, we use the first n registers for control qubits
    # We use the last n registers for data qubits.. which is why we need 2*n
    qr = QuantumRegister(2*n)
    cr = ClassicalRegister(n)
    simonCircuit = QuantumCircuit(qr,cr,name=circuitName)
    # Apply hadamards prior to oracle
    for i in range(n):
        simonCircuit.h(qr[i])
    simonCircuit.barrier()
    #call oracle for period string
    simonCircuit = blackbox(p)
    # Apply hadamards after blackbox
    for i in range(n):
        simonCircuit.h(qr[i])
    simonCircuit.barrier()
    # Measure qubits, maybe change to just first qubit to measure
    simonCircuit.measure(qr[0:n],cr)
    circuitList.append(simonCircuit)
# Run loop to send circuits to IBMQ..
local_sim_ranJobs = list()
ibmqx2_ranJobs = list()
london_ranJobs = list()
essex_ranJobs = list()
burlington_ranJobs = list()
ourense_ranJobs = list()
vigo_ranJobs = list()
ibmq_sim_ranJobs = list()
melbourne_ranJobs = list()
print("\n===== SENDING DATA TO IBMQ BACKENDS... =====\n")
ranJobs = list()
jcount = 1
jtotal = 500
for name in backend_list:
    for circuit in circuitList:
        job = execute(circuit,backend=backend_list[name], shots=1024)
        # Keep tabs on running jobs
        print("Running job on backend: " + name)
        print("Running job: " + str(jcount) + "/" + str(jtotal))
        jcount += 1
        job_monitor(job,interval=5)
        # Custom object to hold the job, circuit, and backend
        qj = QJob(job,circuit,name)
        #print(qj.backend())
        # Append finished / ran job to list of jobs
        ranJobs.append(qj)
for qjob in ranJobs:
    # Results from each job
    results = qjob.job.result()
    # total counts from job
    counts = results.get_counts()
    # equations from each job
    equations = guass_elim(results)
    #period string encoded into name
    pstr = qjob.circuit.name
    #list of observed strings
    obs_strings = list()
    str_counts = 0
    # Sorted strings from each job
    sorted_str = sorted(results.get_counts().items(), key=operator.itemgetter(1), reverse=True)
    # Get just the observed strings
    for string in sorted_str:
        obs_strings.append(string[0])
    # go through and verify strings
    for o in obs_strings:
        # Remember to re-reverse string so it's back to normal due to IBMQ Endianness
        if verify_string(o,pstr):
            for string, count in counts.items():
                if string == o:
                    #print("===== SET CORRECT =====")
                    qjob.setCorrect(count)
        else:
            # lookup counts based on string
            # counts is a dict()
            for string, count in counts.items():
                if string == o:
                    # Add value to incorrect holder in object
                    #print("===== SET INCORRECT =====")
                    qjob.setIncorrect(count)
    # Now we haev the stats finished, let's store them in a list based on their backend name
    if qjob.backend() == "ibmqx2":
        ibmqx2_ranJobs.append(qjob)
    elif qjob.backend() == "london":
        london_ranJobs.append(qjob)
    elif qjob.backend() == "burlington":
        burlington_ranJobs.append(qjob)
    elif qjob.backend() == "essex":
        essex_ranJobs.append(qjob)
    elif qjob.backend() == "ourense":
        ourense_ranJobs.append(qjob)
    elif qjob.backend() == "vigo":
        vigo_ranJobs.append(qjob)
    elif qjob.backend() == "ibmq_sim":
        ibmq_sim_ranJobs.append(qjob)
    elif qjob.backend() == "melbourne":
        melbourne_ranJobs.append(qjob)
    elif qjob.backend() == "local_sim":
        local_sim_ranJobs.append(qjob)
    else:
        continue
backends_5qubit_ranJobs = dict()
backends_14qubit_ranJobs = dict()
backends_sims_ranJobs = dict()
#q5b = ["ibmqx2", "vigo", "ourense", "london", "essex", "burlington"]
#q5b = ["ibmqx2"]
#q5b = ["vigo"]
#q5b = ["ourense"]
#q5b = ["london"]
q5b = ["essex"]
#q5b = ["burlington"]
q14b = ["melbourne"]
sims = ["local_sim"]
#sims = ["local_sim", "ibmq_sim"]
backends_5qubit_ranJobs['ibmqx2'] = ibmqx2_ranJobs
backends_5qubit_ranJobs['vigo'] = vigo_ranJobs
backends_5qubit_ranJobs['ourense'] = ourense_ranJobs
backends_5qubit_ranJobs['london'] = london_ranJobs
backends_5qubit_ranJobs['essex'] = essex_ranJobs
backends_5qubit_ranJobs['burlington'] = burlington_ranJobs
backends_14qubit_ranJobs['melbourne'] = melbourne_ranJobs
backends_sims_ranJobs['local_sim'] = local_sim_ranJobs
#backends_sims['ibmq_sim'] = ibmq_sim_ranJobs
# The idea here is to loop through the dictionary by using a name in the list of names above
# as such then, we can call dictionaries in a loop with that name, which contain the list of
# ran jobs
def printStats(backend, job_list, total=142336):
    '''
    Print aggregate correct/incorrect result statistics for one backend.

    backend: backend name
    job_list: list of ran jobs from backend (objects exposing
        getCorrect()/getIncorrect())
    total: total number of shots across all runs; the default keeps the
        original hard-coded 2-7 qubit figure (142336) for backward
        compatibility, but callers may now pass their own denominator.
    '''
    total_correct = 0
    total_incorrect = 0
    pcorrect = 0.00
    pincorrect = 0.00
    # Accumulate per-job tallies.
    for job in job_list:
        total_correct += job.getCorrect()
        #print("Total Correct inc: " + str(total_correct))
        total_incorrect += job.getIncorrect()
        #print("Total INCorrect inc: " + str(total_incorrect))
    # Simulators produce no incorrect results; keep the percentage at 0
    # rather than computing from a zero tally.
    if total_incorrect == 0:
        pincorrect = 0.00
    else:
        pincorrect = 100*(total_incorrect / total)
    pcorrect = 100*(total_correct / total)
    print("\n===== RESULTS - " + backend + " =====\n")
    print("Total Results: " + str(total))
    print("Total Correct Results: " + str(total_correct) + " -- " + str(pcorrect) + "%")
    # Typo fix: "Inorrect" -> "Incorrect".
    print("Total Incorrect Results: " + str(total_incorrect) + " -- " + str(pincorrect) + "%")
    print("\n===================\n")
'''
for backend in sims:
printStats(backend, backends_sims_ranJobs[backend])
'''
#printStats(least_name, backends_5qubit_ranJobs[least_name])
# for each backend name in the backend name list...
'''
for backend in q5b:
printStats(backend, backends_5qubit_ranJobs[backend])
'''
# 14-qubit backend
# Print aggregate stats for each configured 14-qubit backend (melbourne).
for backend in q14b:
    printStats(backend, backends_14qubit_ranJobs[backend])
|
https://github.com/drnickallgood/simonqiskit
|
drnickallgood
|
import sys
import logging
import matplotlib.pyplot as plt
import numpy as np
import operator
import itertools
#from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute, IBMQ
from qiskit.providers.ibmq import least_busy
from collections import OrderedDict
# AER is for simulators
from qiskit import Aer
from qiskit import QuantumCircuit
from qiskit import ClassicalRegister
from qiskit import QuantumRegister
from qiskit import execute
from qiskit import IBMQ
#from qiskit.providers.ibmq.managed import IBMQJobManager
from qiskit.tools.monitor import job_monitor
from qiskit.tools.visualization import plot_histogram
from qiskit.tools.visualization import circuit_drawer
from sympy import Matrix, pprint, MatrixSymbol, expand, mod_inverse
# Scratch script: compare circuit unitaries via the unitary simulator to
# detect duplicate oracles.
unitary_sim = Aer.get_backend('unitary_simulator')
n = 2
# Generate circuit
#qr = QuantumRegister(2*n,'q')
qr = QuantumRegister(2*n)
#cr = ClassicalRegister(n,'c')
cr = ClassicalRegister(n)
simonCircuit = QuantumCircuit(qr,cr)
# Accumulates the unitaries seen so far.
uni_list = list()
def example():
    """Check whether simonCircuita's unitary duplicates one already in uni_list.

    Bug fix: the original passed a generator expression to np.all(), which is
    always truthy (the generator object itself is evaluated, not its items),
    so "Duplicate" was printed unconditionally. It also wanted a match with
    ANY stored unitary, not ALL of them. Both are fixed below; the new
    unitary is still appended afterwards, as before.
    """
    result2 = execute(simonCircuita, unitary_sim).result()
    unitary2 = result2.get_unitary(simonCircuita)
    # np.array_equal performs a proper element-wise comparison per matrix.
    if any(np.array_equal(unitary2, x) for x in uni_list):
        print("Duplicate")
    else:
        print("No duplicate")
    uni_list.append(unitary2)
# Build a first circuit, record its unitary, then build a second (different)
# circuit and run the duplicate check against the recorded list.
simonCircuit.h(qr[0])
simonCircuit.h(qr[1])
simonCircuit.cx(qr[0],qr[2])
simonCircuit.x(qr[3])
result1 = execute(simonCircuit, unitary_sim).result()
unitary1 = result1.get_unitary(simonCircuit)
uni_list.append(unitary1)
print(len(uni_list))
# Generate circuit
#qra = QuantumRegister(2*n,'q')
qra = QuantumRegister(2*n)
#cra = ClassicalRegister(n,'c')
cra = ClassicalRegister(n)
simonCircuita = QuantumCircuit(qra,cra)
simonCircuita.h(qra[0])
simonCircuita.h(qra[1])
# example() compares simonCircuita's unitary against uni_list, then appends it.
example()
print(len(uni_list))
|
https://github.com/drnickallgood/simonqiskit
|
drnickallgood
|
import sys
import logging
import matplotlib.pyplot as plt
import numpy as np
import operator
import itertools
#from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute, IBMQ
from qiskit.providers.ibmq import least_busy
from collections import OrderedDict
# AER is for simulators
from qiskit import Aer
from qiskit import QuantumCircuit
from qiskit import ClassicalRegister
from qiskit import QuantumRegister
from qiskit import execute
from qiskit import IBMQ
#from qiskit.providers.ibmq.managed import IBMQJobManager
from qiskit.tools.monitor import job_monitor
from qiskit.tools.visualization import plot_histogram
from qiskit.tools.visualization import circuit_drawer
from sympy import Matrix, pprint, MatrixSymbol, expand, mod_inverse
from qjob import QJob
unitary_sim = Aer.get_backend('unitary_simulator')
## function to get dot product of result string with the period string to verify, result should be 0
#check the wikipedia for simons formula
# DOT PRODUCT IS MOD 2 !!!!
# Result XOR ?? = 0 -- this is what we're looking for!
# We have to verify the period string with the ouptput using mod_2 addition aka XOR
# Simply xor the period string with the output string
# Simply xor the period string with the output string, result must be 0 or 0b0
def verify_string(ostr, pstr):
    """Check an observed bitstring against the hidden period string.

    Computes the bitwise dot product of the two strings and returns True
    when it is 0 modulo 2 -- the consistency condition Simon's algorithm
    guarantees for every measurement outcome.
    """
    observed = np.asarray([int(ch) for ch in ostr])
    period = np.asarray([int(ch) for ch in pstr])
    # Simon's promise: every measured string y satisfies y . s = 0 (mod 2)
    return bool(np.dot(observed, period) % 2 == 0)
def blackbox(simonCircuit, uni_list, period_string):
    """Append a randomly generated Simon oracle for `period_string`.

    Repeatedly builds a candidate oracle circuit until its unitary matrix
    is not already present in `uni_list` (so repeated calls sample distinct
    functions), then composes the accepted oracle onto `simonCircuit`.

    Relies on the module-level width `n` and simulator `unitary_sim`.
    NOTE(review): the "first generated" branch only clears the loop flag;
    the unitary is recorded via the shared `if not dup` branch below --
    confirm that branch sits at loop level in the original layout,
    otherwise the first unitary is never stored.
    """
    flag = True
    while flag:
        bbqr = QuantumRegister(2*n, 'q')
        bbcr = ClassicalRegister(n, 'c')
        bbcirc = QuantumCircuit(bbqr,bbcr)
        # Copy the input register into the output register with CNOTs
        for i in range(n):
            bbcirc.cx(bbqr[i],bbqr[n+i])
        # find the smallest index j such that s_j is "1"
        j = -1
        # reverse the string to match qiskit's least-significant-first order
        s = period_string[::-1]
        for i, c in enumerate(s):
            if c == "1":
                j = i
                break
        # 2-to-1 mapping: qubit j controls XORing s into the output register
        for i, c in enumerate(s):
            if c == "1" and j >= 0:
                bbcirc.cx(bbqr[j], bbqr[n+i]) # the i-th output qubit is flipped if s_i is 1
        # Randomly add CX gates between output qubits to expand the function
        # space so there are enough random functions for statistical sampling
        for i in range(n):
            for j in range(i+1, n):
                if np.random.random() > 0.5:
                    bbcirc.cx(bbqr[n+i],bbqr[n+j])
        # Random permutation of the output qubits; this is how a single
        # oracle query can better simulate the expected quantum behavior
        perm = list(np.random.permutation(n))
        # initial positions
        init = list(range(n))
        i = 0
        while i < n:
            if init[i] != perm[i]:
                k = perm.index(init[i])
                bbcirc.swap(bbqr[n+i], bbqr[n+k]) # swap gate on qubits
                init[i], init[k] = init[k], init[i] # mark the swapped qubits
            else:
                i += 1
        # Randomly flip output qubits (seed np.random externally for
        # reproducible benchmarks)
        for i in range(n):
            if np.random.random() > 0.5:
                bbcirc.x(bbqr[n+i])
        # Duplicate checking: obtain the unitary matrix of the generated circuit
        bb_sim_result = execute(bbcirc, unitary_sim).result()
        bb_uni = bb_sim_result.get_unitary(bbcirc, decimals=15)
        # Duplicate flag
        dup = False
        # Handle empty list
        if len(uni_list) == 0:
            # Nothing to compare against yet; accept the first oracle
            flag = False
            dup = False
            print("adding first generated")
        else:
            # Compare against every previously accepted unitary; a match
            # means this oracle was already used, so regenerate
            for i, uni in enumerate(uni_list):
                if (bb_uni == uni).all():
                    print("Duplicates Found, restarting loop")
                    dup = True
                    break # restart the while loop with a new random oracle
                else:
                    dup = False
        # If no duplicate was found after searching the list, record the
        # unitary and stop looping
        if not dup:
            uni_list.append(bb_uni)
            flag = False
            print("No Duplicates - Added another to list")
    ### End While
    # Compose the accepted oracle onto the caller's circuit
    simonCircuit = simonCircuit + bbcirc
    simonCircuit.barrier()
    print("Ending blackbox")
    return simonCircuit
## easily create period strings
## We want to avoid using anything with all 0's as that gives us false results
## because anything mod2 00 will give results
def create_period_str(strlen):
    """Return every non-zero bitstring of length `strlen`, ascending.

    The all-zero string is excluded: a period of 0...0 makes the oracle
    one-to-one and yields degenerate mod-2 results.
    """
    # Counting 1 .. 2**strlen - 1 enumerates exactly the bitstrings that
    # contain at least one '1', in the same order itertools.product yields.
    return [format(value, '0{}b'.format(strlen)) for value in range(1, 2 ** strlen)]
#### START ####
# hidden stringsn
period_strings_5qubit = list()
period_strings_5qubit = create_period_str(2)
period_strings_2bit = list()
period_strings_3bit = list()
period_strings_4bit = list()
period_strings_5bit = list()
period_strings_6bit = list()
period_strings_7bit = list()
# 2-bit strings
period_strings_2bit = create_period_str(2)
# 3-bit strings
period_strings_3bit = create_period_str(3)
# 4-bit strings
period_strings_4bit = create_period_str(4)
# 5-bit strings
period_strings_5bit = create_period_str(5)
# 6-bit strings
period_strings_6bit = create_period_str(6)
# 7-bit strings
period_strings_7bit = create_period_str(7)
# IBM Q stuff..
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
circuitList = list()
## DO NOT USE ITERATION FORMULA JUST HERE FOR REF
# Iterations = # of backends tested
# iteration formula = floor(log2(num_backends * num_shots)) = 14 here
# 2-bit period strings
ranJobs = list()
backname = "local_sim"
#2bit = 12 = 36 random functions
#3bit = 54 = 372+ random functions
#4bit
#5bit
#6bit
#7bit
#o Jobs total = # of strings * iterations
#total_jobs = iterations * len(period_strings_5bit)
#job_start_idx = 1
circs = list()
def find_duplicates(circs):
    """Heuristically count adjacent duplicate circuits in `circs`.

    Two neighbouring circuits count as duplicates when their gate
    histograms (`count_ops`) match; a matched pair is consumed (the scan
    jumps past both), so overlapping pairs are not double counted.
    Note: identical gate counts do not prove the circuits are equal --
    this is only a heuristic.
    """
    matches = 0
    idx = 0
    last = len(circs) - 1
    while idx < last:
        if circs[idx].count_ops() == circs[idx + 1].count_ops():
            matches += 1
            idx += 2
        else:
            idx += 1
    return matches
def generate_simon(simonCircuit, uni_list, period):
    """Assemble a complete Simon circuit around a fresh oracle for `period`.

    Wraps the blackbox oracle with the standard Hadamard sandwich on the
    first n qubits and measures them into the classical register.

    NOTE(review): relies on the module-level register `qr`, classical
    register `cr`, and width `n` matching the circuit passed in -- confirm
    callers rebuild those globals alongside `simonCircuit`.
    """
    # Hadamards on the input register prior to the oracle query
    for i in range(n):
        simonCircuit.h(qr[i])
    simonCircuit.barrier()
    # Oracle query -- blackbox() also guarantees the oracle's unitary is
    # not a duplicate of one already recorded in uni_list
    simonCircuit = blackbox(simonCircuit, uni_list, period)
    # Apply Hadamards again to interfere the phases
    for i in range(n):
        simonCircuit.h(qr[i])
    simonCircuit.barrier()
    # Measure the first n qubits into the classical register
    simonCircuit.measure(qr[0:n],cr)
    return simonCircuit
i = 0
z = 0
not_done = True
np.random.seed(0)
n = len(period_strings_2bit[0])
qr = QuantumRegister(2*n, 'q')
cr = ClassicalRegister(n, 'c')
simonCircuit = QuantumCircuit(qr,cr)
uni_list = list()
outfile = open("sim-results/simulations-2bit-12iter.txt", "w")
iterations = 12 #2-bit
#iterations = 54 #3-bit
#iterations = 26 #4-bit
#iterations = 13 #5-bit
#iterations = 7 #6-bit
#iterations = 4 #7-bit
local_sim = Aer.get_backend('qasm_simulator')
while not_done:
while i < len(period_strings_2bit):
#print("Started main block..")
#print(str(period_strings_6bit[i]))
n = len(period_strings_2bit[i])
print("Period strings: " + str(i+1) + "/" + str(len(period_strings_2bit)))
while z < iterations:
qr = QuantumRegister(2*n, 'q')
cr = ClassicalRegister(n, 'c')
simonCircuit = QuantumCircuit(qr,cr)
# Duplicates are checked in blackbox function
simon = generate_simon(simonCircuit, uni_list, period_strings_2bit[i])
circs.append(simon)
z = z + 1
print("Iterations:" + str(z) + "/" + str(iterations))
i = i + 1
z = 0
not_done = False
dup_flag = False
print("\nDouble checking heuristically...\n")
# Double checking all items in list are not duplicate
for x in range(0,len(uni_list)-1):
for y in range(1,len(uni_list)):
# Handle condition when x and y overlap and are eachother
if x != y:
if np.array_equal(uni_list[x], uni_list[y]):
print("Duplicates found at indexes:" + str(x) + "," + str(y))
dup_flag = True
#print("\nDuplicates in set, not valid\n")
if dup_flag:
print("\nDuplicates Found, see above.\n")
else:
print("\nNo duplicates found in 2nd pass\n")
print("\nRunning final check of dot product between period string and observed strings...")
### Now to run on simulator ####
iter_cnt = 0
pstr_cnt = 0
ranJobs = list()
for circ in circs:
job = execute(circ, backend=local_sim, shots=1024, optimization_level=3, seed_transpiler=0)
# create Qjob, store info
qj = QJob(job, circ, "local_sim", period_strings_2bit[pstr_cnt])
ranJobs.append(qj)
result = job.result()
counts = result.get_counts()
# We advance to next period string when we iterate through all
# the circuits per period strings
if iter_cnt == iterations-1:
pstr_cnt += 1
iter_cnt = 0
else:
iter_cnt += 1
# outfile.write(str(counts) + "\n")
outfile.close()
# Go through and get correct vs incorrect in jobs
## This will verify all the strings we get back are correct from the non
# duplicate circuits
for qjob in ranJobs:
results = qjob.job.result()
counts = results.get_counts()
#equations = guass_elim(results)
# Get period string
pstr = qjob.getPeriod()
# Verify observed string vs peroid string by doing dot product
for ostr, count in counts.items():
if verify_string(ostr, pstr):
qjob.setCorrect(count)
else:
qjob.setIncorrect(count)
total_correct = 0
total_incorrect = 0
total_runs = (1024 * iterations) * len(period_strings_2bit)
for qjob in ranJobs:
total_correct += qjob.getCorrect()
total_incorrect += qjob.getIncorrect()
print("\nTotal Runs: " + str(total_runs))
print("Total Correct: " + str(total_correct))
print("Prob Correct: " + str(float(total_correct) / float(total_runs)))
print("Total Incorrect: " + str(total_incorrect))
print("Prob Incorrect: " + str(float(total_incorrect) / float(total_runs)))
print("")
|
https://github.com/drnickallgood/simonqiskit
|
drnickallgood
|
import sys
import matplotlib.pyplot as plt
import numpy as np
import operator
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute,Aer, IBMQ
#from qiskit.providers.ibmq.managed import IBMQJobManager
from qiskit.tools.visualization import plot_histogram
from qiskit.tools.visualization import circuit_drawer
from qiskit.tools.monitor import job_monitor
from sympy import Matrix, pprint, MatrixSymbol, expand, mod_inverse
from qiskit.providers.ibmq import least_busy
# hidden period string
# Goes from most-significant bit to least-significant bit (left to right)
s = "10"
n = len(s)
# Create registers
# 2^n quantum registers half for control, half for data,
# n classical registers for the output
qr = QuantumRegister(2*n)
cr = ClassicalRegister(n)
circuitName = "Simon"
simonCircuit = QuantumCircuit(qr,cr, name=circuitName)
#print(simonCircuit.name)
local_sim = Aer.get_backend('qasm_simulator')
# Apply hadamards prior to oracle
for i in range(n):
simonCircuit.h(qr[i])
# Barrier
simonCircuit.barrier()
#### Blackbox Function #####
# QP's don't care about this, we do#
#############################
# Copy first register to second by using CNOT gates
for i in range(n):
simonCircuit.cx(qr[i],qr[n+i])
# get the small index j such it's "1"
j = -1
#reverse the string so that it fixes the circuit drawing to be more normal
# to the literature where the most significant bit is on TOP and least is on BOTTOM
# IBMQ default this is reversed , LEAST is on TOP and MOST is on BOTTOM
s = s[::-1]
for i, c in enumerate(s):
if c == "1":
j = i
break
# 1-1 and 2-1 mapping with jth qubit
# x is control to xor 2nd qubit with a
for i, c in enumerate(s):
if c == "1" and j >= 0:
#simonCircuit.x(qr[j])
simonCircuit.cx(qr[j], qr[n+i]) #the i-th qubit is flipped if s_i is 1
#simonCircuit.x(qr[j])
# Random peemutation
# This part is how we can get by with 1 query of the oracle and better
# simulates quantum behavior we'd expect
perm = list(np.random.permutation(n))
# init position
init = list(range(n))
i = 0
while i < n:
if init[i] != perm[i]:
k = perm.index(init[i])
simonCircuit.swap(qr[n+i],qr[n+k]) #swap gate on qubits
init[i], init[k] = init[k], init[i] # mark the swapped qubits
else:
i += 1
# Randomly flip qubit
for i in range(n):
if np.random.random() > 0.5:
simonCircuit.x(qr[n+i])
simonCircuit.barrier()
### END OF BLACKBOX FUNCTION
# Apply hadamard gates to registers again
for i in range(n):
simonCircuit.h(qr[i])
simonCircuit.barrier(qr)
# draw circuit
#circuit_drawer(simonCircuit)
print(simonCircuit)
simonCircuit.barrier()
simonCircuit.measure(qr[0:n],cr)
'''
[<IBMQSimulator('ibmq_qasm_simulator')
<IBMQBackend('ibmqx2')
<IBMQBackend('ibmq_16_melbourne')
<IBMQBackend('ibmq_vigo') f
<IBMQBackend('ibmq_ourense')
'''
IBMQ.load_account()
qprovider = IBMQ.get_provider(hub='ibm-q')
#qprovider.backends()
# Get the least busy backend
#qbackend = least_busy(qprovider.backends(filters=lambda x: x.configuration().n_qubits == 5 and not x.configuration().simulator and x.status().operational==True))
qbackend = local_sim
backend_name = qbackend.name()
#print("least busy backend: ", qbackend)
#qbackend = qprovider.get_backend('ibmq_vigo')
#job_manager = IBMQJobManager()
# Default for this backend seems to be 1024 ibmqx2
qshots = 1024
print("Submitting to IBM Q...\n")
job = execute(simonCircuit,backend=qbackend, shots=qshots)
job_monitor(job,interval=2)
#job_set_bar = job_manager.run(simonCircuit, backend=qbackend, name='bar', max_experiments_per_job=5)
#print(job_set_bar.report())
qresults = job.result()
qcounts = qresults.get_counts()
#print("Getting Results...\n")
#print(qcounts)
#print("")
print("\nIBM Q Backend %s: Resulting Values and Probabilities" % local_sim)
print("===============================================\n")
print("Simulated Runs:",qshots,"\n")
# period, counts, prob,a0,a1,...,an
#
for key, val in qcounts.items():
prob = val / qshots
print("Observed String:", key, ", Counts:", val, ", Probability:", prob)
print("")
# Classical post processing via Guassian elimination for the linear equations
# Y a = 0
# k[::-1], we reverse the order of the bitstring
lAnswer = [ (k[::-1],v) for k,v in qcounts.items() if k != "0"*n ]
# Sort basis by probabilities
lAnswer.sort(key = lambda x: x[1], reverse=True)
Y = []
for k, v in lAnswer:
Y.append( [ int(c) for c in k ] )
Y = Matrix(Y)
Y_transformed = Y.rref(iszerofunc=lambda x: x % 2==0)
# convert rational and negatives in rref
def mod(x,modulus):
    """Map a sympy rational from rref() into Z_modulus.

    rref over the rationals can leave fractions and negative entries; this
    returns numer * denom^-1 (mod modulus), normalizing each entry into
    {0, ..., modulus-1} -- GF(2) arithmetic when modulus == 2.
    """
    numer,denom = x.as_numer_denom()
    return numer*mod_inverse(denom,modulus) % modulus
# Deal with negative and fractial values
Y_new = Y_transformed[0].applyfunc(lambda x: mod(x,2))
print("The hidden period a0, a1 ... a%d only satisfies these equations:" %(n-1))
print("===============================================================\n")
rows,cols = Y_new.shape
equations = list()
Yr = list()
for r in range(rows):
Yr = [ "a"+str(i)+"" for i,v in enumerate(list(Y_new[r,:])) if v==1]
if len(Yr) > 0:
#tStr = " + ".join(Yr)
tStr = " xor ".join(Yr)
#single value is 0, only xor period string with 0 to get
if len(tStr) == 2:
equations.append("period string xor" + " 0 " + " = 0")
else:
equations.append("period string" + " xor " + tStr + " = 0")
#tStr = u' \2295 '.join(Yr)
print(tStr, "= 0")
# Now we need to solve this system of equations to get our period string
print("")
print("Here are the system of equations to solve")
print("=========================================")
print("Format: period_string xor a_x xor ... = 0\n")
for eq in equations:
print(eq)
print()
# Sort list by value
#reverse items to display back to original inputs
# We reversed above because of how IBMQ handles "endianness"
#reverse_strings = dict()
#s = s[::-1]
"""
for k,v in qcounts.items():
k = k[::-1]
reverse_strings[k] = v
"""
sorted_x = sorted(qcounts.items(), key=operator.itemgetter(1), reverse=True)
print("Sorted list of result strings by counts")
print("======================================\n")
# Print out list of items
for i in sorted_x:
print(i)
print(str(type(i)))
#print(sorted_x)
print("")
# Now once we have our found string, we need to double-check by XOR back to the
# y value
# Look into nullspaces with numpy
# Need to get x and y values based on above.. to help out
'''
IBM Q Backend ibmqx2: Resulting Values and Probabilities
===============================================
Simulated Runs: 1024
Period: 01 , Counts: 196 , Probability: 0.19140625
Period: 11 , Counts: 297 , Probability: 0.2900390625
Period: 10 , Counts: 269 , Probability: 0.2626953125
Period: 00 , Counts: 262 , Probability: 0.255859375
'''
# Already using a sorted list, the one with the highest probability is on top
correct = 0
incorrect = 0
def verify_string(ostr, pstr):
    """Return True when the observed string is consistent with the period.

    Consistency means the mod-2 dot product of the two bitstrings is zero,
    which Simon's algorithm guarantees for every measurement outcome.
    """
    obs_bits = np.array([int(bit) for bit in ostr])
    per_bits = np.array([int(bit) for bit in pstr])
    parity = int(np.dot(obs_bits, per_bits)) % 2
    return parity == 0
obs_strings = list()
for x in sorted_x:
obs_strings.append(x[0])
for o in obs_strings:
# Need to re-reverse string, so it's "normal"
if verify_string(o, s[::-1]):
print("Correct Result: " + o )
correct += 1
else:
print("Incorrect Result: " + o)
incorrect += 1
print("\n===== Correct vs Incorrect Computations =====\n")
print("Total Correct: " + str(correct))
print("Total Incorrect: " + str(incorrect))
print("")
|
https://github.com/drnickallgood/simonqiskit
|
drnickallgood
|
from pprint import pprint
import numpy as np
import argparse
from collections import defaultdict
from qiskit import IBMQ, Aer
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute
from qiskit.visualization import plot_histogram
from sympy import Matrix, mod_inverse
from qiskit import IBMQ
from qiskit.tools.monitor import job_monitor
from qiskit.providers.ibmq import least_busy
#IBMQ.save_account('<your acct number>')
class Simons(object):
    """Simon's algorithm runner.

    Builds the oracle as an explicit unitary (permutation) matrix from a
    function table, runs the H - oracle - H circuit, and post-processes
    the measurement counts with Gaussian elimination over GF(2) to
    recover the hidden period string.
    """
    def __init__(self, n, f):
        # n input qubits plus n output qubits for the oracle
        self._qr = QuantumRegister(2*n)
        # classical bits record measurements of the first n qubits only
        self._cr = ClassicalRegister(n)
        self._oracle = self._create_oracle(f)
    def _create_oracle(self, f):
        """Build the oracle as a 2^(2n) x 2^(2n) permutation matrix.

        For each input string `ab` and each table entry (k, v), the matrix
        connects basis state index int(ab + k, 2) with
        int(xor(ab, v) + k, 2), i.e. the second register is XORed with the
        function value.  NOTE(review): the index convention here must match
        circuit.unitary()'s qubit ordering -- confirm against the qiskit
        version in use.
        """
        n = len(list(f.keys())[0])
        U = np.zeros(shape=(2 ** (2 * n), 2 ** (2 * n)))
        for a in range(2 ** n):
            ab = np.binary_repr(a, n)
            for k, v in f.items():
                U[int(ab + k, 2), int(xor(ab, v) + k, 2)] = 1
        return U
    def _create_circuit(self, oracle):
        """Assemble H - oracle - H and measure the first register."""
        circuit = QuantumCircuit(self._qr, self._cr)
        circuit.h(self._qr[:len(self._cr)])
        circuit.barrier()
        circuit.unitary(oracle, self._qr, label='oracle')
        circuit.barrier()
        circuit.h(self._qr[:len(self._cr)])
        circuit.measure(self._qr[:len(self._cr)], self._cr)
        return circuit
    def _solve(self, counts):
        """Classical post-processing: Gaussian elimination over GF(2).

        Returns the recovered period as a list of bits.
        NOTE(review): entries mix int 0 and str '1'; callers are expected
        to str()-join the result.
        """
        # reverse inputs (qiskit bit order), remove all-zero outcomes, and
        # sort by observation frequency
        counts = [(k[::-1], v) for k, v in counts.items()
                  if not all([x == '0' for x in k])]
        counts.sort(key=lambda x: x[1], reverse=True)
        # construct sympy matrix of the observed strings
        matrix = Matrix([[int(i) for i in k] for k, _ in counts])
        # gaussian elimination mod 2
        matrix = matrix.rref(iszerofunc=lambda x: x % 2 == 0)
        matrix = matrix[0].applyfunc(lambda x: mod(x, 2))
        # extract string: each two-term row forces both bits of s to 1
        n_rows, _ = matrix.shape
        s = [0] * len(self._cr)
        for r in range(n_rows):
            yi = [i for i, v in enumerate(list(matrix[r, :])) if v == 1]
            if len(yi) == 2:
                s[yi[0]] = '1'
                s[yi[1]] = '1'
        return s[::-1]
    def run(self, shots=1024, provider=None):
        """Execute the circuit and return the recovered period bits.

        :param shots: number of measurement shots
        :param provider: IBMQ provider; None runs the local qasm simulator
        :raises Exception: re-raised with the backend's error message when
            the job fails
        """
        circuit = self._create_circuit(self._oracle)
        if provider is None:
            # run the program on a local simulator
            simulator = Aer.get_backend('qasm_simulator')
            job = execute(circuit, simulator, shots=shots)
        else:
            # pick the least busy real device with enough qubits
            backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= len(self._qr) and
                                 not x.configuration().simulator and x.status().operational==True))
            print("least busy backend: ", backend)
            job = execute(circuit, backend=backend, shots=shots, optimization_level=3)
            job_monitor(job, interval=2)
        try:
            results = job.result()
        except:
            # surface the backend's own error message to the caller
            raise Exception(job.error_message())
        counts = results.get_counts()
        print('Generated circuit: ')
        print(circuit.draw())
        print('Circuit output:')
        print(counts)
        print("Time taken:", results.time_taken)
        return self._solve(counts)
def mod(x, modulus):
    """Reduce a sympy rational modulo `modulus`.

    Converts numer/denom to numer * denom^-1 (mod modulus), cleaning up the
    fractions and negatives that rref() leaves behind.
    """
    numer, denom = x.as_numer_denom()
    return numer * mod_inverse(denom, modulus) % modulus
def xor(x, y):
    """Bitwise XOR of two equal-length bitstrings, zero-padded to width."""
    assert len(x) == len(y)
    width = len(x)
    combined = int(x, 2) ^ int(y, 2)
    return format(combined, '0{}b'.format(width))
def one_to_one_mapping(s):
    """Build the one-to-one oracle map b -> b XOR s over all n-bit strings.

    XOR with a fixed mask is a bijection, so the resulting function is
    one-to-one.

    :param s: binary string mask; defines the width n = len(s)
    :returns: dict mapping every n-bit string to its image under XOR s
    """
    n = len(s)
    s_int = int(s, 2)
    # Removed an unused local (`form_string`) from the original.
    # np.binary_repr zero-pads to width n, matching the xor() helper.
    return {np.binary_repr(idx, n): np.binary_repr(idx ^ s_int, n)
            for idx in range(2 ** n)}
def two_to_one_mapping(s):
    """Build a random two-to-one oracle map with period `s`.

    Pairs x with x XOR s (via one_to_one_mapping) and assigns both members
    of each pair the same randomly chosen output, so f(x) == f(x xor s).
    Uses np.random.choice, so repeated calls give different functions
    unless the RNG is seeded.

    NOTE(review): assumes s != 0...0; with a zero mask every x pairs with
    itself and the map degenerates -- confirm callers exclude that case.
    """
    mapping = one_to_one_mapping(s)
    # half the domain size: one output value per {x, x xor s} pair
    n = len(mapping.keys()) // 2
    # draw n distinct outputs from the (sorted, deterministic) key list
    new_range = np.random.choice(list(sorted(mapping.keys())), replace=False, size=n).tolist()
    mapping_pairs = sorted([(k, v) for k, v in mapping.items()], key=lambda x: x[0])
    new_mapping = {}
    # f(x) = f(x xor s): both members of the pair get the same output
    for i in range(n):
        x = mapping_pairs[i]
        y = new_range[i]
        new_mapping[x[0]] = y
        new_mapping[x[1]] = y
    return new_mapping
def main():
    """CLI entry point: build a Simon oracle from arguments and solve for s.

    Usage: prog <binary string> <1|2> [--ibmq]
    ftype 1 builds a one-to-one oracle, ftype 2 a two-to-one oracle with
    period equal to the given string; --ibmq runs on IBM hardware instead
    of the local simulator.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('string', help='Secret string s of length n')
    parser.add_argument('ftype', type=int, help='1 for one-to-one or 2 for two-to-one')
    parser.add_argument('--ibmq', action='store_true', help='Run on IBMQ')
    args = parser.parse_args()
    if args.ibmq:
        try:
            provider = IBMQ.load_account()
        except:
            raise Exception("Could not find saved IBMQ account.")
    assert all([x == '1' or x == '0' for x in args.string]), 'string argument must be a binary string.'
    n = len(args.string)
    if args.ftype == 1:
        mapping = one_to_one_mapping(args.string)
    elif args.ftype == 2:
        mapping = two_to_one_mapping(args.string)
    else:
        raise ValueError('Invalid function type.')
    print('Generated mapping:')
    pprint(mapping)
    simons = Simons(n, mapping)
    # `provider` is only bound on the --ibmq path; the conditional below
    # short-circuits, so it is never evaluated otherwise.
    result = simons.run(provider=provider if args.ibmq else None)
    result = ''.join([str(x) for x in result])
    # Check whether the recovered string satisfies the two-to-one
    # constraint f(x) == f(x xor result), excluding the trivial all-zero
    success = np.array([mapping[x] == mapping[xor(x, result)] for x in mapping.keys()]).all() and not all([x == '0' for x in result])
    if success:
        print(f'Oracle function is two-to-one with s = {result}.')
    else:
        print('Oracle is one-to-one.')
if __name__ == '__main__':
main()
|
https://github.com/drnickallgood/simonqiskit
|
drnickallgood
|
import qiskit
qiskit.__qiskit_version__
#initialization
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# importing Qiskit
from qiskit import BasicAer, IBMQ
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
from qiskit.compiler import transpile
from qiskit.tools.monitor import job_monitor
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
# Load the saved IBMQ accounts
IBMQ.load_account()
s = "010101" # the hidden bitstring
assert 1 < len(s) < 20, "The length of s must be between 2 and 19"
for c in s:
assert c == "0" or c == "1", "s must be a bitstring of '0' and '1'"
n = len(s) #the length of the bitstring
# Step 1
# Creating registers
# qubits for querying the oracle and recording its output
qr = QuantumRegister(2*n)
# for recording the measurement on the first register of qr
cr = ClassicalRegister(n)
circuitName = "Simon"
simonCircuit = QuantumCircuit(qr, cr)
# Step 2
# Apply Hadamard gates before querying the oracle
for i in range(n):
simonCircuit.h(qr[i])
# Apply barrier to mark the beginning of the blackbox function
simonCircuit.barrier()
# Step 3 query the blackbox function
# copy the content of the first register to the second register
for i in range(n):
simonCircuit.cx(qr[i], qr[n+i])
# get the least index j such that s_j is "1"
j = -1
for i, c in enumerate(s):
if c == "1":
j = i
break
# Creating 1-to-1 or 2-to-1 mapping with the j-th qubit of x as control to XOR the second register with s
for i, c in enumerate(s):
if c == "1" and j >= 0:
simonCircuit.cx(qr[j], qr[n+i]) #the i-th qubit is flipped if s_i is 1
# get random permutation of n qubits
perm = list(np.random.permutation(n))
#initial position
init = list(range(n))
i = 0
while i < n:
if init[i] != perm[i]:
k = perm.index(init[i])
simonCircuit.swap(qr[n+i], qr[n+k]) #swap qubits
init[i], init[k] = init[k], init[i] #marked swapped qubits
else:
i += 1
# randomly flip the qubit
for i in range(n):
if np.random.random() > 0.5:
simonCircuit.x(qr[n+i])
# Apply the barrier to mark the end of the blackbox function
simonCircuit.barrier()
# Step 4 apply Hadamard gates to the first register
for i in range(n):
simonCircuit.h(qr[i])
# Step 5 perform measurement on the first register
for i in range(n):
simonCircuit.measure(qr[i], cr[i])
#draw the circuit
simonCircuit.draw(output='mpl')
# use local simulator
backend = BasicAer.get_backend("qasm_simulator")
# the number of shots is twice the length of the bitstring
shots = 2*n
job = execute(simonCircuit, backend=backend, shots=shots)
answer = job.result().get_counts()
plot_histogram(answer)
# Post-processing step
# Constructing the system of linear equations Y s = 0
# By k[::-1], we reverse the order of the bitstring
lAnswer = [ (k[::-1],v) for k,v in answer.items() if k != "0"*n ] #excluding the trivial all-zero
#Sort the basis by their probabilities
lAnswer.sort(key = lambda x: x[1], reverse=True)
Y = []
for k, v in lAnswer:
Y.append( [ int(c) for c in k ] )
#import tools from sympy
from sympy import Matrix, pprint, MatrixSymbol, expand, mod_inverse
Y = Matrix(Y)
#pprint(Y)
#Perform Gaussian elimination on Y
Y_transformed = Y.rref(iszerofunc=lambda x: x % 2==0) # linear algebra on GF(2)
#to convert rational and negatives in rref of linear algebra on GF(2)
def mod(x,modulus):
    """Normalize a sympy rational from rref() into {0, ..., modulus-1}.

    Handles the fractional and negative entries GF(2) rref can produce by
    returning numer * denom^-1 (mod modulus).
    """
    numer, denom = x.as_numer_denom()
    return numer*mod_inverse(denom,modulus) % modulus
Y_new = Y_transformed[0].applyfunc(lambda x: mod(x,2)) #must takecare of negatives and fractional values
#pprint(Y_new)
print("The hidden bistring s[ 0 ], s[ 1 ]....s[",n-1,"] is the one satisfying the following system of linear equations:")
rows, cols = Y_new.shape
for r in range(rows):
Yr = [ "s[ "+str(i)+" ]" for i, v in enumerate(list(Y_new[r,:])) if v == 1 ]
if len(Yr) > 0:
tStr = " + ".join(Yr)
print(tStr, "= 0")
#Use one of the available backends
backend = IBMQ.get_backend("ibmq_16_melbourne")
# show the status of the backend
print("Status of", backend, "is", backend.status())
shots = 10*n #run more experiments to be certain
max_credits = 3 # Maximum number of credits to spend on executions.
simonCompiled = transpile(simonCircuit, backend=backend, optimization_level=1)
job_exp = execute(simonCompiled, backend=backend, shots=shots, max_credits=max_credits)
job_monitor(job_exp)
results = job_exp.result()
answer = results.get_counts(simonCircuit)
plot_histogram(answer)
# Post-processing step
# Constructing the system of linear equations Y s = 0
# By k[::-1], we reverse the order of the bitstring
lAnswer = [ (k[::-1][:n],v) for k,v in answer.items() ] #excluding the qubits that are not part of the inputs
#Sort the basis by their probabilities
lAnswer.sort(key = lambda x: x[1], reverse=True)
Y = []
for k, v in lAnswer:
Y.append( [ int(c) for c in k ] )
Y = Matrix(Y)
#Perform Gaussian elimination on Y
Y_transformed = Y.rref(iszerofunc=lambda x: x % 2==0) # linear algebra on GF(2)
Y_new = Y_transformed[0].applyfunc(lambda x: mod(x,2)) #must takecare of negatives and fractional values
#pprint(Y_new)
print("The hidden bistring s[ 0 ], s[ 1 ]....s[",n-1,"] is the one satisfying the following system of linear equations:")
rows, cols = Y_new.shape
for r in range(rows):
Yr = [ "s[ "+str(i)+" ]" for i, v in enumerate(list(Y_new[r,:])) if v == 1 ]
if len(Yr) > 0:
tStr = " + ".join(Yr)
print(tStr, "= 0")
|
https://github.com/yforman/QAOA
|
yforman
|
#In case you don't have qiskit, install it now
%pip install qiskit --quiet
#Installing/upgrading pylatexenc seems to have fixed my mpl issue
#If you try this and it doesn't work, try also restarting the runtime/kernel
%pip install pylatexenc --quiet
!pip install -Uqq ipdb
!pip install qiskit_optimization
import networkx as nx
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import BasicAer
from qiskit.compiler import transpile
from qiskit.quantum_info.operators import Operator, Pauli
from qiskit.quantum_info import process_fidelity
from qiskit.extensions.hamiltonian_gate import HamiltonianGate
from qiskit.extensions import RXGate, XGate, CXGate
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, execute
import numpy as np
from qiskit.visualization import plot_histogram
import ipdb
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
#quadratic optimization
from qiskit_optimization import QuadraticProgram
from qiskit_optimization.converters import QuadraticProgramToQubo
%pdb on
# def ApplyCost(qc, gamma):
# Ix = np.array([[1,0],[0,1]])
# Zx= np.array([[1,0],[0,-1]])
# Xx = np.array([[0,1],[1,0]])
# Temp = (Ix-Zx)/2
# T = Operator(Temp)
# I = Operator(Ix)
# Z = Operator(Zx)
# X = Operator(Xx)
# FinalOp=-2*(T^I^T)-(I^T^T)-(T^I^I)+2*(I^T^I)-3*(I^I^T)
# ham = HamiltonianGate(FinalOp,gamma)
# qc.append(ham,[0,1,2])
task = QuadraticProgram(name = 'QUBO on QC')
task.binary_var(name = 'x')
task.binary_var(name = 'y')
task.binary_var(name = 'z')
task.minimize(linear = {"x":-1,"y":2,"z":-3}, quadratic = {("x", "z"): -2, ("y", "z"): -1})
qubo = QuadraticProgramToQubo().convert(task) #convert to QUBO
operator, offset = qubo.to_ising()
print(operator)
# ham = HamiltonianGate(operator,0)
# print(ham)
Ix = np.array([[1,0],[0,1]])
Zx= np.array([[1,0],[0,-1]])
Xx = np.array([[0,1],[1,0]])
Temp = (Ix-Zx)/2
T = Operator(Temp)
I = Operator(Ix)
Z = Operator(Zx)
X = Operator(Xx)
FinalOp=-2*(T^I^T)-(I^T^T)-(T^I^I)+2*(I^T^I)-3*(I^I^T)
ham = HamiltonianGate(FinalOp,0)
print(ham)
#define PYBIND11_DETAILED_ERROR_MESSAGES
def compute_expectation(counts):
    """Compute the QUBO expectation value from measurement results.

    Args:
        counts: dict mapping measured bitstrings to their shot counts.
            Bit positions follow qiskit ordering: index 2 is x, 1 is y,
            0 is z.
    Returns:
        float: count-weighted average of the cost
        -2xz - yz - x + 2y - 3z over all outcomes.
    """
    weighted = 0
    total = 0
    for bits, freq in counts.items():
        z, y, x = int(bits[0]), int(bits[1]), int(bits[2])
        cost = -2 * x * z - y * z - x + 2 * y - 3 * z
        weighted += cost * freq
        total += freq
    return weighted / total
# We will also bring the different circuit components that
# build the qaoa circuit under a single function
def create_qaoa_circ(theta):
    """
    Create a parametrized QAOA circuit for the 3-variable QUBO cost.

    Args:
        theta: list of 2p floats; the first p entries are the mixer angles
            (beta) and the last p are the cost angles (gamma).
    Returns:
        qc: qiskit circuit that applies p alternating cost/mixer layers on
            a uniform superposition and measures all three qubits.
    Note:
        The cost unitary is built from the module-level `operator`
        (the qubo.to_ising() output).  The original body also constructed
        an unused local Pauli expression (Ix/Zx/Xx/Temp/FinalOp); that
        dead code has been removed -- behavior is unchanged.
    """
    nqubits = 3
    n, m = 3, 3
    p = len(theta) // 2  # number of alternating unitaries
    qc = QuantumCircuit(nqubits, nqubits)
    beta = theta[:p]
    gamma = theta[p:]
    # initial state: uniform superposition
    for i in range(0, nqubits):
        qc.h(i)
    for irep in range(0, p):
        # problem (cost) unitary: exp(-i * operator * 2*gamma)
        ham = HamiltonianGate(operator, 2 * gamma[irep])
        qc.append(ham, [0, 1, 2])
        # mixer unitary
        for i in range(0, nqubits):
            qc.rx(2 * beta[irep], i)
    qc.measure(qc.qubits[:n], qc.clbits[:m])
    return qc
# Finally we write a function that executes the circuit on the chosen backend
def get_expectation(shots=512):
    """
    Build a function that evaluates the QAOA objective on the qasm simulator.

    Args:
        shots: number of measurement shots per objective evaluation.

    Returns:
        callable: theta -> expectation value of the objective (float).
    """
    backend = Aer.get_backend('qasm_simulator')

    def execute_circ(theta):
        qc = create_qaoa_circ(theta)
        # Bug fix: `shots` was previously ignored (execute hard-coded 1024).
        job = execute(qc, backend, shots=shots)
        counts = job.result().get_counts(qc)
        return compute_expectation(counts)

    return execute_circ
from scipy.optimize import minimize
# Classically optimize the QAOA angles: two COBYLA runs, the second
# warm-started from the first result.
expectation = get_expectation()
res = minimize(expectation, [1, 1], method='COBYLA')
expectation = get_expectation()
res = minimize(expectation, res.x, method='COBYLA')
res
from qiskit.visualization import plot_histogram
backend = Aer.get_backend('aer_simulator')
backend.shots = 512
# Sample the circuit at the optimized angles and plot the outcome histogram.
qc_res = create_qaoa_circ(res.x)
backend = Aer.get_backend('qasm_simulator')
job = execute(qc_res, backend, shots=1024)
result = job.result()
counts=result.get_counts(qc_res)
plot_histogram(counts)
|
https://github.com/ernchern/qiskit-vaqsd
|
ernchern
|
from math import sqrt, pi
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
import oracle_simple
import composed_gates
def get_circuit(n, oracles):
    """
    Build the Grover circuit composed of the oracle black boxes and the diffusion operator.

    :param n: The number of qubits (not including the ancillas)
    :param oracles: A list of black box (quantum) oracles; each of them selects a specific state
    :returns: The quantum circuit and the total number of qubits used
    :rtype: (qiskit.QuantumCircuit, int)
    """
    cr = ClassicalRegister(n)
    if n > 3:
        # n qubits for the value plus n - 1 ancillas for the
        # multi-controlled gates used inside the diffusion operator.
        qr = QuantumRegister(n + n - 1)
    else:
        # Small circuits don't need ancillas.
        qr = QuantumRegister(n)
    qc = QuantumCircuit(qr, cr)
    print("Number of qubits is {0}".format(len(qr)))
    print(qr)
    # Initial uniform superposition over the n data qubits.
    for j in range(n):
        qc.h(qr[j])
    # m = number of marked states (roots); it fixes the iteration count.
    m = len(oracles)
    print("n is ", n)
    # Optimal Grover repetitions: ~ (pi/4) * sqrt(N/m), rounded.
    r = int(round((pi / 2 * sqrt((2**n) / m) - 1) / 2))
    print("Repetition of ORACLE+DIFFUSION boxes required: {0}".format(r))
    # Grover's algorithm alternates oracle and diffusion boxes r times.
    for _ in range(r):
        for oracle in oracles:
            oracle.get_circuit(qr, qc)
        diffusion(n, qr, qc)
    for j in range(n):
        qc.measure(qr[j], cr[j])
    return qc, len(qr)
def diffusion(n, qr, qc):
    """
    Apply the Grover diffusion operator to the first n qubits of qr in circuit qc.

    Implements H^n . X^n . (n-controlled Z) . X^n . H^n, i.e. a reflection
    about the uniform superposition state.
    """
    data = [qr[j] for j in range(n)]
    for q in data:
        qc.h(q)
    # Flip every qubit so the controlled-Z below singles out |00..0>.
    for q in data:
        qc.x(q)
    # Qubits 0..n-2 are controls, n-1 is the target; ancillas (used only
    # when n > 3) occupy positions n..2n-2.
    controls = data[:-1]
    target = qr[n - 1]
    ancillas = [qr[j] for j in range(n, n + n - 1)] if n > 3 else None
    composed_gates.n_controlled_Z_circuit(qc, controls, target, ancillas)
    for q in data:
        qc.x(q)
    for q in data:
        qc.h(q)
|
https://github.com/ernchern/qiskit-vaqsd
|
ernchern
|
import numpy as np
from qiskit import(
    QuantumCircuit,
    execute,
    Aer)
from qiskit import IBMQ
# SECURITY(review): a real IBM Quantum API token is hard-coded and committed
# here. It should be revoked immediately and loaded from an environment
# variable or qiskit's saved credentials instead of appearing in source.
IBMQ.save_account("e69e6c2e07ed86d44bf6ba9dc2db3c727e01eceeaea1d4d5508a8331a192d414336aeec995677836051acafd09a886485064a85f4931d2fbeee945d6ea16801b")
IBMQ.load_account()
def setQubit(circuit, a, b):
    # Prepare qubit 0 of `circuit` in the state a|0> + b|1> via a single u3
    # rotation (a assumed real with |a|^2 + |b|^2 = 1).
    # NOTE(review): the phase arccos(Re(b)/|b|) drops the sign of Im(b);
    # presumably fine for the real amplitudes used by the callers here, but
    # np.angle(b) would be the general signed phase — confirm intent.
    if b!=0 :
        circuit.u3(2*np.arccos(a),np.arccos(np.real(b)/abs(b)),0,0)
    else:
        circuit.u3(2*np.arccos(a),0,0,0)
def addLayer(circuit, params, index):
    """Apply one variational u3 rotation (theta, phi, lam) to qubit `index`."""
    theta, phi, lam = params[0], params[1], params[2]
    circuit.u3(theta, phi, lam, index)
def build_circuit(params, a_1, b_1, a_2, b_2, a_3, b_3, bias, shots = 1000, verbose = False):
    '''
    Build and run the three state-discrimination circuits.

    Inputs:
        params: flat array reshaped to 4x3 — one (theta, phi, lam) u3 triple
            per "neural" node (two nodes per layer, two layers)
        a_n, b_n: amplitudes of the nth input state a|0> + b|1>
        bias: unused here; kept for interface compatibility with the
            commented-out objective variants
        shots: number of executions of each circuit
        verbose: print each circuit and its counts

    Output:
        (p_success, p_inconclusive): success rate and probability of the
        inconclusive '11' outcome, averaged over the three circuits
    '''
    # Use Aer's qasm_simulator
    simulator = Aer.get_backend('qasm_simulator')
    params = params.reshape((4, 3))
    counts_list = []
    # The same two-layer variational circuit is applied to each input state
    # (previously three verbatim copies of this code).
    for a, b in ((a_1, b_1), (a_2, b_2), (a_3, b_3)):
        circuit = QuantumCircuit(2, 2)
        setQubit(circuit, a, b)
        # First "neural" layer
        addLayer(circuit, params[0][:], 0)
        addLayer(circuit, params[1][:], 1)
        circuit.cx(0, 1)
        # Second "neural" layer
        addLayer(circuit, params[2][:], 0)
        addLayer(circuit, params[3][:], 1)
        circuit.cx(1, 0)
        circuit.measure([0, 1], [0, 1])
        counts = execute(circuit, simulator, shots=shots).result().get_counts(circuit)
        if verbose:
            print(circuit)
            print(counts)
        # Ensure every outcome key exists so the arithmetic below never KeyErrors.
        for key in ('00', '01', '10', '11'):
            counts.setdefault(key, 0)
        counts_list.append(counts)
    counts1, counts2, counts3 = counts_list
    # Each input state has its own designated "success" outcome; '11' is the
    # shared inconclusive outcome.
    p_success = (counts1['00'] + counts2['01'] + counts3['10']) / (3 * shots)
    p_inconclusive = (counts1['11'] + counts2['11'] + counts3['11']) / (3 * shots)
    return (p_success, p_inconclusive)
|
https://github.com/ernchern/qiskit-vaqsd
|
ernchern
|
import qiskit
import numpy as np
from qiskit import(
QuantumCircuit,
execute,
Aer)
from qiskit.visualization import plot_histogram
import numpy as np
np.random.seed(99999)  # fixed seed: reproducible random u3 angles
params = np.random.rand(3)
# Use Aer's qasm_simulator
simulator = Aer.get_backend('qasm_simulator')
# Create a Quantum Circuit acting on the q register
circuit = QuantumCircuit(2, 2)
# Add a H gate on qubit 1
circuit.h(1)
# # Add a CX (CNOT) gate on control qubit 0 and target qubit 1
# circuit.cx(0, 1)
# The same random single-qubit rotation on both qubits.
circuit.u3(params[0],params[1],params[2],0)
circuit.u3(params[0],params[1],params[2],1)
# Map the quantum measurement to the classical bits
circuit.measure([0,1], [0,1])
# Execute the circuit on the qasm simulator
job = execute(circuit, simulator, shots=1000)
# Grab results from the job
result = job.result()
# Returns counts
counts = result.get_counts(circuit)
print("\nTotal count for 00 and 11 are:",counts)
# Draw the circuit
circuit.draw()
plot_histogram(counts)
counts['00']
# Empirical outcome distribution over the four bitstrings.
np.array([counts['00'],counts['01'],counts['10'],counts['11']])/1000
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
# All Imports
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
import qiskit
from qiskit import pulse
from qiskit_dynamics import Solver, DynamicsBackend
from qiskit_dynamics.pulse import InstructionToSignals
from qiskit_dynamics.array import Array
from qiskit.quantum_info import Statevector, DensityMatrix, Operator
from qiskit.circuit.parameter import Parameter
import jax
import jax.numpy as jnp
from jax import jit, vmap, block_until_ready, config
import chex
from typing import Optional, Union
Array.set_default_backend('jax')
config.update('jax_enable_x64', True)
config.update('jax_platform_name', 'cpu')
# Constructing a Two Qutrit Hamiltonian
dim = 3  # levels per transmon (qutrit truncation)
v0 = 4.86e9  # qubit-0 frequency (Hz)
anharm0 = -0.32e9  # qubit-0 anharmonicity (Hz)
r0 = 0.22e9  # qubit-0 drive strength (Hz)
v1 = 4.97e9  # qubit-1 frequency (Hz)
anharm1 = -0.32e9
r1 = 0.26e9
J = 0.002e9  # qubit-qubit exchange coupling (Hz)
# Single-qutrit lowering/raising operators and number operator.
a = np.diag(np.sqrt(np.arange(1, dim)), 1)
adag = np.diag(np.sqrt(np.arange(1, dim)), -1)
N = np.diag(np.arange(dim))
ident = np.eye(dim, dtype=complex)
full_ident = np.eye(dim**2, dtype=complex)
# Two-qutrit embeddings (qubit 0 is the rightmost tensor factor).
N0 = np.kron(ident, N)
N1 = np.kron(N, ident)
a0 = np.kron(ident, a)
a1 = np.kron(a, ident)
a0dag = np.kron(ident, adag)
a1dag = np.kron(adag, ident)
# Duffing-oscillator terms. NOTE(review): elementwise `*` is used in
# N0 * (N0 - full_ident); that equals the matrix product here only because
# both factors are diagonal — confirm if these operators ever change.
static_ham0 = 2 * np.pi * v0 * N0 + np.pi * anharm0 * N0 * (N0 - full_ident)
static_ham1 = 2 * np.pi * v1 * N1 + np.pi * anharm1 * N1 * (N1 - full_ident)
static_ham_full = static_ham0 + static_ham1 + 2 * np.pi * J * ((a0 + a0dag) @ (a1 + a1dag))
# Charge-drive operators for each transmon.
drive_op0 = 2 * np.pi * r0 * (a0 + a0dag)
drive_op1 = 2 * np.pi * r1 * (a1 + a1dag)
batchsize = 400
# Parameter batches for vmap: one (amp, sigma, freq) row per simulation.
amp_vals = jnp.linspace(0.5, 0.99, batchsize, dtype=jnp.float64).reshape(-1, 1)
sigma_vals = jnp.linspace(20, 80, batchsize, dtype=jnp.int8).reshape(-1, 1)
freq_vals = jnp.linspace(-0.5, 0.5, batchsize, dtype=jnp.float64).reshape(-1, 1) * 1e6
batch_params = jnp.concatenate((amp_vals, sigma_vals, freq_vals), axis=-1)
batch_y0 = jnp.tile(np.ones(9), (batchsize, 1))  # unnormalized uniform initial states
batch_obs = jnp.tile(N0, (batchsize, 1, 1))
print(f"Batched Params Shape: {batch_params.shape}")
# Constructing a custom function that takes as input a parameter vector and returns the simulated state
def standard_func(params):
    """Build a pulse schedule: a frequency-shifted Drag pulse on d0, d1, u0, u1.

    Args:
        params: (amp, sigma, freq) — Drag amplitude, width, and the frequency
            shift applied to every channel before the pulse.

    Returns:
        qiskit pulse schedule with sequential alignment.
    """
    amp, sigma, freq = params
    # Drag is a scalable symbolic pulse, so the schedule stays JAX-traceable.
    special_pulse = pulse.Drag(
        duration=320,
        amp=amp,
        sigma=sigma,
        beta=0.1,
        angle=0.1,
        limit_amplitude=False
    )
    with pulse.build(default_alignment='sequential') as sched:
        # Identical shifted pulse on each channel, in the fixed order
        # d0, d1, u0, u1 (previously four copy-pasted shift/play pairs).
        channels = (
            pulse.DriveChannel(0),
            pulse.DriveChannel(1),
            pulse.ControlChannel(0),
            pulse.ControlChannel(1),
        )
        for chan in channels:
            pulse.shift_frequency(freq, chan)
            pulse.play(special_pulse, chan)
    return sched
# Constructing the new solver
dt = 1/4.5e9
atol = 1e-2
rtol = 1e-4
t_linspace = np.linspace(0.0, 400e-9, 11)
t_span = np.array([t_linspace[0], t_linspace[-1]])
ham_ops = [drive_op0, drive_op1, drive_op0, drive_op1]
ham_chans = ["d0", "d1", "u0", "u1"]
chan_freqs = {"d0": v0, "d1": v1, "u0": v1, "u1": v0}
solver = Solver(
static_hamiltonian=static_ham_full,
hamiltonian_operators=ham_ops,
rotating_frame=static_ham_full,
hamiltonian_channels=ham_chans,
channel_carrier_freqs=chan_freqs,
dt=dt,
)
class JaxifiedSolver:
    """Thin wrapper that jit+vmap-batches pulse-schedule simulations.

    Schedules come from `schedule_func(params)`; integration uses the
    module-level `solver` captured from the enclosing scope, not an attribute.
    """
    def __init__(
        self,
        schedule_func,
        dt,
        carrier_freqs,
        ham_chans,
        t_span,
        rtol,
        atol
    ):
        super().__init__()
        self.schedule_func = schedule_func
        self.dt = dt
        self.carrier_freqs = carrier_freqs
        self.ham_chans = ham_chans
        self.t_span = t_span
        self.rtol = rtol
        self.atol = atol
        # vmap over (y0, obs, params) batches, then jit the whole batch once.
        self.fast_batched_sim = jit(vmap(self.run_sim))
    def run_sim(self, y0, obs, params):
        # Simulate one schedule and return clipped per-basis-state probabilities.
        sched = self.schedule_func(params)
        converter = InstructionToSignals(self.dt, carriers=self.carrier_freqs, channels=self.ham_chans)
        signals = converter.get_signals(sched)
        # NOTE(review): `solver` is the module-level Solver, so instances are
        # tied to the Hamiltonian model defined above.
        results = solver.solve(
            t_span=self.t_span,
            y0=y0 / jnp.linalg.norm(y0),
            t_eval=self.t_span,
            signals=signals,
            rtol=self.rtol,
            atol=self.atol,
            convert_results=False,
            method='jax_odeint'
        )
        state_vec = results.y.data[-1]
        state_vec = state_vec / jnp.linalg.norm(state_vec)
        # Apply the observable only to the first 4 amplitudes (two-level
        # subspace of the two qutrits); the remaining amplitudes pass through.
        two_vec = state_vec[:4]
        evolved_vec = jnp.dot(obs, two_vec)
        new_vec = jnp.concatenate((evolved_vec, state_vec[4:]))
        probs_vec = jnp.abs(new_vec)**2
        probs_vec = jnp.clip(probs_vec, a_min=0.0, a_max=1.0)
        # Shots instead of probabilities
        return probs_vec
    def estimate(self, batch_y0, batch_obs, batch_params):
        # batch_obs: list of operator objects; densified into one jnp array.
        ops_mat = [b.to_matrix() for b in batch_obs]
        ops_arr = jnp.array(ops_mat)
        return self.fast_batched_sim(batch_y0, ops_arr, batch_params)
j_solver = JaxifiedSolver(
schedule_func=standard_func,
dt=dt,
carrier_freqs=chan_freqs,
ham_chans=ham_chans,
t_span=t_span,
rtol=rtol,
atol=atol
)
from qiskit.quantum_info import SparsePauliOp
ops_list = [SparsePauliOp(["IX"]), SparsePauliOp(["IY"]), SparsePauliOp(["YZ"]), SparsePauliOp(["ZX"])] * 100
batch_res = j_solver.estimate(
batch_y0,
ops_list,
batch_params
)
%timeit j_solver.estimate(batch_y0,ops_list,batch_params)
from qiskit import QuantumCircuit
from qiskit.quantum_info import Statevector
qc = QuantumCircuit(3)
ket = Statevector(qc)
qc.x(2)
ket2 = Statevector(qc)
qc.x(1)
ket3 = Statevector(qc)
ket.draw()
print(ket.data)
print(ket)
print(ket2)
print(ket3)
total_vec = np.ones(3 ** 2)
total_vec /= np.linalg.norm(total_vec)
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
# All Imports
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
import qiskit
from qiskit import pulse
from qiskit_dynamics import Solver, DynamicsBackend
from qiskit_dynamics.pulse import InstructionToSignals
from qiskit_dynamics.array import Array
from qiskit.quantum_info import Statevector, DensityMatrix, Operator, SparsePauliOp
from qiskit.circuit.parameter import Parameter
import jax
import jax.numpy as jnp
from jax import jit, vmap, block_until_ready, config
import chex
from typing import Optional, Union
Array.set_default_backend('jax')
config.update('jax_enable_x64', True)
config.update('jax_platform_name', 'cpu')
# Constructing a Two Qutrit Hamiltonian
dim = 3
v0 = 4.86e9
anharm0 = -0.32e9
r0 = 0.22e9
v1 = 4.97e9
anharm1 = -0.32e9
r1 = 0.26e9
J = 0.002e9
a = np.diag(np.sqrt(np.arange(1, dim)), 1)
adag = np.diag(np.sqrt(np.arange(1, dim)), -1)
N = np.diag(np.arange(dim))
ident = np.eye(dim, dtype=complex)
full_ident = np.eye(dim**2, dtype=complex)
N0 = np.kron(ident, N)
N1 = np.kron(N, ident)
a0 = np.kron(ident, a)
a1 = np.kron(a, ident)
a0dag = np.kron(ident, adag)
a1dag = np.kron(adag, ident)
static_ham0 = 2 * np.pi * v0 * N0 + np.pi * anharm0 * N0 * (N0 - full_ident)
static_ham1 = 2 * np.pi * v1 * N1 + np.pi * anharm1 * N1 * (N1 - full_ident)
static_ham_full = static_ham0 + static_ham1 + 2 * np.pi * J * ((a0 + a0dag) @ (a1 + a1dag))
drive_op0 = 2 * np.pi * r0 * (a0 + a0dag)
drive_op1 = 2 * np.pi * r1 * (a1 + a1dag)
batchsize = 400
amp_vals = jnp.linspace(0.5, 0.99, batchsize, dtype=jnp.float64).reshape(-1, 1)
sigma_vals = jnp.linspace(20, 80, batchsize, dtype=jnp.int8).reshape(-1, 1)
freq_vals = jnp.linspace(-0.5, 0.5, batchsize, dtype=jnp.float64).reshape(-1, 1) * 1e6
batch_params = jnp.concatenate((amp_vals, sigma_vals, freq_vals), axis=-1)
batch_y0 = jnp.tile(np.ones(9), (batchsize, 1))
batch_obs = jnp.tile(N0, (batchsize, 1, 1))
print(f"Batched Params Shape: {batch_params.shape}")
# Constructing a custom function that takes as input a parameter vector and returns the simulated state
def standard_func(params):
    """Build a pulse schedule: a frequency-shifted Drag pulse on d0, d1, u0, u1.

    Args:
        params: (amp, sigma, freq) — Drag amplitude, width, and the frequency
            shift applied to every channel before the pulse.

    Returns:
        qiskit pulse schedule with sequential alignment.
    """
    amp, sigma, freq = params
    # Drag is a scalable symbolic pulse, so the schedule stays JAX-traceable.
    special_pulse = pulse.Drag(
        duration=320,
        amp=amp,
        sigma=sigma,
        beta=0.1,
        angle=0.1,
        limit_amplitude=False
    )
    with pulse.build(default_alignment='sequential') as sched:
        # Identical shifted pulse on each channel, in the fixed order
        # d0, d1, u0, u1 (previously four copy-pasted shift/play pairs).
        channels = (
            pulse.DriveChannel(0),
            pulse.DriveChannel(1),
            pulse.ControlChannel(0),
            pulse.ControlChannel(1),
        )
        for chan in channels:
            pulse.shift_frequency(freq, chan)
            pulse.play(special_pulse, chan)
    return sched
# Constructing the new solver
dt = 1/4.5e9
atol = 1e-2
rtol = 1e-4
t_linspace = np.linspace(0.0, 400e-9, 11)
t_span = np.array([t_linspace[0], t_linspace[-1]])
ham_ops = [drive_op0, drive_op1, drive_op0, drive_op1]
ham_chans = ["d0", "d1", "u0", "u1"]
chan_freqs = {"d0": v0, "d1": v1, "u0": v1, "u1": v0}
solver = Solver(
static_hamiltonian=static_ham_full,
hamiltonian_operators=ham_ops,
rotating_frame=static_ham_full,
hamiltonian_channels=ham_chans,
channel_carrier_freqs=chan_freqs,
dt=dt,
)
op_str = "XI"
num_qubits = len(op_str)
qudit_dim_size = 3
init_state = np.zeros(qudit_dim_size ** num_qubits, dtype=np.complex64)
init_state[1] = 1
base_gates_dict = {
    "I": jnp.array([[1.0, 0.], [0., 1.]]),
    "X": jnp.array([[0., 1.], [1., 0.]]),
    "Y": jnp.array([[0., -1.0j], [1.0j, 0.]]),
    "Z": jnp.array([[1., 0.], [0., -1.]])
}
def PauliToQuditMatrix(inp_str: str, qudit_dim_size: Optional[int] = 4):
    """Embed a Pauli string into qudit space and return its matrix.

    Each single-qubit Pauli occupies the top-left 2x2 corner of a
    qudit_dim_size-dimensional identity; the per-character operators are then
    combined with repeated Kronecker products (leftmost character outermost).
    """
    embedded_ops = []
    for ch in inp_str:
        qud_op = np.identity(qudit_dim_size, dtype=np.complex64)
        qud_op[:2, :2] = base_gates_dict[ch]
        embedded_ops.append(qud_op)
    complete_op = embedded_ops[0]
    for op in embedded_ops[1:]:
        complete_op = np.kron(complete_op, op)
    return complete_op
def evolve_state(batch_state, batch_var_str):
    """Apply, entry by entry, the qudit operator of each Pauli string to its state."""
    return [
        PauliToQuditMatrix(var_str, qudit_dim_size) @ state
        for state, var_str in zip(batch_state, batch_var_str)
    ]
b_size = 400
batch_state = [init_state] * b_size
batch_var_str = [op_str] * b_size
%timeit evolve_state(batch_state, batch_var_str)
class JaxedSolver:
    """jit+vmap-batched pulse simulator addressed by Pauli-string observables.

    Schedules come from `schedule_func(params)`; integration uses the
    module-level `solver` captured from the enclosing scope.
    """
    def __init__(
        self,
        schedule_func,
        dt,
        carrier_freqs,
        ham_chans,
        t_span,
        rtol,
        atol
    ):
        super().__init__()
        self.schedule_func = schedule_func
        self.dt = dt
        self.carrier_freqs = carrier_freqs
        self.ham_chans = ham_chans
        self.t_span = t_span
        self.rtol = rtol
        self.atol = atol
        # vmap over (y0, obs, params) batches, then jit the whole batch once.
        self.fast_batched_sim = jit(vmap(self.run_sim))
    def run_sim(self, y0, obs, params):
        # Simulate one schedule; apply the full-dimension observable `obs` to
        # the final statevector and return clipped probabilities.
        sched = self.schedule_func(params)
        converter = InstructionToSignals(self.dt, carriers=self.carrier_freqs, channels=self.ham_chans)
        signals = converter.get_signals(sched)
        # NOTE(review): `solver` is the module-level Solver, not an attribute.
        results = solver.solve(
            t_span=self.t_span,
            y0=y0 / jnp.linalg.norm(y0),
            t_eval=self.t_span,
            signals=signals,
            rtol=self.rtol,
            atol=self.atol,
            convert_results=False,
            method='jax_odeint'
        )
        state_vec = results.y.data[-1]
        state_vec = state_vec / jnp.linalg.norm(state_vec)
        new_vec = obs @ state_vec
        probs_vec = jnp.abs(new_vec)**2
        probs_vec = jnp.clip(probs_vec, a_min=0.0, a_max=1.0)
        # Shots instead of probabilities
        return probs_vec
    def estimate2(self, batch_y0, batch_params, batch_obs_str):
        # Densify the Pauli strings into qudit matrices (module-level `dim`
        # and PauliToQuditMatrix), then run the batched simulation.
        batch_obs = jnp.zeros((batch_y0.shape[0], batch_y0.shape[1], batch_y0.shape[1]), dtype=jnp.complex64)
        for i, b_str in enumerate(batch_obs_str):
            batch_obs = batch_obs.at[i].set(PauliToQuditMatrix(b_str, dim))
        return self.fast_batched_sim(batch_y0, batch_obs, batch_params)
j_solver_2 = JaxedSolver(
schedule_func=standard_func,
dt=dt,
carrier_freqs=chan_freqs,
ham_chans=ham_chans,
t_span=t_span,
rtol=rtol,
atol=atol
)
ops_str_list = ["IX", "XY", "ZX", "ZI"] * 100
batch_res = j_solver_2.estimate2(
batch_y0,
batch_params,
ops_str_list
)
%timeit j_solver_2.estimate2(batch_y0, batch_params, ops_str_list)
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
# All Imports
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
import qiskit
from qiskit import pulse
from qiskit_dynamics import Solver, DynamicsBackend
from qiskit_dynamics.pulse import InstructionToSignals
from qiskit_dynamics.array import Array
from qiskit.quantum_info import Statevector, DensityMatrix, Operator
from qiskit.circuit.parameter import Parameter
import jax
import jax.numpy as jnp
from jax import jit, vmap, block_until_ready, config
import chex
from typing import Optional, Union
Array.set_default_backend('jax')
config.update('jax_enable_x64', True)
config.update('jax_platform_name', 'cpu')
# Constructing a Two Qutrit Hamiltonian
dim = 3
v0 = 4.86e9
anharm0 = -0.32e9
r0 = 0.22e9
v1 = 4.97e9
anharm1 = -0.32e9
r1 = 0.26e9
J = 0.002e9
a = np.diag(np.sqrt(np.arange(1, dim)), 1)
adag = np.diag(np.sqrt(np.arange(1, dim)), -1)
N = np.diag(np.arange(dim))
ident = np.eye(dim, dtype=complex)
full_ident = np.eye(dim**2, dtype=complex)
N0 = np.kron(ident, N)
N1 = np.kron(N, ident)
a0 = np.kron(ident, a)
a1 = np.kron(a, ident)
a0dag = np.kron(ident, adag)
a1dag = np.kron(adag, ident)
static_ham0 = 2 * np.pi * v0 * N0 + np.pi * anharm0 * N0 * (N0 - full_ident)
static_ham1 = 2 * np.pi * v1 * N1 + np.pi * anharm1 * N1 * (N1 - full_ident)
static_ham_full = static_ham0 + static_ham1 + 2 * np.pi * J * ((a0 + a0dag) @ (a1 + a1dag))
drive_op0 = 2 * np.pi * r0 * (a0 + a0dag)
drive_op1 = 2 * np.pi * r1 * (a1 + a1dag)
# Default Solver Options
y0 = Array(Statevector(np.ones(9)))
t_linspace = np.linspace(0.0, 400e-9, 11)
t_span = np.array([t_linspace[0], t_linspace[-1]])
dt = 1/4.5e9
atol = 1e-2
rtol = 1e-4
ham_ops = [drive_op0, drive_op1, drive_op0, drive_op1]
ham_chans = ["d0", "d1", "u0", "u1"]
chan_freqs = {"d0": v0, "d1": v1, "u0": v1, "u1": v0}
solver = Solver(
static_hamiltonian=static_ham_full,
hamiltonian_operators=ham_ops,
rotating_frame=static_ham_full,
hamiltonian_channels=ham_chans,
channel_carrier_freqs=chan_freqs,
dt=dt,
)
# Constructing a custom function that takes as input a parameter vector and returns the simulated state
def standard_func(params):
    """Build a pulse schedule: a frequency-shifted Drag pulse on d0, d1, u0, u1.

    Args:
        params: (amp, sigma, freq) — Drag amplitude, width, and the frequency
            shift applied to every channel before the pulse.

    Returns:
        qiskit pulse schedule with sequential alignment.
    """
    amp, sigma, freq = params
    # Drag is a scalable symbolic pulse, so the schedule stays JAX-traceable.
    special_pulse = pulse.Drag(
        duration=320,
        amp=amp,
        sigma=sigma,
        beta=0.1,
        angle=0.1,
        limit_amplitude=False
    )
    with pulse.build(default_alignment='sequential') as sched:
        # Identical shifted pulse on each channel, in the fixed order
        # d0, d1, u0, u1 (previously four copy-pasted shift/play pairs).
        channels = (
            pulse.DriveChannel(0),
            pulse.DriveChannel(1),
            pulse.ControlChannel(0),
            pulse.ControlChannel(1),
        )
        for chan in channels:
            pulse.shift_frequency(freq, chan)
            pulse.play(special_pulse, chan)
    return sched
def evolve_func(inp_y0, params, obs):
    # Simulate the schedule for `params` from the normalized state `inp_y0`,
    # apply `obs` to the final state and return clipped per-basis-state
    # probabilities. Uses module-level solver/dt/chan_freqs/tolerances.
    sched = standard_func(params)
    converter = InstructionToSignals(dt, carriers=chan_freqs, channels=ham_chans)
    signals = converter.get_signals(sched)
    results = solver.solve(
        t_span=t_span,
        y0=inp_y0 / jnp.linalg.norm(inp_y0),
        t_eval=t_linspace,
        signals=signals,
        rtol=rtol,
        atol=atol,
        convert_results=False,
        method='jax_odeint'
    )
    # Last entry of t_linspace = final state; normalize then apply observable.
    state_vec = results.y.data[-1]
    evolved_vec = jnp.dot(obs, state_vec) / jnp.linalg.norm(state_vec)
    probs_vec = jnp.abs(evolved_vec)**2
    probs_vec = jnp.clip(probs_vec, a_min=0.0, a_max=1.0)
    return probs_vec
fast_evolve_func = jit(vmap(evolve_func))
batchsize = 400
amp_vals = jnp.linspace(0.5, 0.99, batchsize, dtype=jnp.float64).reshape(-1, 1)
sigma_vals = jnp.linspace(20, 80, batchsize, dtype=jnp.int8).reshape(-1, 1)
freq_vals = jnp.linspace(-0.5, 0.5, batchsize, dtype=jnp.float64).reshape(-1, 1) * 1e6
batch_params = jnp.concatenate((amp_vals, sigma_vals, freq_vals), axis=-1)
batch_y0 = jnp.tile(np.ones(9), (batchsize, 1))
batch_obs = jnp.tile(N0, (batchsize, 1, 1))
print(f"Batched Params Shape: {batch_params.shape}")
res = fast_evolve_func(batch_y0, batch_params, batch_obs)
print(res)
print(res.shape)
# Timing the fast jit + vmap batched simulation
%timeit fast_evolve_func(batch_y0, batch_params, batch_obs).block_until_ready()
# Timing a standard simulation without jitting or vmapping
%timeit evolve_func(batch_y0[200], batch_params[200], batch_obs[200])
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
# All Imports
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
import qiskit
from qiskit import pulse
from qiskit_dynamics import Solver, DynamicsBackend
from qiskit_dynamics.pulse import InstructionToSignals
from qiskit_dynamics.array import Array
from qiskit.quantum_info import Statevector, DensityMatrix, Operator
from qiskit.circuit.parameter import Parameter
import jax
import jax.numpy as jnp
from jax import jit, vmap, block_until_ready, config
import chex
from typing import Optional, Union
Array.set_default_backend('jax')
config.update('jax_enable_x64', True)
config.update('jax_platform_name', 'cpu')
# Constructing a Two Qutrit Hamiltonian
dim = 3
v0 = 4.86e9
anharm0 = -0.32e9
r0 = 0.22e9
v1 = 4.97e9
anharm1 = -0.32e9
r1 = 0.26e9
J = 0.002e9
a = np.diag(np.sqrt(np.arange(1, dim)), 1)
adag = np.diag(np.sqrt(np.arange(1, dim)), -1)
N = np.diag(np.arange(dim))
ident = np.eye(dim, dtype=complex)
full_ident = np.eye(dim**2, dtype=complex)
N0 = np.kron(ident, N)
N1 = np.kron(N, ident)
a0 = np.kron(ident, a)
a1 = np.kron(a, ident)
a0dag = np.kron(ident, adag)
a1dag = np.kron(adag, ident)
static_ham0 = 2 * np.pi * v0 * N0 + np.pi * anharm0 * N0 * (N0 - full_ident)
static_ham1 = 2 * np.pi * v1 * N1 + np.pi * anharm1 * N1 * (N1 - full_ident)
static_ham_full = static_ham0 + static_ham1 + 2 * np.pi * J * ((a0 + a0dag) @ (a1 + a1dag))
drive_op0 = 2 * np.pi * r0 * (a0 + a0dag)
drive_op1 = 2 * np.pi * r1 * (a1 + a1dag)
# Default Solver Options
y0 = Array(Statevector(np.ones(9)))
t_linspace = np.linspace(0.0, 200e-9, 11)
t_span = np.array([t_linspace[0], t_linspace[-1]])
dt = 1/4.5e9
atol = 1e-2
rtol = 1e-4
ham_ops = [drive_op0, drive_op1, drive_op0, drive_op1]
ham_chans = ["d0", "d1", "u0", "u1"]
chan_freqs = {"d0": v0, "d1": v1, "u0": v1, "u1": v0}
solver = Solver(
static_hamiltonian=static_ham_full,
hamiltonian_operators=ham_ops,
rotating_frame=static_ham_full,
hamiltonian_channels=ham_chans,
channel_carrier_freqs=chan_freqs,
dt=dt,
)
# Constructing General Gaussian Waveform
# Helper function that returns a lifted Gaussian symbolic equation.
def lifted_gaussian(
    t: sym.Symbol,
    center,
    t_zero,
    sigma,
) -> sym.Expr:
    """Return a symbolic Gaussian "lifted" so it is exactly zero at t_zero.

    The plain Gaussian exp(-((t - center)/sigma)^2 / 2) is shifted down by its
    value at t_zero and rescaled so the peak value stays at 1.
    """
    def gauss_at(point):
        return sym.exp(-(((point - center).expand() / sigma) ** 2) / 2)

    gauss = gauss_at(t)
    offset = gauss_at(t_zero)
    return (gauss - offset) / (1 - offset)
# Structure for Constructing New Pulse Waveform
_t, _duration, _amp, _sigma, _angle = sym.symbols("t, duration, amp, sigma, angle")
_center = _duration / 2
envelope_expr = (
_amp * sym.exp(sym.I * _angle) * lifted_gaussian(_t, _center, _duration + 1, _sigma)
)
gaussian_pulse = pulse.ScalableSymbolicPulse(
pulse_type="Gaussian",
duration=160,
amp=0.3,
angle=0,
parameters={"sigma": 40},
envelope=envelope_expr,
constraints=_sigma > 0,
valid_amp_conditions=sym.Abs(_amp) <= 1.0,
)
gaussian_pulse.draw()
# Constructing a custom function that takes as input a parameter vector and returns the simulated state
def standard_func(params):
    """Build a pulse schedule: a frequency-shifted Drag pulse on d0, d1, u0, u1.

    Drag is used because it is already a scalable symbolic pulse; an
    equivalent custom symbolic pulse (like gaussian_pulse above) would also work.

    Args:
        params: (amp, sigma, freq) — Drag amplitude, width, and the frequency
            shift applied to every channel before the pulse.

    Returns:
        qiskit pulse schedule with sequential alignment.
    """
    amp, sigma, freq = params
    special_pulse = pulse.Drag(
        duration=160,
        amp=amp,
        sigma=sigma,
        beta=0.1,
        angle=0.1,
        limit_amplitude=False
    )
    with pulse.build(default_alignment='sequential') as sched:
        # Identical shifted pulse on each channel, in the fixed order
        # d0, d1, u0, u1 (previously four copy-pasted shift/play pairs).
        channels = (
            pulse.DriveChannel(0),
            pulse.DriveChannel(1),
            pulse.ControlChannel(0),
            pulse.ControlChannel(1),
        )
        for chan in channels:
            pulse.shift_frequency(freq, chan)
            pulse.play(special_pulse, chan)
    return sched
def sim_func(params):
    # Build the schedule for `params`, convert it to channel signals, and
    # integrate the Hamiltonian model. Returns the statevector data at every
    # point of t_linspace (uses module-level solver/y0/dt/tolerances).
    sched = standard_func(params)
    converter = InstructionToSignals(dt, carriers=chan_freqs, channels=ham_chans)
    signals = converter.get_signals(sched)
    results = solver.solve(
        t_span=t_span,
        y0=y0,
        t_eval=t_linspace,
        signals=signals,
        rtol=rtol,
        atol=atol,
        convert_results=False,
        method='jax_odeint'
    )
    return results.y.data
fast_func = jit(vmap(sim_func))
batchsize = 400
amp_vals = jnp.linspace(0.0, 0.99, batchsize, dtype=jnp.float64).reshape(-1, 1)
sigma_vals = jnp.linspace(1, 40, batchsize, dtype=jnp.int8).reshape(-1, 1)
freq_vals = jnp.linspace(0.0, 0.99, batchsize, dtype=jnp.float64).reshape(-1, 1) * 1e6
batch_params = jnp.concatenate((amp_vals, sigma_vals, freq_vals), axis=-1)
print(f"Batched Params Shape: {batch_params.shape}")
res = fast_func(batch_params)
print(res)
print(res.shape)
# Timing the fast jit + vmap batched simulation
%timeit fast_func(batch_params).block_until_ready
# Timing a standard simulation without jitting or vmapping
%timeit sim_func(batch_params[200])
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from typing import Optional, Union
import qiskit
from qiskit import IBMQ, pulse
from library.dynamics_backend_estimator import DynamicsBackendEstimator
# Load saved IBM Quantum credentials and wrap the Cairo device in the
# project's DynamicsBackendEstimator.
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q-nus', group='default', project='default')
backend = provider.get_backend('ibm_cairo')
estimator = DynamicsBackendEstimator(backend)
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
# All Imports
import numpy as np
import jax
import jax.numpy as jnp
from jax.numpy.linalg import norm
import qiskit.pulse as pulse
from qiskit_dynamics.array import Array
from library.utils import PauliToQuditOperator, TwoQuditHamiltonian
from library.new_sims import JaxedSolver
Array.set_default_backend('jax')
jax.config.update('jax_enable_x64', True)
jax.config.update('jax_platform_name', 'cpu')
# Testing out the TwoQuditBackend Functionality
dt = 1/4.5e9
atol = 1e-2
rtol = 1e-4
batchsize = 400
t_linspace = np.linspace(0.0, 400e-9, 11)
t_span = np.array([t_linspace[0], t_linspace[-1]])
qudit_dim = 3
q_end = TwoQuditHamiltonian(
qudit_dim=qudit_dim,
dt=dt
)
solver = q_end.solver
ham_ops = q_end.ham_ops
ham_chans = q_end.ham_chans
chan_freqs = q_end.chan_freqs
# Make the Custom Schedule Construction Function
amp_vals = jnp.linspace(0.5, 0.99, batchsize, dtype=jnp.float64).reshape(-1, 1)
sigma_vals = jnp.linspace(20, 80, batchsize, dtype=jnp.int8).reshape(-1, 1)
freq_vals = jnp.linspace(-0.5, 0.5, batchsize, dtype=jnp.float64).reshape(-1, 1) * 1e6
batch_params = jnp.concatenate((amp_vals, sigma_vals, freq_vals), axis=-1)
init_y0 = jnp.ones(qudit_dim ** 2, dtype=jnp.complex128)
init_y0 /= norm(init_y0)
batch_y0 = jnp.tile(init_y0, (batchsize, 1))
batch_str = ["XX", "IX", "YZ", "ZY"] * 100
print(f"initial statevec: {init_y0}")
print(f"statevector * hc: {init_y0 @ init_y0.conj().T}")
def standard_func(params):
    """Build a pulse schedule: a frequency-shifted Drag pulse on d0, d1, u0, u1.

    Args:
        params: (amp, sigma, freq) — Drag amplitude, width, and the frequency
            shift applied to every channel before the pulse.

    Returns:
        qiskit pulse schedule with sequential alignment.
    """
    amp, sigma, freq = params
    # Drag is a scalable symbolic pulse, so the schedule stays JAX-traceable.
    special_pulse = pulse.Drag(
        duration=320,
        amp=amp,
        sigma=sigma,
        beta=0.1,
        angle=0.1,
        limit_amplitude=False
    )
    with pulse.build(default_alignment='sequential') as sched:
        # Identical shifted pulse on each channel, in the fixed order
        # d0, d1, u0, u1 (previously four copy-pasted shift/play pairs).
        channels = (
            pulse.DriveChannel(0),
            pulse.DriveChannel(1),
            pulse.ControlChannel(0),
            pulse.ControlChannel(1),
        )
        for chan in channels:
            pulse.shift_frequency(freq, chan)
            pulse.play(special_pulse, chan)
    return sched
# Make the JaxedSolver backend
j_solver = JaxedSolver(
schedule_func=standard_func,
solver=solver,
dt=dt,
carrier_freqs=chan_freqs,
ham_chans=ham_chans,
ham_ops=ham_ops,
t_span=t_span,
rtol=rtol,
atol=atol
)
j_solver.estimate2(batch_y0=batch_y0, batch_params=batch_params, batch_obs_str=batch_str)
%timeit j_solver.estimate2(batch_y0=batch_y0, batch_params=batch_params, batch_obs_str=batch_str)
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
# All Imports
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
import qiskit
from qiskit import pulse
from qiskit_dynamics import Solver, DynamicsBackend
from qiskit_dynamics.pulse import InstructionToSignals
from qiskit_dynamics.array import Array
from qiskit.quantum_info import Statevector, DensityMatrix, Operator, SparsePauliOp
from qiskit.circuit.parameter import Parameter
import jax
import jax.numpy as jnp
from jax import jit, vmap, block_until_ready, config
import chex
from typing import Optional, Union
Array.set_default_backend('jax')
config.update('jax_enable_x64', True)
config.update('jax_platform_name', 'cpu')
# Constructing a Two Qutrit Hamiltonian
dim = 3
v0 = 4.86e9
anharm0 = -0.32e9
r0 = 0.22e9
v1 = 4.97e9
anharm1 = -0.32e9
r1 = 0.26e9
J = 0.002e9
a = np.diag(np.sqrt(np.arange(1, dim)), 1)
adag = np.diag(np.sqrt(np.arange(1, dim)), -1)
N = np.diag(np.arange(dim))
ident = np.eye(dim, dtype=complex)
full_ident = np.eye(dim**2, dtype=complex)
N0 = np.kron(ident, N)
N1 = np.kron(N, ident)
a0 = np.kron(ident, a)
a1 = np.kron(a, ident)
a0dag = np.kron(ident, adag)
a1dag = np.kron(adag, ident)
static_ham0 = 2 * np.pi * v0 * N0 + np.pi * anharm0 * N0 * (N0 - full_ident)
static_ham1 = 2 * np.pi * v1 * N1 + np.pi * anharm1 * N1 * (N1 - full_ident)
static_ham_full = static_ham0 + static_ham1 + 2 * np.pi * J * ((a0 + a0dag) @ (a1 + a1dag))
drive_op0 = 2 * np.pi * r0 * (a0 + a0dag)
drive_op1 = 2 * np.pi * r1 * (a1 + a1dag)
batchsize = 400
amp_vals = jnp.linspace(0.5, 0.99, batchsize, dtype=jnp.float64).reshape(-1, 1)
sigma_vals = jnp.linspace(20, 80, batchsize, dtype=jnp.int8).reshape(-1, 1)
freq_vals = jnp.linspace(-0.5, 0.5, batchsize, dtype=jnp.float64).reshape(-1, 1) * 1e6
batch_params = jnp.concatenate((amp_vals, sigma_vals, freq_vals), axis=-1)
batch_y0 = jnp.tile(np.ones(9), (batchsize, 1))
batch_obs = jnp.tile(N0, (batchsize, 1, 1))
print(f"Batched Params Shape: {batch_params.shape}")
# Constructing a custom function that takes as input a parameter vector and returns the simulated state
def standard_func(params):
    """Build a pulse schedule from a ``(amp, sigma, freq)`` parameter vector.

    The same Drag pulse, preceded by a frequency shift of ``freq``, is played
    sequentially on drive channels 0/1 and control channels 0/1.
    """
    amp, sigma, freq = params
    # Drag is already a Scalable Symbolic Pulse, so it traces cleanly under JAX.
    drag_pulse = pulse.Drag(
        duration=320,
        amp=amp,
        sigma=sigma,
        beta=0.1,
        angle=0.1,
        limit_amplitude=False
    )
    with pulse.build(default_alignment='sequential') as sched:
        channels = (
            pulse.DriveChannel(0),
            pulse.DriveChannel(1),
            pulse.ControlChannel(0),
            pulse.ControlChannel(1),
        )
        for chan in channels:
            pulse.shift_frequency(freq, chan)
            pulse.play(drag_pulse, chan)
    return sched
# Constructing the new solver
# Backend sample time and ODE tolerances used throughout this notebook.
dt = 1/4.5e9
atol = 1e-2
rtol = 1e-4
t_linspace = np.linspace(0.0, 400e-9, 11)
t_span = np.array([t_linspace[0], t_linspace[-1]])
# Drive operators are reused for the control channels u0/u1, whose carriers
# are set to the *other* qubit's frequency (cross-resonance style).
ham_ops = [drive_op0, drive_op1, drive_op0, drive_op1]
ham_chans = ["d0", "d1", "u0", "u1"]
chan_freqs = {"d0": v0, "d1": v1, "u0": v1, "u1": v0}
solver = Solver(
    static_hamiltonian=static_ham_full,
    hamiltonian_operators=ham_ops,
    rotating_frame=static_ham_full,
    hamiltonian_channels=ham_chans,
    channel_carrier_freqs=chan_freqs,
    dt=dt,
)
class JaxifiedSolver:
    """jit + vmap wrapper around a schedule-parametrized pulse simulation.

    NOTE(review): ``run_sim`` calls the *module-level* ``solver`` rather than
    one passed to the constructor — confirm this coupling is intended.
    """
    def __init__(
        self,
        schedule_func,
        dt,
        carrier_freqs,
        ham_chans,
        t_span,
        rtol,
        atol
    ):
        # schedule_func: callable mapping a parameter vector to a pulse Schedule.
        super().__init__()
        self.schedule_func = schedule_func
        self.dt = dt
        self.carrier_freqs = carrier_freqs
        self.ham_chans = ham_chans
        self.t_span = t_span
        self.rtol = rtol
        self.atol = atol
        # One compiled kernel mapped over (y0, obs, params) batches.
        self.fast_batched_sim = jit(vmap(self.run_sim))
    def run_sim(self, y0, obs, params):
        """Simulate one schedule from normalized ``y0`` and return clipped
        |amplitude|^2 after applying ``obs``.

        NOTE(review): ``obs`` is applied only to the first four components of
        the final state (the remaining components pass through unchanged) —
        looks like a 2x2-subspace trick for the two-qutrit state; confirm.
        """
        sched = self.schedule_func(params)
        converter = InstructionToSignals(self.dt, carriers=self.carrier_freqs, channels=self.ham_chans)
        signals = converter.get_signals(sched)
        results = solver.solve(
            t_span=self.t_span,
            y0=y0 / jnp.linalg.norm(y0),
            t_eval=self.t_span,
            signals=signals,
            rtol=self.rtol,
            atol=self.atol,
            convert_results=False,
            method='jax_odeint'
        )
        state_vec = results.y.data[-1]
        state_vec = state_vec / jnp.linalg.norm(state_vec)
        two_vec = state_vec[:4]
        evolved_vec = jnp.dot(obs, two_vec)
        new_vec = jnp.concatenate((evolved_vec, state_vec[4:]))
        probs_vec = jnp.abs(new_vec)**2
        probs_vec = jnp.clip(probs_vec, a_min=0.0, a_max=1.0)
        # Shots instead of probabilities
        return probs_vec
    def estimate(self, batch_y0, batch_obs, batch_params):
        """Convert operator objects to dense matrices, then run the batch.

        ``batch_obs`` entries must expose ``to_matrix()`` (e.g. SparsePauliOp).
        """
        ops_mat = [b.to_matrix() for b in batch_obs]
        ops_arr = jnp.array(ops_mat)
        return self.fast_batched_sim(batch_y0, ops_arr, batch_params)
j_solver = JaxifiedSolver(
    schedule_func=standard_func,
    dt=dt,
    carrier_freqs=chan_freqs,
    ham_chans=ham_chans,
    t_span=t_span,
    rtol=rtol,
    atol=atol
)
# 2-qubit Pauli observables for the 4-dim subspace used by run_sim.
ops_list = [SparsePauliOp(["IX"]), SparsePauliOp(["IY"]), SparsePauliOp(["YZ"]), SparsePauliOp(["ZX"])] * 100
batch_res = j_solver.estimate(
    batch_y0,
    ops_list,
    batch_params
)
# %timeit is an IPython magic — this segment is notebook-exported code.
%timeit j_solver.estimate(batch_y0,ops_list,batch_params)
# Scratch exploration: inspecting dense Pauli matrices and qudit embeddings.
op_x = SparsePauliOp('IXXYI')
arr = op_x.to_matrix()
print(arr.shape)
print(op_x.to_matrix())
dim_size = 8
qudit = jnp.array([1.0, 0.0, 0.0, 0.0])
qubit_op = jnp.array([[0., 1.0], [1.0, 0.0]])
big_ident = jnp.identity(dim_size)
# Embed the 2x2 qubit op in the top-left corner of a dim_size identity.
big_op = big_ident.at[:2,:2].set(qubit_op)
big_op
op_str = "XI"
num_qubits = len(op_str)
qudit_dim_size = 3
# |1> state of a two-qutrit register.
init_state = np.zeros(qudit_dim_size ** num_qubits, dtype=np.complex64)
init_state[1] = 1
# Single-qubit Pauli matrices keyed by letter, used by PauliToQuditMatrix.
base_gates_dict = {
    "I": jnp.array([[1.0, 0.], [0., 1.]]),
    "X": jnp.array([[0., 1.], [1., 0.]]),
    "Y": jnp.array([[0., -1.0j], [1.0j, 0.]]),
    "Z": jnp.array([[1., 0.], [0., -1.]])
}
def PauliToQuditMatrix(inp_str: str, qudit_dim_size: int = 4) -> np.ndarray:
    """Embed a multi-qubit Pauli string into a multi-qudit operator.

    Each Pauli letter is placed in the top-left 2x2 corner of a
    ``qudit_dim_size`` x ``qudit_dim_size`` identity, and the per-qudit
    operators are combined left-to-right with Kronecker products.

    Args:
        inp_str: Pauli string such as ``"XI"`` (letters from I, X, Y, Z).
        qudit_dim_size: dimension of each qudit (was annotated ``Optional[int]``
            before, but ``None`` was never valid — the annotation is now ``int``).

    Returns:
        np.ndarray: complex64 matrix of shape
        ``(qudit_dim_size**len(inp_str),) * 2``.

    Raises:
        ValueError: if ``inp_str`` is empty (previously a bare IndexError).
        KeyError: if a character is not one of I, X, Y, Z.
    """
    if not inp_str:
        raise ValueError("inp_str must contain at least one Pauli letter")
    # Local Pauli table instead of the module-level jnp dict: keeps the
    # function self-contained and avoids a NumPy<->JAX round trip.
    pauli_map = {
        "I": np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.complex64),
        "X": np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.complex64),
        "Y": np.array([[0.0, -1.0j], [1.0j, 0.0]], dtype=np.complex64),
        "Z": np.array([[1.0, 0.0], [0.0, -1.0]], dtype=np.complex64),
    }
    qudit_op_list = []
    for word in inp_str:
        qud_op = np.identity(qudit_dim_size, dtype=np.complex64)
        # The qubit operator acts on the first two levels only.
        qud_op[:2, :2] = pauli_map[word]
        qudit_op_list.append(qud_op)
    complete_op = qudit_op_list[0]
    for next_op in qudit_op_list[1:]:
        complete_op = np.kron(complete_op, next_op)
    return complete_op
def evolve_state(batch_state, batch_var_str):
    """Apply, element-wise, the qudit-embedded Pauli operator named by each
    string in ``batch_var_str`` to the matching state in ``batch_state``.

    Uses the module-level ``qudit_dim_size`` for the embedding dimension.
    """
    return [
        PauliToQuditMatrix(pauli_str, qudit_dim_size) @ state
        for state, pauli_str in zip(batch_state, batch_var_str)
    ]
# Benchmark the pure-NumPy per-element evolution on a batch of 400 copies.
b_size = 400
batch_state = [init_state] * b_size
batch_var_str = [op_str] * b_size
%timeit evolve_state(batch_state, batch_var_str)
class JaxedSolver:
    """Variant of JaxifiedSolver whose observables are full-dimension
    matrices built from Pauli strings.

    NOTE(review): like JaxifiedSolver above, ``run_sim`` uses the
    module-level ``solver`` — confirm this coupling is intended.
    """
    def __init__(
        self,
        schedule_func,
        dt,
        carrier_freqs,
        ham_chans,
        t_span,
        rtol,
        atol
    ):
        # schedule_func: callable mapping a parameter vector to a pulse Schedule.
        super().__init__()
        self.schedule_func = schedule_func
        self.dt = dt
        self.carrier_freqs = carrier_freqs
        self.ham_chans = ham_chans
        self.t_span = t_span
        self.rtol = rtol
        self.atol = atol
        # One compiled kernel mapped over (y0, obs, params) batches.
        self.fast_batched_sim = jit(vmap(self.run_sim))
    def run_sim(self, y0, obs, params):
        """Simulate one schedule and return clipped |amplitude|^2 of
        ``obs @ final_state`` (obs acts on the *whole* state here)."""
        sched = self.schedule_func(params)
        converter = InstructionToSignals(self.dt, carriers=self.carrier_freqs, channels=self.ham_chans)
        signals = converter.get_signals(sched)
        results = solver.solve(
            t_span=self.t_span,
            y0=y0 / jnp.linalg.norm(y0),
            t_eval=self.t_span,
            signals=signals,
            rtol=self.rtol,
            atol=self.atol,
            convert_results=False,
            method='jax_odeint'
        )
        state_vec = results.y.data[-1]
        state_vec = state_vec / jnp.linalg.norm(state_vec)
        new_vec = obs @ state_vec
        probs_vec = jnp.abs(new_vec)**2
        probs_vec = jnp.clip(probs_vec, a_min=0.0, a_max=1.0)
        # Shots instead of probabilities
        return probs_vec
    def estimate2(self, batch_y0, batch_params, batch_obs_str):
        """Build dense qudit observables from Pauli strings (using the
        module-level ``dim`` as the per-qudit dimension), then run the batch."""
        batch_obs = jnp.zeros((batch_y0.shape[0], batch_y0.shape[1], batch_y0.shape[1]), dtype=jnp.complex64)
        for i, b_str in enumerate(batch_obs_str):
            batch_obs = batch_obs.at[i].set(PauliToQuditMatrix(b_str, dim))
        return self.fast_batched_sim(batch_y0, batch_obs, batch_params)
j_solver_2 = JaxedSolver(
    schedule_func=standard_func,
    dt=dt,
    carrier_freqs=chan_freqs,
    ham_chans=ham_chans,
    t_span=t_span,
    rtol=rtol,
    atol=atol
)
# Observables as Pauli strings; expanded to dense matrices inside estimate2.
ops_str_list = ["IX", "XY", "ZX", "ZI"] * 100
batch_res = j_solver_2.estimate2(
    batch_y0,
    batch_params,
    ops_str_list
)
%timeit j_solver_2.estimate2(batch_y0, batch_params, ops_str_list)
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
# All Imports
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
import qiskit
from qiskit import pulse
from qiskit_dynamics import Solver, DynamicsBackend
from qiskit_dynamics.pulse import InstructionToSignals
from qiskit_dynamics.array import Array
from qiskit.quantum_info import Statevector, DensityMatrix, Operator
from qiskit.circuit.parameter import Parameter
import jax
import jax.numpy as jnp
from jax import jit, vmap, block_until_ready, config
import chex
from typing import Optional, Union
Array.set_default_backend('jax')
config.update('jax_enable_x64', True)
config.update('jax_platform_name', 'cpu')
# Constructing a Two Qutrit Hamiltonian
# (Duplicate of the setup earlier in this file; kept in sync.)
dim = 3
# Qutrit parameters (Hz): frequencies v*, anharmonicities, drive strengths, coupling J.
v0 = 4.86e9
anharm0 = -0.32e9
r0 = 0.22e9
v1 = 4.97e9
anharm1 = -0.32e9
r1 = 0.26e9
J = 0.002e9
# Single-qutrit ladder and number operators.
a = np.diag(np.sqrt(np.arange(1, dim)), 1)
adag = np.diag(np.sqrt(np.arange(1, dim)), -1)
N = np.diag(np.arange(dim))
ident = np.eye(dim, dtype=complex)
full_ident = np.eye(dim**2, dtype=complex)
N0 = np.kron(ident, N)
N1 = np.kron(N, ident)
a0 = np.kron(ident, a)
a1 = np.kron(a, ident)
a0dag = np.kron(ident, adag)
a1dag = np.kron(adag, ident)
# Diagonal Duffing terms (element-wise `*` is valid on diagonal matrices).
static_ham0 = 2 * np.pi * v0 * N0 + np.pi * anharm0 * N0 * (N0 - full_ident)
static_ham1 = 2 * np.pi * v1 * N1 + np.pi * anharm1 * N1 * (N1 - full_ident)
static_ham_full = static_ham0 + static_ham1 + 2 * np.pi * J * ((a0 + a0dag) @ (a1 + a1dag))
drive_op0 = 2 * np.pi * r0 * (a0 + a0dag)
drive_op1 = 2 * np.pi * r1 * (a1 + a1dag)
# Default Solver Options
# NOTE(review): np.ones(9) is an unnormalized state; downstream code
# normalizes before solving.
y0 = Array(Statevector(np.ones(9)))
t_linspace = np.linspace(0.0, 400e-9, 11)
t_span = np.array([t_linspace[0], t_linspace[-1]])
dt = 1/4.5e9
atol = 1e-2
rtol = 1e-4
ham_ops = [drive_op0, drive_op1, drive_op0, drive_op1]
ham_chans = ["d0", "d1", "u0", "u1"]
chan_freqs = {"d0": v0, "d1": v1, "u0": v1, "u1": v0}
solver = Solver(
    static_hamiltonian=static_ham_full,
    hamiltonian_operators=ham_ops,
    rotating_frame=static_ham_full,
    hamiltonian_channels=ham_chans,
    channel_carrier_freqs=chan_freqs,
    dt=dt,
)
# Constructing a custom function that takes as input a parameter vector and returns the simulated state
def standard_func(params):
    """Build a pulse schedule from a ``(amp, sigma, freq)`` parameter vector.

    The same Drag pulse, preceded by a frequency shift of ``freq``, is played
    sequentially on drive channels 0/1 and control channels 0/1.
    """
    amp, sigma, freq = params
    # Drag is already a Scalable Symbolic Pulse, so it traces cleanly under JAX.
    drag_pulse = pulse.Drag(
        duration=320,
        amp=amp,
        sigma=sigma,
        beta=0.1,
        angle=0.1,
        limit_amplitude=False
    )
    with pulse.build(default_alignment='sequential') as sched:
        channels = (
            pulse.DriveChannel(0),
            pulse.DriveChannel(1),
            pulse.ControlChannel(0),
            pulse.ControlChannel(1),
        )
        for chan in channels:
            pulse.shift_frequency(freq, chan)
            pulse.play(drag_pulse, chan)
    return sched
def evolve_func(inp_y0, params, obs):
    """Simulate the schedule built from ``params`` and return clipped
    measurement probabilities of the final state acted on by ``obs``.

    Relies on the module-level ``solver``, ``dt``, ``chan_freqs``,
    ``ham_chans``, ``t_span``, ``t_linspace``, ``rtol`` and ``atol``.
    """
    schedule = standard_func(params)
    signal_converter = InstructionToSignals(dt, carriers=chan_freqs, channels=ham_chans)
    sim_result = solver.solve(
        t_span=t_span,
        y0=inp_y0 / jnp.linalg.norm(inp_y0),
        t_eval=t_linspace,
        signals=signal_converter.get_signals(schedule),
        rtol=rtol,
        atol=atol,
        convert_results=False,
        method='jax_odeint'
    )
    final_state = sim_result.y.data[-1]
    observed = jnp.dot(obs, final_state) / jnp.linalg.norm(final_state)
    # Clip to [0, 1] to guard against numerical overshoot.
    return jnp.clip(jnp.abs(observed) ** 2, a_min=0.0, a_max=1.0)
# Compile one batched kernel over (y0, params, obs) triples.
fast_evolve_func = jit(vmap(evolve_func))
batchsize = 400
amp_vals = jnp.linspace(0.5, 0.99, batchsize, dtype=jnp.float64).reshape(-1, 1)
# NOTE(review): int8 linspace truncates fractional sigma values — confirm intended.
sigma_vals = jnp.linspace(20, 80, batchsize, dtype=jnp.int8).reshape(-1, 1)
freq_vals = jnp.linspace(-0.5, 0.5, batchsize, dtype=jnp.float64).reshape(-1, 1) * 1e6
batch_params = jnp.concatenate((amp_vals, sigma_vals, freq_vals), axis=-1)
batch_y0 = jnp.tile(np.ones(9), (batchsize, 1))
batch_obs = jnp.tile(N0, (batchsize, 1, 1))
print(f"Batched Params Shape: {batch_params.shape}")
res = fast_evolve_func(batch_y0, batch_params, batch_obs)
print(res)
print(res.shape)
# Timing the fast jit + vmap batched simulation
%timeit fast_evolve_func(batch_y0, batch_params, batch_obs).block_until_ready()
# Timing a standard simulation without jitting or vmapping
%timeit evolve_func(batch_y0[200], batch_params[200], batch_obs[200])
# Constructing the new solver
class JaxifiedSolver:
    """jit + vmap wrapper around a schedule-parametrized pulse simulation.

    NOTE(review): ``run_sim`` calls the *module-level* ``solver`` rather than
    one passed to the constructor — confirm this coupling is intended.
    """
    def __init__(
        self,
        schedule_func,
        dt,
        carrier_freqs,
        ham_chans,
        t_span,
        rtol,
        atol
    ):
        # schedule_func: callable mapping a parameter vector to a pulse Schedule.
        super().__init__()
        self.schedule_func = schedule_func
        self.dt = dt
        self.carrier_freqs = carrier_freqs
        self.ham_chans = ham_chans
        self.t_span = t_span
        self.rtol = rtol
        self.atol = atol
        # One compiled kernel mapped over (y0, obs, params) batches.
        self.fast_batched_sim = jit(vmap(self.run_sim))
    def run_sim(self, y0, obs, params):
        """Simulate one schedule from normalized ``y0`` and return clipped
        |amplitude|^2 of ``obs @ final_state`` (normalized)."""
        sched = self.schedule_func(params)
        converter = InstructionToSignals(self.dt, carriers=self.carrier_freqs, channels=self.ham_chans)
        signals = converter.get_signals(sched)
        results = solver.solve(
            t_span=self.t_span,
            y0=y0 / jnp.linalg.norm(y0),
            t_eval=self.t_span,
            signals=signals,
            rtol=self.rtol,
            atol=self.atol,
            convert_results=False,
            method='jax_odeint'
        )
        state_vec = results.y.data[-1]
        evolved_vec = jnp.dot(obs, state_vec) / jnp.linalg.norm(state_vec)
        probs_vec = jnp.abs(evolved_vec)**2
        probs_vec = jnp.clip(probs_vec, a_min=0.0, a_max=1.0)
        return probs_vec
j_solver = JaxifiedSolver(
    schedule_func=standard_func,
    dt=dt,
    carrier_freqs=chan_freqs,
    ham_chans=ham_chans,
    t_span=t_span,
    rtol=rtol,
    atol=atol
)
# Run and then time the pre-compiled batched simulation directly.
batch_res = j_solver.fast_batched_sim(
    batch_y0,
    batch_obs,
    batch_params
)
%timeit j_solver.fast_batched_sim(batch_y0, batch_obs, batch_params)
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from typing import Optional, Union
import qiskit
from qiskit import IBMQ, pulse
from library.dynamics_backend_estimator import DynamicsBackendEstimator
# NOTE(review): this segment is an API sketch, not runnable code —
# `estimator.run()` is called with no arguments, and
# `JaxifiedDynamicsBackend`, `observables`, and `batch_params` are
# undefined here.
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q-nus', group='default', project='default')
backend = provider.get_backend('ibm_cairo')
estimator = DynamicsBackendEstimator(backend)
estimator.run(
)
backend = JaxifiedDynamicsBackend() # Contains JaxSolver methods instead of Standard Solver
estimator = DynamicsBackendEstimator(backend)
estimator.run(
    observables, # Either Qiskit Operators, Custom Class, or processed ndarrays
    batch_params,
)
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
import copy
import uuid
import datetime
from qiskit.quantum_info.operators.base_operator import BaseOperator
from qiskit.quantum_info.states.quantum_state import QuantumState
from qiskit_dynamics import DynamicsBackend, Solver, Signal, RotatingFrame
from qiskit_dynamics.solvers.solver_classes import (
format_final_states,
validate_and_format_initial_state,
)
from typing import Optional, List, Union, Callable, Tuple
from qiskit_dynamics.array import wrap
from qiskit import pulse
from qiskit_dynamics.models import HamiltonianModel, LindbladModel
from qiskit_dynamics.models.hamiltonian_model import is_hermitian
from qiskit_dynamics.type_utils import to_numeric_matrix_type
from qiskit import QuantumCircuit
from qiskit.result import Result
from qiskit.quantum_info import Statevector
from qiskit.pulse import Schedule, ScheduleBlock
from qiskit_dynamics.array import Array
import jax
import jax.numpy as jnp
from jax import block_until_ready, vmap
import numpy as np
from scipy.integrate._ivp.ivp import OdeResult
# Wrap the JAX transformations so they understand qiskit-dynamics Array types.
jit = wrap(jax.jit, decorator=True)
qd_vmap = wrap(vmap, decorator=True)
# Anything the solver can execute: a circuit or a pulse schedule (block).
Runnable = Union[QuantumCircuit, Schedule, ScheduleBlock]
class JaxSolver(Solver):
"""This custom Solver behaves exactly like the original Solver object except one difference, the user
provides the function that can be jitted for faster simulations (should be provided to the class
non-jit compiled)"""
def __init__(
self,
static_hamiltonian: Optional[Array] = None,
hamiltonian_operators: Optional[Array] = None,
static_dissipators: Optional[Array] = None,
dissipator_operators: Optional[Array] = None,
hamiltonian_channels: Optional[List[str]] = None,
dissipator_channels: Optional[List[str]] = None,
channel_carrier_freqs: Optional[dict] = None,
dt: Optional[float] = None,
rotating_frame: Optional[Union[Array, RotatingFrame]] = None,
in_frame_basis: bool = False,
evaluation_mode: str = "dense",
rwa_cutoff_freq: Optional[float] = None,
rwa_carrier_freqs: Optional[Union[Array, Tuple[Array, Array]]] = None,
validate: bool = True,
schedule_func: Optional[Callable[[], Schedule]] = None,
):
"""Initialize solver with model information.
Args:
static_hamiltonian: Constant Hamiltonian term. If a ``rotating_frame``
is specified, the ``frame_operator`` will be subtracted from
the static_hamiltonian.
hamiltonian_operators: Hamiltonian operators.
static_dissipators: Constant dissipation operators.
dissipator_operators: Dissipation operators with time-dependent coefficients.
hamiltonian_channels: List of channel names in pulse schedules corresponding to
Hamiltonian operators.
dissipator_channels: List of channel names in pulse schedules corresponding to
dissipator operators.
channel_carrier_freqs: Dictionary mapping channel names to floats which represent
the carrier frequency of the pulse channel with the
corresponding name.
dt: Sample rate for simulating pulse schedules.
rotating_frame: Rotating frame to transform the model into. Rotating frames which
are diagonal can be supplied as a 1d array of the diagonal elements,
to explicitly indicate that they are diagonal.
in_frame_basis: Whether to represent the model in the basis in which the rotating
frame operator is diagonalized. See class documentation for a more
detailed explanation on how this argument affects object behaviour.
evaluation_mode: Method for model evaluation. See documentation for
``HamiltonianModel.evaluation_mode`` or
``LindbladModel.evaluation_mode``.
(if dissipators in model) for valid modes.
rwa_cutoff_freq: Rotating wave approximation cutoff frequency. If ``None``, no
approximation is made.
rwa_carrier_freqs: Carrier frequencies to use for rotating wave approximation.
If no time dependent coefficients in model leave as ``None``,
if no time-dependent dissipators specify as a list of frequencies
for each Hamiltonian operator, and if time-dependent dissipators
present specify as a tuple of lists of frequencies, one for
Hamiltonian operators and one for dissipators.
validate: Whether or not to validate Hamiltonian operators as being Hermitian.
jittable_func: Callable or list of Callables taking as inputs arrays such that parametrized pulse simulation can be done in an
optimized manner
Raises:
QiskitError: If arguments concerning pulse-schedule interpretation are insufficiently
specified.
"""
super().__init__(
static_hamiltonian,
hamiltonian_operators,
static_dissipators,
dissipator_operators,
hamiltonian_channels,
dissipator_channels,
channel_carrier_freqs,
dt,
rotating_frame,
in_frame_basis,
evaluation_mode,
rwa_cutoff_freq,
rwa_carrier_freqs,
validate,
)
self._schedule_func = schedule_func
    @property
    def circuit_macro(self):
        """Return the schedule-building callable supplied at construction
        (may be ``None``)."""
        return self._schedule_func
@circuit_macro.setter
def set_macro(self, func):
"""
This setter should be done each time one wants to switch the target circuit truncation
"""
self._schedule_func = func
def _solve_schedule_list_jax(
self,
t_span_list: List[Array],
y0_list: List[Union[Array, QuantumState, BaseOperator]],
schedule_list: List[Schedule],
convert_results: bool = True,
**kwargs,
) -> List[OdeResult]:
param_dicts = kwargs["parameter_dicts"]
relevant_params = param_dicts[0].keys()
observables_circuits = kwargs["observables"]
param_values = kwargs["parameter_values"]
for key in ["parameter_dicts", "parameter_values", "parameter_values"]:
kwargs.pop(key)
def sim_function(params, t_span, y0_input, y0_cls):
parametrized_schedule = self.circuit_macro()
parametrized_schedule.assign_parameters(
{
param_obj: param
for (param_obj, param) in zip(relevant_params, params)
}
)
signals = self._schedule_converter.get_signals(parametrized_schedule)
# Perhaps replace below by solve_lmde
results = self.solve(t_span, y0_input, signals, **kwargs)
results.y = format_final_states(results.y, self.model, y0_input, y0_cls)
return Array(results.t).data, Array(results.y).data
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
import numpy as np
import qiskit
from qiskit import pulse
from qiskit_dynamics import Solver, DynamicsBackend
from qiskit_dynamics.pulse import InstructionToSignals
import jax.numpy as jnp
from jax import jit, vmap, block_until_ready
import chex
from typing import Optional, Union
from library.utils import PauliToQuditOperator
class JaxedDynamicsBackend:
    """Placeholder backend wrapper.

    NOTE(review): the body is empty apart from ``super().__init__()`` — this
    appears to be a stub awaiting implementation.
    """
    def __init__(
        self,
    ):
        super().__init__()
class JaxedSolver:
    """Batched, JAX-accelerated pulse simulator.

    Wraps a schedule-building function and a ``qiskit_dynamics`` solver and
    exposes ``fast_batched_sim`` — a jitted, vmapped simulation over batches
    of (initial state, observable matrix, pulse parameters).
    """

    def __init__(
        self,
        schedule_func,
        solver,
        dt,
        carrier_freqs,
        ham_chans,
        ham_ops,
        t_span,
        rtol,
        atol,
    ):
        """Store the simulation configuration and compile the batched kernel.

        Args:
            schedule_func: callable mapping a parameter vector to a pulse schedule.
            solver: ``qiskit_dynamics`` Solver used for the time evolution.
            dt: backend sample time step.
            carrier_freqs: dict mapping channel names to carrier frequencies.
            ham_chans: channel names matching the Hamiltonian operators.
            ham_ops: Hamiltonian drive operators (stored for reference; not
                used directly by this class).
            t_span: (t0, tf) integration window.
            rtol: relative ODE tolerance.
            atol: absolute ODE tolerance.
        """
        super().__init__()
        self.schedule_func = schedule_func
        self.solver = solver
        self.dt = dt
        self.carrier_freqs = carrier_freqs
        self.ham_chans = ham_chans
        self.ham_ops = ham_ops
        self.t_span = t_span
        self.rtol = rtol
        self.atol = atol
        # jit(vmap(...)) yields one compiled kernel mapped over the batch axis.
        self.fast_batched_sim = jit(vmap(self.run_sim))

    def run_sim(self, y0, obs, params):
        """Simulate one schedule from normalized ``y0`` and return clipped
        |amplitude|^2 of ``obs @ final_state``."""
        sched = self.schedule_func(params)
        converter = InstructionToSignals(
            self.dt, carriers=self.carrier_freqs, channels=self.ham_chans
        )
        signals = converter.get_signals(sched)
        results = self.solver.solve(
            t_span=self.t_span,
            y0=y0 / jnp.linalg.norm(y0),
            t_eval=self.t_span,
            signals=signals,
            rtol=self.rtol,
            atol=self.atol,
            convert_results=False,
            method="jax_odeint",
        )
        state_vec = results.y.data[-1]
        state_vec = state_vec / jnp.linalg.norm(state_vec)
        new_vec = obs @ state_vec
        probs_vec = jnp.abs(new_vec) ** 2
        probs_vec = jnp.clip(probs_vec, a_min=0.0, a_max=1.0)
        # TODO: sample shots instead of returning exact probabilities.
        return probs_vec

    def estimate2(self, batch_y0, batch_params, batch_obs_str):
        """Batched simulation with observables given as Pauli strings.

        Args:
            batch_y0: (batch, hilbert_dim) initial states.
            batch_params: (batch, n_params) pulse parameters.
            batch_obs_str: Pauli strings (one per batch entry, equal lengths).
        """
        batch_obs = jnp.zeros(
            (batch_y0.shape[0], batch_y0.shape[1], batch_y0.shape[1]),
            dtype=jnp.complex64,
        )
        num_qubits = len(batch_obs_str[0])
        # Infer the per-qudit dimension from the full Hilbert-space size.
        # Bug fix: use round() rather than int() — int() truncates, so e.g.
        # 1000 ** (1/3) == 9.9999... would have produced 9 instead of 10.
        qudit_dim = round(batch_y0.shape[1] ** (1 / num_qubits))
        for i, b_str in enumerate(batch_obs_str):
            batch_obs = batch_obs.at[i].set(
                PauliToQuditOperator(b_str, qudit_dim)
            )
        return self.fast_batched_sim(batch_y0, batch_obs, batch_params)
|
https://github.com/AnikenC/JaxifiedQiskit
|
AnikenC
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Utils for using with Qiskit unit tests."""
import logging
import os
import unittest
from enum import Enum
from qiskit import __path__ as qiskit_path
class Path(Enum):
    """Helper with paths commonly used during the tests.

    All values are computed relative to the installed ``qiskit`` package
    location at import time.
    """
    # Main SDK path:    qiskit/
    SDK = qiskit_path[0]
    # test.python path: qiskit/test/python/
    TEST = os.path.normpath(os.path.join(SDK, '..', 'test', 'python'))
    # Examples path:    examples/
    EXAMPLES = os.path.normpath(os.path.join(SDK, '..', 'examples'))
    # Schemas path:     qiskit/schemas
    SCHEMAS = os.path.normpath(os.path.join(SDK, 'schemas'))
    # VCR cassettes path: qiskit/test/cassettes/
    CASSETTES = os.path.normpath(os.path.join(TEST, '..', 'cassettes'))
    # Sample QASMs path: qiskit/test/python/qasm
    QASMS = os.path.normpath(os.path.join(TEST, 'qasm'))
def setup_test_logging(logger, log_level, filename):
    """Attach a file handler to a logger and set its level.

    Args:
        logger (Logger): logger object to be updated.
        log_level (str): logging level name (e.g. ``'DEBUG'``); falls back
            to ``INFO`` when the name is not a valid level.
        filename (str): name of the output file.
    """
    # Set up formatter.
    log_fmt = ('{}.%(funcName)s:%(levelname)s:%(asctime)s:'
               ' %(message)s'.format(logger.name))
    formatter = logging.Formatter(log_fmt)

    # Set up the file handler.
    file_handler = logging.FileHandler(filename)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # Resolve the level through the public API instead of the private
    # logging._nameToLevel table: getLevelName() returns the numeric level
    # for a known name and a string for unknown ones, so fall back to INFO
    # whenever the lookup does not yield an int.
    level = logging.getLevelName(log_level)
    if not isinstance(level, int):
        level = logging.INFO
    logger.setLevel(level)
class _AssertNoLogsContext(unittest.case._AssertLogsContext):
    """A context manager used to implement TestCase.assertNoLogs()."""
    # NOTE(review): subclasses a *private* unittest class; the attributes used
    # below (old_handlers, old_propagate, old_level, watcher, level) are
    # internal details that may change between Python versions.
    # pylint: disable=inconsistent-return-statements
    def __exit__(self, exc_type, exc_value, tb):
        """
        This is a modified version of TestCase._AssertLogsContext.__exit__(...)
        """
        # Restore the logger state captured by __enter__.
        self.logger.handlers = self.old_handlers
        self.logger.propagate = self.old_propagate
        self.logger.setLevel(self.old_level)
        if exc_type is not None:
            # let unexpected exceptions pass through
            return False
        # Inverse of assertLogs: fail if ANY record was captured.
        if self.watcher.records:
            msg = 'logs of level {} or higher triggered on {}:\n'.format(
                logging.getLevelName(self.level), self.logger.name)
            for record in self.watcher.records:
                msg += 'logger %s %s:%i: %s\n' % (record.name, record.pathname,
                                                  record.lineno,
                                                  record.getMessage())
            self._raiseFailure(msg)
|
https://github.com/calebclothier/GoogleDTC
|
calebclothier
|
import numpy as np
import matplotlib.pyplot as plt
from qiskit import IBMQ, assemble, transpile
from qiskit import *
from statsmodels.graphics.tsaplots import plot_acf
# Experiment parameters: qubit count, x-rotation magnitude g, Floquet cycles T.
N_QUBITS = 20
G = 0.98
T = 40
class DiscreteTimeCrystal:
    """Simulates the discrete-time-crystal experiment of
    https://arxiv.org/pdf/2107.13571.pdf on the IBMQ cloud QASM simulator.
    """
    def __init__(self, n_qubits: int) -> None:
        # Loading the IBMQ account performs network I/O at construction time.
        self.n_qubits = n_qubits
        provider = IBMQ.load_account()
        self.backend = provider.backend.ibmq_qasm_simulator
    def random_bitstring_circuit(self) -> QuantumCircuit:
        """
        Args:
            n_qubits: number of qubits in the circuit
        Returns:
            QuantumCircuit: object that creates a random bitstring from the ground state
        """
        qc = QuantumCircuit(self.n_qubits)
        random_state = np.random.randint(2, size=self.n_qubits)
        for i in range(self.n_qubits):
            if random_state[i]:
                qc.x(i)
        return qc
    def floquet_circuit(self, n_qubits: int, g: float) -> QuantumCircuit:
        """
        Args:
            n_qubits: number of qubits in the floquet_circuit
            g: parameter in range [0.5, 1] controlling the magnitude of x-rotation
        Returns:
            QuantumCircuit: implementation of the Floquet unitary circuit U_f described
            in https://arxiv.org/pdf/2107.13571.pdf
        """
        qc = QuantumCircuit(n_qubits)
        # X rotation by (pi * g)
        for i in range(n_qubits):
            qc.rx(np.pi * g, i)
        # Ising interaction (only coupling adjacent spins); even-indexed pairs
        # first, then odd-indexed pairs, with couplings phi ~ U(0.5, 1.5)
        for i in range(0, n_qubits-1, 2):
            phi = np.random.uniform(low=0.5, high=1.5)
            theta = np.pi * phi / 2
            qc.rzz(-theta, i, i+1)
        for i in range(1, n_qubits-1, 2):
            phi = np.random.uniform(low=0.5, high=1.5)
            theta = np.pi * phi / 2
            qc.rzz(-theta, i, i+1)
        # Longitudinal fields for disorder
        for i in range(n_qubits):
            h = np.random.uniform(low=-1, high=1)
            qc.rz(np.pi * h, i)
        return qc
    def mean_polarization(self, counts: dict, q_index: int) -> float:
        """
        Args:
            counts: dictionary of measurement results and corresponding counts
            q_index: index of qubit in question
        Returns:
            float: the mean polarization, in [-1, 1], of the qubit at q_index, as given
            by the counts dictionary
        """
        # NOTE(review): a measured '1' contributes +1 and '0' contributes -1 —
        # the opposite of the convention used by the functional version of this
        # code later in the file; confirm the intended sign of <Z>.
        exp, num_shots = 0, 0
        for bitstring in counts.keys():
            val = 1 if int(bitstring[self.n_qubits-q_index-1]) else -1
            exp += val * counts[bitstring]
            num_shots += counts[bitstring]
        return exp / num_shots
    def acf(self, series) -> list:
        """Return the normalized autocorrelation of ``series`` at every lag
        (lag 0 is included and always equals 1.0), rounded to 3 decimals."""
        n = len(series)
        data = np.asarray(series)
        mean = np.mean(data)
        c0 = np.sum((data - mean) ** 2) / float(n)
        def r(h):
            acf_lag = ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
            return round(acf_lag, 3)
        x = np.arange(n)  # lag 0 IS included (np.arange starts at 0)
        acf_coeffs = list(map(r, x))
        return acf_coeffs
    def simulate(self, initial_state: QuantumCircuit, T: float, g: float, plot=False) -> list:
        """Compose ``initial_state`` with t repetitions of a single disorder
        realization of U_f for t = 1..T-1, run each circuit on the backend,
        and return the per-cycle mean polarization of qubit 11."""
        exp_arr = []
        # One disorder realization is drawn here and reused for every t.
        floq_qc = self.floquet_circuit(self.n_qubits, g)
        for t in range(1, T):
            qc = QuantumCircuit(self.n_qubits)
            qc = qc.compose(initial_state)
            for i in range(t):
                qc = qc.compose(floq_qc)
            qc.measure_all()
            transpiled = transpile(qc, backend=self.backend)
            job = self.backend.run(transpiled)
            # NOTE(review): retrieving a job object we already hold looks
            # redundant — job.result() should suffice; confirm.
            retrieved_job = self.backend.retrieve_job(job.job_id())
            counts = retrieved_job.result().get_counts()
            exp = self.mean_polarization(counts, 11)
            exp_arr.append(exp)
        if plot:
            plt.plot(range(1, T), exp_arr, 'ms-')
            autocorr = self.acf(exp_arr)
            print(autocorr)
            plt.plot(range(1, T), autocorr, 'bs-')
            plt.show()
        return exp_arr
# Average the qubit-11 polarization autocorrelation over 36 random initial
# bitstrings (each run also draws a fresh disorder realization of U_f).
dtc = DiscreteTimeCrystal(n_qubits=N_QUBITS)
# NOTE(review): `exp` is never used below — dead variable.
exp = []
ac = np.zeros(shape=(T-1))
for j in range(36):
    print(j)
    initial_state = dtc.random_bitstring_circuit()
    q11_z_exp = dtc.simulate(initial_state=initial_state, T=T, g=G, plot=False)
    q11_z_ac = dtc.acf(q11_z_exp)
    ac += np.array(q11_z_ac)
    print(ac)
ac = ac / 36
plt.plot(range(1, T), ac, 'bs-')
plt.show()
|
https://github.com/calebclothier/GoogleDTC
|
calebclothier
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from qiskit import IBMQ, assemble, transpile
from qiskit import QuantumCircuit
N_QUBITS = 20 # Number of qubits used in Google paper
# Link to IBMQ account with API token
#IBMQ.save_account(API_TOKEN)
# Load IBMQ cloud-based QASM simulator (requires a saved IBMQ account;
# performs network I/O at import time).
provider = IBMQ.load_account()
backend = provider.backend.ibmq_qasm_simulator
def random_bitstring_circuit(n_qubits: int) -> QuantumCircuit:
    """
    Args:
        n_qubits: desired number of qubits in the bitstring
    Returns:
        QuantumCircuit: creates a random bitstring from the ground state
    """
    qc = QuantumCircuit(n_qubits)
    # Draw one random bit per qubit, then flip exactly the qubits whose bit is 1.
    drawn_bits = np.random.randint(2, size=n_qubits)
    for idx in np.flatnonzero(drawn_bits):
        qc.x(int(idx))
    return qc
def floquet_circuit(n_qubits: int, g: float) -> QuantumCircuit:
    """
    Args:
        n_qubits: number of qubits
        g: parameter controlling amount of x-rotation
    Returns:
        QuantumCircuit: circuit implementation of the unitary operator U_f as
                        detailed in https://arxiv.org/pdf/2107.13571.pdf
    """
    qc = QuantumCircuit(n_qubits)
    # Periodic driving pulse: X rotation by g*pi on every qubit.
    for qubit in range(n_qubits):
        qc.rx(g * np.pi, qubit)
    qc.barrier()
    # Ising interaction: couple adjacent spins, even-indexed pairs first and
    # then odd-indexed pairs, with random strengths phi ~ U(0.5, 1.5).
    for start in (0, 1):
        for left in range(start, n_qubits - 1, 2):
            phi = np.random.uniform(low=0.5, high=1.5)
            qc.rzz(-phi * np.pi / 2, left, left + 1)
    qc.barrier()
    # Random longitudinal fields introduce disorder.
    for qubit in range(n_qubits):
        h = np.random.uniform(low=-1, high=1)
        qc.rz(h * np.pi, qubit)
    return qc
def calculate_mean_polarization(n_qubits: int, counts: dict, q_index: int) -> float:
    """
    Args:
        n_qubits: total number of qubits
        counts: dictionary of bitstring measurement outcomes and their respective total counts
        q_index: index of qubit whose expected polarization we want to calculate
    Returns:
        float: the mean Z-polarization <Z>, in [-1, 1], of the qubit at q_index
    """
    # Qiskit bitstrings are little-endian, so qubit q_index lives at string
    # position n_qubits - q_index - 1; a measured 0 contributes +1, a 1 -> -1.
    char_pos = n_qubits - q_index - 1
    total_shots = sum(counts.values())
    signed_sum = sum(
        (1 if int(bitstring[char_pos]) == 0 else -1) * shots
        for bitstring, shots in counts.items()
    )
    return signed_sum / total_shots
def calculate_two_point_correlations(series: list) -> list:
"""
Args:
series: time-ordered list of expectation values for some random variable
Returns:
list: two point correlations <f(0)f(t)> of the random variable evaluated at all t>0
"""
n = len(series)
data = np.asarray(series)
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
acf_lag = ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
return round(acf_lag, 3)
x = np.arange(n) # Avoiding lag 0 calculation
acf_coeffs = list(map(r, x))
return acf_coeffs
def simulate(n_qubits: int, initial_state: QuantumCircuit, max_time_steps: int, g: float) -> np.ndarray:
    """Run the DTC experiment: compose ``initial_state`` with t repetitions of
    a single disorder realization of U_f for t = 0..max_time_steps, execute
    each circuit on the module-level ``backend``, and return an
    (n_qubits, max_time_steps+1) array of mean Z-polarizations."""
    mean_polarizations = np.zeros((n_qubits, max_time_steps+1))
    # One disorder realization is drawn here and reused for every t.
    floq_qc = floquet_circuit(n_qubits, g)
    for t in range(0, max_time_steps+1):
        if ((t % 5) == 0):
            print('Time t=%d' % t)
        qc = QuantumCircuit(n_qubits)
        qc = qc.compose(initial_state)
        for i in range(t):
            qc = qc.compose(floq_qc)
        qc.measure_all()
        transpiled = transpile(qc, backend)
        job = backend.run(transpiled)
        # NOTE(review): retrieving a job object we already hold looks
        # redundant — job.result() should suffice; confirm.
        retrieved_job = backend.retrieve_job(job.job_id())
        counts = retrieved_job.result().get_counts()
        for qubit in range(n_qubits):
            mean_polarizations[qubit,t] = calculate_mean_polarization(n_qubits, counts, q_index=qubit)
    return mean_polarizations
polarized_state = QuantumCircuit(N_QUBITS) # All qubits in |0> state
# Thermalizing regime: g = 0.6 (no time-crystal order expected).
thermal_z = simulate(n_qubits=N_QUBITS,
                     initial_state=polarized_state,
                     max_time_steps=50,
                     g=0.6)
# Heat map of <Z(t)> per qubit over Floquet cycles.
fig, ax = plt.subplots(figsize=(10,10))
im = ax.matshow(thermal_z, cmap='viridis')
plt.rcParams.update({'font.size': 15})
plt.rcParams['text.usetex'] = True
ax.set_xlabel('Floquet cycles (t)')
ax.xaxis.labelpad = 10
ax.set_ylabel('Qubit')
ax.set_xticks(np.arange(0, 51, 10))
ax.set_yticks(np.arange(0, N_QUBITS, 5))
ax.xaxis.set_label_position('top')
im.set_clim(-1, 1)
cbar = plt.colorbar(im, fraction=0.018, pad=0.04)
cbar.set_label(r'$\langle Z(t) \rangle$')
plt.show()
# Single-qubit trace (qubit 10) in the thermalizing regime.
plt.plot(thermal_z[10,:], 'bs-')
plt.xlabel('Floquet cycles (t)')
plt.tick_params(axis="x", bottom=True, top=False, labelbottom=True, labeltop=False)
plt.ylabel(r'$\langle Z(t) \rangle$')
# DTC regime: g = 0.97 (period-doubled response expected).
dtc_z = simulate(n_qubits=N_QUBITS,
                 initial_state=polarized_state,
                 max_time_steps=50,
                 g=0.97)
fig, ax = plt.subplots(figsize=(10,10))
im = ax.matshow(dtc_z, cmap='viridis')
plt.rcParams.update({'font.size': 15})
plt.rcParams['text.usetex'] = True
ax.set_xlabel('Floquet cycles (t)')
ax.xaxis.labelpad = 10
ax.set_ylabel('Qubit')
ax.set_xticks(np.arange(0, 51, 10))
ax.set_yticks(np.arange(0, N_QUBITS, 5))
ax.xaxis.set_label_position('top')
im.set_clim(-1, 1)
cbar = plt.colorbar(im, fraction=0.018, pad=0.04)
cbar.set_label(r'$\langle Z(t) \rangle$')
plt.show()
# Single-qubit trace (qubit 10) in the DTC regime.
plt.plot(dtc_z[10,:], 'bs-')
plt.xlabel('Floquet cycles (t)')
plt.tick_params(axis="x", bottom=True, top=False, labelbottom=True, labeltop=False)
plt.ylabel(r'$\langle Z(t) \rangle$')
|
https://github.com/calebclothier/GoogleDTC
|
calebclothier
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from qiskit import IBMQ, assemble, transpile
from qiskit import QuantumCircuit
N_QUBITS = 20 # Number of qubits used in Google paper
# Link to IBMQ account with API token
#IBMQ.save_account(API_TOKEN)
# Load IBMQ cloud-based QASM simulator (requires a saved IBMQ account;
# performs network I/O at import time).
provider = IBMQ.load_account()
backend = provider.backend.ibmq_qasm_simulator
def random_bitstring_circuit(n_qubits: int) -> QuantumCircuit:
    """
    Prepare a uniformly random computational-basis state.

    Args:
        n_qubits: desired number of qubits in the bitstring
    Returns:
        QuantumCircuit: circuit that flips a random subset of qubits from |0>
    """
    circuit = QuantumCircuit(n_qubits)
    # Draw one random bit per qubit; an X gate realizes each set bit.
    bits = np.random.randint(2, size=n_qubits)
    for qubit, bit in enumerate(bits):
        if bit:
            circuit.x(qubit)
    return circuit
def floquet_circuit(n_qubits: int, g: float) -> QuantumCircuit:
    """
    Build one Floquet cycle U_f as detailed in
    https://arxiv.org/pdf/2107.13571.pdf

    Args:
        n_qubits: number of qubits
        g: parameter controlling amount of x-rotation
    Returns:
        QuantumCircuit: one period of the driven, disordered Ising evolution
    """
    cycle = QuantumCircuit(n_qubits)
    # Driving pulse: Rx(g*pi) on every qubit.
    for qubit in range(n_qubits):
        cycle.rx(g * np.pi, qubit)
    cycle.barrier()
    # Nearest-neighbour Ising couplings with random strengths:
    # even bonds (0-1, 2-3, ...) first, then odd bonds (1-2, 3-4, ...).
    for start in (0, 1):
        for qubit in range(start, n_qubits - 1, 2):
            phi = np.random.uniform(low=0.5, high=1.5)
            cycle.rzz(-phi * np.pi / 2, qubit, qubit + 1)
    cycle.barrier()
    # Random longitudinal fields supplying the disorder.
    for qubit in range(n_qubits):
        cycle.rz(np.random.uniform(low=-1, high=1) * np.pi, qubit)
    return cycle
def calculate_mean_polarization(n_qubits: int, counts: dict, q_index: int) -> float:
    """
    Args:
        n_qubits: total number of qubits
        counts: map from measured bitstring to its number of occurrences
        q_index: index of the qubit whose polarization is wanted
    Returns:
        float: mean Z-polarization <Z>, in [-1, 1], of qubit q_index
    """
    # Qiskit bitstrings are little-endian: qubit q sits at string
    # position n_qubits - q - 1.
    position = n_qubits - q_index - 1
    weighted_sum = 0
    total_shots = 0
    for bitstring, hits in counts.items():
        sign = 1 if bitstring[position] == '0' else -1
        weighted_sum += sign * hits
        total_shots += hits
    return weighted_sum / total_shots
def calculate_two_point_correlations(series: list) -> list:
    """
    Args:
        series: time-ordered list of expectation values for some random variable
    Returns:
        list: normalized autocorrelation coefficients <f(0)f(h)> for every lag
              h = 0 .. len(series)-1, each rounded to 3 decimal places
              (lag 0 is included and always equals 1.0)
    """
    data = np.asarray(series)
    n = len(data)
    centered = data - np.mean(data)
    c0 = np.sum(centered ** 2) / float(n)

    def autocorrelation(lag):
        overlap = (centered[:n - lag] * centered[lag:]).sum() / float(n)
        return round(overlap / c0, 3)

    return [autocorrelation(lag) for lag in range(n)]
def simulate(n_qubits: int, initial_state: QuantumCircuit, max_time_steps: int, g: float) -> np.ndarray:
    """
    Evolve `initial_state` under t repetitions of the Floquet unitary for
    t = 0..max_time_steps and record each qubit's mean Z-polarization.

    Args:
        n_qubits: number of qubits
        initial_state: circuit preparing the starting state
        max_time_steps: largest number of Floquet cycles to simulate
        g: x-rotation parameter forwarded to floquet_circuit
    Returns:
        np.ndarray: shape (n_qubits, max_time_steps+1) array of <Z> values
                    (the original annotated `-> None` but always returned this)
    """
    mean_polarizations = np.zeros((n_qubits, max_time_steps+1))
    # One fixed disorder realization, reused for every t so the whole time
    # series sees the same random couplings/fields.
    floq_qc = floquet_circuit(n_qubits, g)
    for t in range(0, max_time_steps+1):
        if ((t % 5) == 0):
            print('Time t=%d' % t)
        qc = QuantumCircuit(n_qubits)
        qc = qc.compose(initial_state)
        for i in range(t):
            qc = qc.compose(floq_qc)
        qc.measure_all()
        transpiled = transpile(qc, backend)
        job = backend.run(transpiled)
        # job.result() blocks until the job finishes; the original's extra
        # backend.retrieve_job(job.job_id()) round-trip added nothing.
        counts = job.result().get_counts()
        for qubit in range(n_qubits):
            mean_polarizations[qubit,t] = calculate_mean_polarization(n_qubits, counts, q_index=qubit)
    return mean_polarizations
# --- Thermalizing regime (g = 0.6): <Z(t)> decays with cycle number. ---
polarized_state = QuantumCircuit(N_QUBITS) # All qubits in |0> state
thermal_z = simulate(n_qubits=N_QUBITS,
                     initial_state=polarized_state,
                     max_time_steps=50,
                     g=0.6)
# Heatmap of <Z(t)> for every qubit vs. Floquet cycle.
fig, ax = plt.subplots(figsize=(10,10))
im = ax.matshow(thermal_z, cmap='viridis')
plt.rcParams.update({'font.size': 15})
plt.rcParams['text.usetex'] = True
ax.set_xlabel('Floquet cycles (t)')
ax.xaxis.labelpad = 10
ax.set_ylabel('Qubit')
ax.set_xticks(np.arange(0, 51, 10))
ax.set_yticks(np.arange(0, N_QUBITS, 5))
ax.xaxis.set_label_position('top')
im.set_clim(-1, 1)
cbar = plt.colorbar(im, fraction=0.018, pad=0.04)
cbar.set_label(r'$\langle Z(t) \rangle$')
plt.show()
# Single-qubit time trace (bulk qubit 10).
plt.plot(thermal_z[10,:], 'bs-')
plt.xlabel('Floquet cycles (t)')
plt.tick_params(axis="x", bottom=True, top=False, labelbottom=True, labeltop=False)
plt.ylabel(r'$\langle Z(t) \rangle$')
# --- DTC regime (g = 0.97): period-doubled, persistent oscillations. ---
dtc_z = simulate(n_qubits=N_QUBITS,
                 initial_state=polarized_state,
                 max_time_steps=50,
                 g=0.97)
fig, ax = plt.subplots(figsize=(10,10))
im = ax.matshow(dtc_z, cmap='viridis')
plt.rcParams.update({'font.size': 15})
plt.rcParams['text.usetex'] = True
ax.set_xlabel('Floquet cycles (t)')
ax.xaxis.labelpad = 10
ax.set_ylabel('Qubit')
ax.set_xticks(np.arange(0, 51, 10))
ax.set_yticks(np.arange(0, N_QUBITS, 5))
ax.xaxis.set_label_position('top')
im.set_clim(-1, 1)
cbar = plt.colorbar(im, fraction=0.018, pad=0.04)
cbar.set_label(r'$\langle Z(t) \rangle$')
plt.show()
plt.plot(dtc_z[10,:], 'bs-')
plt.xlabel('Floquet cycles (t)')
plt.tick_params(axis="x", bottom=True, top=False, labelbottom=True, labeltop=False)
plt.ylabel(r'$\langle Z(t) \rangle$')
|
https://github.com/FMZennaro/QuantumGames
|
FMZennaro
|
import numpy as np
import gym
from IPython.display import display
import qcircuit
# Random-agent rollout on the qcircuit-v0 environment, rendering the
# circuit after every step.
env = gym.make('qcircuit-v0')
env.reset()
display(env.render())
done = False
while(not done):
    obs, _, done, info = env.step(env.action_space.sample())
    display(info['circuit_img'])
env.close()
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
# NOTE(review): the lambda closes over `env`, but DummyVecEnv invokes it
# inside its constructor -- before `env` is rebound -- so it wraps the
# original gym env, not itself.
env = DummyVecEnv([lambda: env])
# Train and demo a PPO2 agent.
modelPPO2 = PPO2(MlpPolicy, env, verbose=1)
modelPPO2.learn(total_timesteps=10000)
obs = env.reset()
display(env.render())
for _ in range(1):
    action, _states = modelPPO2.predict(obs)
    obs, _, done, info = env.step(action)
    display(info[0]['circuit_img'])
env.close()
from stable_baselines import A2C
# Train and demo an A2C agent on the same vectorized env.
modelA2C = A2C(MlpPolicy, env, verbose=1)
modelA2C.learn(total_timesteps=10000)
obs = env.reset()
display(env.render())
for _ in range(1):
    action, _states = modelA2C.predict(obs)
    obs, _, done, info = env.step(action)
    display(info[0]['circuit_img'])
env.close()
import evaluation
# Compare both trained agents against a random baseline.
n_episodes = 1000
PPO2_perf, _ = evaluation.evaluate_model(modelPPO2, env, num_steps=n_episodes)
A2C_perf, _ = evaluation.evaluate_model(modelA2C, env, num_steps=n_episodes)
env = gym.make('qcircuit-v0')
rand_perf, _ = evaluation.evaluate_random(env, num_steps=n_episodes)
print('Mean performance of random agent (out of {0} episodes): {1}'.format(n_episodes,rand_perf))
print('Mean performance of PPO2 agent (out of {0} episodes): {1}'.format(n_episodes,PPO2_perf))
print('Mean performance of A2C agent (out of {0} episodes): {1}'.format(n_episodes,A2C_perf))
|
https://github.com/FMZennaro/QuantumGames
|
FMZennaro
|
import numpy as np
import gym
from IPython.display import display
import qcircuit
# Same experiment as the v0 notebook, but on the harder qcircuit-v1 env
# and with 10-step demo rollouts.
env = gym.make('qcircuit-v1')
env.reset()
display(env.render())
done = False
while(not done):
    obs, _, done, info = env.step(env.action_space.sample())
    display(info['circuit_img'])
env.close()
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
# NOTE(review): lambda captures `env` before reassignment -- see v0 notebook.
env = DummyVecEnv([lambda: env])
modelPPO2 = PPO2(MlpPolicy, env, verbose=1)
modelPPO2.learn(total_timesteps=10000)
obs = env.reset()
display(env.render())
for _ in range(10):
    action, _states = modelPPO2.predict(obs)
    obs, _, done, info = env.step(action)
    display(info[0]['circuit_img'])
env.close()
from stable_baselines import A2C
modelA2C = A2C(MlpPolicy, env, verbose=1)
modelA2C.learn(total_timesteps=10000)
obs = env.reset()
display(env.render())
for _ in range(10):
    action, _states = modelA2C.predict(obs)
    obs, _, done, info = env.step(action)
    display(info[0]['circuit_img'])
env.close()
import evaluation
# Evaluate both agents and a random baseline over 1000 episodes.
n_episodes = 1000
PPO2_perf, _ = evaluation.evaluate_model(modelPPO2, env, num_steps=n_episodes)
A2C_perf, _ = evaluation.evaluate_model(modelA2C, env, num_steps=n_episodes)
env = gym.make('qcircuit-v1')
rand_perf, _ = evaluation.evaluate_random(env, num_steps=n_episodes)
print('Mean performance of random agent (out of {0} episodes): {1}'.format(n_episodes,rand_perf))
print('Mean performance of PPO2 agent (out of {0} episodes): {1}'.format(n_episodes,PPO2_perf))
print('Mean performance of A2C agent (out of {0} episodes): {1}'.format(n_episodes,A2C_perf))
|
https://github.com/FMZennaro/QuantumGames
|
FMZennaro
|
# Environment setup for the qiscoin RL experiments (IPython shell magics).
!conda create --name qiscoin python=3.7
!source activate qiscoin
!pip install qiskit
!pip install gym
!pip install stable-baselines
# stable-baselines (v2) requires TensorFlow 1.x.
!pip install tensorflow==1.14.0
!git clone https://github.com/FMZennaro/gym-qcircuit.git
!pip install -e gym-qcircuit
|
https://github.com/anpaschool/qiskit-toolkit
|
anpaschool
|
import numpy as np
import IPython
import ipywidgets as widgets
import colorsys
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit,QuantumRegister,ClassicalRegister
from qiskit import execute, Aer, BasicAer
from qiskit.visualization import plot_bloch_multivector
from qiskit.tools.jupyter import *
from qiskit.visualization import *
import os
import glob
import moviepy.editor as mpy
import seaborn as sns
sns.set()
'''========State Vector======='''
def getStateVector(qc):
    '''Simulate circuit *qc* and return its statevector as a 1-D amplitude array.'''
    simulator = BasicAer.get_backend('statevector_simulator')
    sim_result = execute(qc, simulator).result()
    return sim_result.get_statevector(qc)
def vec_in_braket(vec: np.ndarray) -> str:
    '''Return the bra-ket (Dirac) notation string for statevector *vec*.

    Amplitudes are rounded to 3 decimals; zero amplitudes are omitted, and
    the imaginary unit is rendered as "i" instead of Python's "j".

    FIX: the original used `np.str`, an alias removed in NumPy 1.20+ (gone
    in 1.24), which now raises AttributeError; plain `str` is the documented
    replacement and produces identical text.
    '''
    nqubits = int(np.log2(len(vec)))
    state = ''
    for i in range(len(vec)):
        rounded = round(vec[i], 3)
        if rounded != 0:
            basis = format(i, 'b').zfill(nqubits)
            state += str(rounded).replace('-0j', '+0j')
            state += '|' + basis + '\\rangle + '
    state = state.replace("j", "i")
    return state[0:-2].strip()
def vec_in_text_braket(vec):
    '''Wrap the bra-ket string of *vec* in a LaTeX display-math block.'''
    template = '$$\\text{{State:\n $|\\Psi\\rangle = $}}{}$$'
    return template.format(vec_in_braket(vec))
def writeStateVector(vec):
    '''Return an ipywidgets HTMLMath widget rendering *vec* in bra-ket notation.'''
    return widgets.HTMLMath(vec_in_text_braket(vec))
'''==========Bloch Sphere ========='''
def getBlochSphere(qc):
    '''Render every qubit of circuit *qc* on its own Bloch sphere.'''
    return plot_bloch_multivector(getStateVector(qc))
def getBlochSequence(path,figs):
    '''Save each Bloch-sphere figure in *figs* as <path>/rot_<i>.png.

    The numbered frames are later stitched into a gif by getBlochGif.
    '''
    # FIX: the original wrapped os.mkdir in a bare `except:` that swallowed
    # *every* error (permissions, bad path, ...), not just "already exists".
    # makedirs(exist_ok=True) handles the existing-directory case explicitly.
    os.makedirs(path, exist_ok=True)
    for i,fig in enumerate(figs):
        fig.savefig(path+"/rot_"+str(i)+".png")
    return
def getBlochGif(figs,path,fname,fps,remove = True):
    '''Create a gif movie <fname>.gif from the PNG frames found in *path*.

    NOTE(review): *figs* is unused -- frames are read back from the files
    written by getBlochSequence. Frames are ordered by the integer in
    "rot_<n>.png"; the sort key splits the full path on "_", so it assumes
    *path* itself contains no underscore.
    '''
    file_list = glob.glob(path + "/*.png")
    list.sort(file_list, key=lambda x: int(x.split('_')[1].split('.png')[0]))
    clip = mpy.ImageSequenceClip(file_list, fps=fps)
    clip.write_gif('{}.gif'.format(fname), fps=fps)
    '''remove all image files after gif creation'''
    if remove:
        for file in file_list:
            os.remove(file)
    return
'''=========Matrix================='''
def getMatrix(qc):
    '''Return the unitary matrix of circuit *qc* (numpy matrix, 3 decimals).

    NOTE(review): only valid for circuits without measurements, a
    requirement of the unitary_simulator backend.
    '''
    backend = BasicAer.get_backend('unitary_simulator')
    job = execute(qc, backend)
    ndArray = job.result().get_unitary(qc, decimals=3)
    Matrix = np.matrix(ndArray)
    return Matrix
def plotMatrix(M):
    '''Visualize complex matrix *M* as a seaborn heatmap.

    Cell color encodes |M[i,j]|; each cell is annotated with
    "<real> , <imag>", both truncated to 4 characters.
    '''
    MD = [["0" for i in range(M.shape[0])] for j in range(M.shape[1])]
    for i in range(M.shape[0]):
        for j in range(M.shape[1]):
            r = M[i,j].real
            im = M[i,j].imag
            # String slicing [0:4] truncates rather than rounds the values.
            MD[i][j] = str(r)[0:4]+ " , " +str(im)[0:4]
    plt.figure(figsize = [2*M.shape[1],M.shape[0]])
    sns.heatmap(np.abs(M),\
                annot = np.array(MD),\
                fmt = '',linewidths=.5,\
                cmap='Blues')
    return
'''=========Measurement========'''
def getCount(qc):
    '''Run *qc* on the QASM simulator (default shots) and return the
    {bitstring: count} histogram. Assumes *qc* contains measurements.'''
    backend= Aer.get_backend('qasm_simulator')
    result = execute(qc,backend).result()
    counts = result.get_counts(qc)
    return counts
def plotCount(counts, figsize):
    '''Plot a measurement-count histogram of size *figsize*.

    FIX: the original accepted *figsize* but never used it, and discarded
    the figure returned by plot_histogram (so nothing rendered outside a
    bare notebook cell). The figure is now sized and returned.
    '''
    return plot_histogram(counts, figsize=figsize)
'''========Phase============'''
def getPhaseCircle(vec):
    '''For each amplitude in *vec*, return its phase-circle descriptor:
    color (hue encodes the phase), magnitude and angle in [0, 2*pi).'''
    two_pi = np.pi * 2
    circles = []
    for amplitude in vec:
        # Shift by 4*pi before the modulo so negative angles map into [0, 2*pi).
        angle = (np.angle(amplitude) + 2 * two_pi) % two_pi
        colour = colorsys.hls_to_rgb(angle / two_pi, 0.5, 0.5)
        circles.append({"rgb": colour, "mag": np.abs(amplitude), "ang": angle})
    return circles
def getPhaseDict(QCs):
    '''Return, for every circuit in *QCs*, the list of phase-circle
    descriptors of its simulated statevector.'''
    return [getPhaseCircle(getStateVector(qc)) for qc in QCs]
def plotiPhaseCircle(phaseDict,depth,path,show=False,save=False):
    '''Plot a phase-circle diagram (depth on x, basis states on y) from the
    per-step phase dictionaries produced by getPhaseCircle/getPhaseDict.

    Each cell shows a white background circle plus a colored disc whose
    radius is the amplitude magnitude and whose hue/needle give the phase.
    '''
    r = 0.30      # background circle radius
    dx = 1.0      # x offset of the first column
    nqubit = len(phaseDict[0])
    fig = plt.figure(figsize = [depth,nqubit])
    for i in range(depth):
        x0 = i
        for j in range(nqubit):
            y0 = j+1
            # Bare except: steps not yet present in phaseDict (phaseDict may
            # be shorter than depth) are drawn as empty white circles.
            try:
                mag = phaseDict[i][j]['mag']
                ang = phaseDict[i][j]['ang']
                rgb = phaseDict[i][j]['rgb']
                ax=plt.gca()
                circle1= plt.Circle((dx+x0,y0), radius = r, color = 'white')
                ax.add_patch(circle1)
                circle2= plt.Circle((dx+x0,y0), radius= r*mag, color = rgb)
                ax.add_patch(circle2)
                line = plt.plot((dx+x0,dx+x0+(r*mag*np.cos(ang))),\
                                (y0,y0+(r*mag*np.sin(ang))),color = "black")
            except:
                ax=plt.gca()
                circle1= plt.Circle((dx+x0,y0), radius = r, color = 'white')
                ax.add_patch(circle1)
    plt.ylim(nqubit+1,0)
    plt.yticks([y+1 for y in range(nqubit)])
    plt.xticks([x for x in range(depth+2)])
    plt.xlabel("Circuit Depth")
    plt.ylabel("Basis States")
    # NOTE(review): when show=True the figure is saved *after* plt.show(),
    # which can write an empty image with some matplotlib backends.
    if show:
        plt.show()
        plt.savefig(path+".png")
        plt.close(fig)
    if save:
        plt.savefig(path +".png")
        plt.close(fig)
    return
def plotiPhaseCircle_rotated(phaseDict,depth,path,show=False,save=False):
    '''Same as plotiPhaseCircle, but transposed: basis states on the x axis
    and circuit depth increasing along y.'''
    r = 0.30      # background circle radius
    dy = 1.0      # y offset of the first row
    nqubit = len(phaseDict[0])
    fig = plt.figure(figsize = [nqubit,depth])
    for i in range(depth):
        y0 = i
        for j in range(nqubit):
            x0 = j+1
            # Bare except: missing steps render as empty white circles.
            try:
                mag = phaseDict[i][j]['mag']
                ang = phaseDict[i][j]['ang']
                rgb = phaseDict[i][j]['rgb']
                ax=plt.gca()
                circle1= plt.Circle((x0,dy+y0), radius = r, color = 'white')
                ax.add_patch(circle1)
                circle2= plt.Circle((x0,dy+y0), radius= r*mag, color = rgb)
                ax.add_patch(circle2)
                line = plt.plot((x0,x0+(r*mag*np.cos(ang))),\
                                (dy+y0,dy+y0+(r*mag*np.sin(ang))),color = "black")
            except:
                ax=plt.gca()
                circle1= plt.Circle((x0,dy+y0), radius = r, color = 'white')
                ax.add_patch(circle1)
    plt.ylim(0,depth+1)
    plt.yticks([x+1 for x in range(depth)])
    plt.xticks([y for y in range(nqubit+2)])
    plt.ylabel("Circuit Depth")
    plt.xlabel("Basis States")
    # NOTE(review): with show=True the savefig happens after plt.show();
    # some backends then write an empty image.
    if show:
        plt.show()
        plt.savefig(path+".png")
        plt.close(fig)
    if save:
        plt.savefig(path +".png")
        plt.close(fig)
    return
def getPhaseSequence(QCs,path,rotated=False):
    '''Save one phase-circle frame per circuit in *QCs* as
    <path>phase_<i>.png (growing diagram: frame i shows steps 0..i).

    NOTE(review): filenames are built as path + "phase_<i>" with no
    separator, so *path* is expected to end with "/".
    '''
    try:
        os.mkdir(path)
    except:
        print("Directory already exist")
    depth = len(QCs)
    phaseDict =[]
    for i,qc in enumerate(QCs):
        vec = getStateVector(qc)
        Phase = getPhaseCircle(vec)
        # phaseDict accumulates, so each saved frame contains all steps so far.
        phaseDict.append(Phase)
        ipath = path + "phase_" + str(i)
        if rotated:
            plotiPhaseCircle_rotated(phaseDict,depth,ipath,save=True,show=False)
        else:
            plotiPhaseCircle(phaseDict,depth,ipath,save=True,show=False)
    return
def getPhaseGif(path,fname,fps,remove = True):
    '''Create a gif movie <fname>.gif from the phase-circle PNG frames in *path*.'''
    file_list = glob.glob(path+ "/*.png")
    # Sort frames by the integer in "<prefix>_<n>.png". FIX: splitting only
    # the basename keeps this working when *path* itself contains an
    # underscore (the original split the full path and then crashed on int()).
    list.sort(file_list,
              key=lambda x: int(os.path.basename(x).split('_')[1].split('.png')[0]))
    clip = mpy.ImageSequenceClip(file_list, fps=fps)
    clip.write_gif('{}.gif'.format(fname), fps=fps)
    '''remove all image files after gif creation'''
    if remove:
        for file in file_list:
            os.remove(file)
    return
|
https://github.com/anpaschool/qiskit-toolkit
|
anpaschool
|
%matplotlib inline
import numpy as np
import IPython
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit
from qiskit.tools.jupyter import *
from qiskit.visualization import *
import seaborn as sns
sns.set()
from helper import *
import os
import glob
import moviepy.editor as mpy
# Demo 1: single qubit -- H, ten Rz(pi/5) steps, H; a Bloch-sphere frame and
# a circuit snapshot are captured after every stage.
figs = []
QCs = []
qc_e1 = QuantumCircuit(1)
figs.append(getBlochSphere(qc_e1.copy()))
QCs.append(qc_e1.copy())
qc_e1.h(0)
qc_e1.barrier()
figs.append(getBlochSphere(qc_e1.copy()))
QCs.append(qc_e1.copy())
for i in range(10):
    qc_e1.rz(np.pi/5, 0)
    figs.append(getBlochSphere(qc_e1.copy()))
    QCs.append(qc_e1.copy())
qc_e1.barrier()
qc_e1.h(0)
figs.append(getBlochSphere(qc_e1.copy()))
QCs.append(qc_e1.copy())
style = {'backgroundcolor': 'lavender'}
qc_e1.draw(output='mpl', style = style)
# Build the Bloch-sphere gif from the captured frames.
path = "plots/bloch1"
fname = "plots/bloch1"
fps = 5
getBlochSequence(path, figs)
getBlochGif(figs,path,fname,fps,remove = False)
print("gif file is ready!")
# Build the phase-circle gif (path must end in "/" for getPhaseSequence).
path = "plots/phase1/"
fname = "plots/phase1"
fps =5
getPhaseSequence(QCs,path)
getPhaseGif(path,fname,fps,remove = False)
print("gif file is ready!")
# Demo 2: two qubits -- H / u3 preparation, eight Rz(pi/4) steps on both
# qubits, then the inverse preparation.
figs = []
QCs = []
qc_e2 = QuantumCircuit(2)
figs.append(getBlochSphere(qc_e2.copy()))
QCs.append(qc_e2.copy())
qc_e2.h(0)
qc_e2.u3(np.pi/4,np.pi/4,0,1)
qc_e2.barrier()
figs.append(getBlochSphere(qc_e2.copy()))
QCs.append(qc_e2.copy())
for i in range(8):
    qc_e2.rz(np.pi/4, 0)
    qc_e2.rz(np.pi/4, 1)
    figs.append(getBlochSphere(qc_e2.copy()))
    QCs.append(qc_e2.copy())
qc_e2.barrier()
qc_e2.h(0)
qc_e2.u3(-np.pi/4,-np.pi/4,0,1)
figs.append(getBlochSphere(qc_e2.copy()))
QCs.append(qc_e2.copy())
style = {'backgroundcolor': 'lavender'}
qc_e2.draw(output='mpl', style = style)
path = "plots/bloch2/"
fname = "plots/bloch2"
fps = 5
getBlochSequence(path, figs)
getBlochGif(figs,path,fname,fps,remove = False)
print("gif file is ready!")
path = "plots/phase2/"
fname = "plots/phase2"
fps = 5
getPhaseSequence(QCs,path)
getPhaseGif(path,fname,fps=5,remove = False)
print("gif file is ready!")
# Demo 3: three qubits with different Pauli preparations and different u3
# rotation axes per qubit, then the inverse preparation.
figs = []
QCs = []
qc_e2 = QuantumCircuit(3)
figs.append(getBlochSphere(qc_e2.copy()))
QCs.append(qc_e2.copy())
qc_e2.h(0)
qc_e2.h(1)
qc_e2.h(2)
qc_e2.x(0)
qc_e2.y(1)
qc_e2.z(2)
qc_e2.barrier()
figs.append(getBlochSphere(qc_e2.copy()))
QCs.append(qc_e2.copy())
for i in range(8):
    qc_e2.u3(0,0,np.pi/4,0)
    qc_e2.u3(np.pi/4,0,0,1)
    qc_e2.u3(0,np.pi/4,0,2)
    figs.append(getBlochSphere(qc_e2.copy()))
    QCs.append(qc_e2.copy())
qc_e2.barrier()
qc_e2.x(0)
qc_e2.y(1)
qc_e2.z(2)
qc_e2.h(0)
qc_e2.h(1)
qc_e2.h(2)
figs.append(getBlochSphere(qc_e2.copy()))
QCs.append(qc_e2.copy())
style = {'backgroundcolor': 'lavender'}
qc_e2.draw(output='mpl', style = style)
path = "plots/bloch3/"
fname = "plots/bloch3"
fps = 5
getBlochSequence(path, figs)
getBlochGif(figs,path,fname,fps,remove = False)
print("gif file is ready!")
path = "plots/phase3/"
fname = "plots/phase3"
fps = 5
getPhaseSequence(QCs,path)
getPhaseGif(path,fname,fps=5,remove = False)
print("gif file is ready!")
# Demo 4: four qubits; like demo 3 plus an Rz-rotated fourth qubit, and the
# phase-circle gif is rendered in the rotated (transposed) layout.
figs = []
QCs = []
qc_e2 = QuantumCircuit(4)
figs.append(getBlochSphere(qc_e2.copy()))
QCs.append(qc_e2.copy())
qc_e2.h(0)
qc_e2.h(1)
qc_e2.h(2)
qc_e2.h(3)
qc_e2.x(0)
qc_e2.y(1)
qc_e2.z(2)
qc_e2.x(3)
qc_e2.barrier()
figs.append(getBlochSphere(qc_e2.copy()))
QCs.append(qc_e2.copy())
for i in range(8):
    qc_e2.u3(0,0,np.pi/4,0)
    qc_e2.u3(np.pi/4,0,0,1)
    qc_e2.u3(0,np.pi/4,0,2)
    qc_e2.rz(np.pi/4, 3)
    figs.append(getBlochSphere(qc_e2.copy()))
    QCs.append(qc_e2.copy())
qc_e2.barrier()
qc_e2.x(0)
qc_e2.y(1)
qc_e2.z(2)
qc_e2.x(3)
qc_e2.h(0)
qc_e2.h(1)
qc_e2.h(2)
qc_e2.h(3)
figs.append(getBlochSphere(qc_e2.copy()))
QCs.append(qc_e2.copy())
style = {'backgroundcolor': 'lavender'}
qc_e2.draw(output='mpl', style = style)
path = "plots/bloch4/"
fname = "plots/bloch4"
fps = 5
getBlochSequence(path, figs)
getBlochGif(figs,path,fname,fps,remove = False)
print("gif file is ready!")
path = "plots/phase4/"
fname = "plots/phase4"
fps = 5
getPhaseSequence(QCs,path,rotated=True)
#getPhaseSequence(QCs,path)
getPhaseGif(path,fname,fps=5,remove = False)
print("gif file is ready!")
# Demo 5: 3-qubit quantum Fourier transform, built gate by gate
# (H + controlled-phase ladder + final swap), with a frame after each stage.
figs = []
QCs = []
n = 3
q = QuantumRegister(n)
c = ClassicalRegister(n)
qc = QuantumCircuit(q,c)
figs.append(getBlochSphere(qc.copy()))
QCs.append(qc.copy())
qc.h(q[2])
qc.barrier()
figs.append(getBlochSphere(qc.copy()))
QCs.append(qc.copy())
qc.cu1(np.pi/2, q[1], q[2])
qc.barrier()
figs.append(getBlochSphere(qc.copy()))
QCs.append(qc.copy())
qc.h(q[1])
qc.barrier()
figs.append(getBlochSphere(qc.copy()))
QCs.append(qc.copy())
qc.cu1(np.pi/4, q[0], q[2])
qc.barrier()
figs.append(getBlochSphere(qc.copy()))
QCs.append(qc.copy())
qc.cu1(np.pi/2, q[0], q[1])
qc.barrier()
figs.append(getBlochSphere(qc.copy()))
QCs.append(qc.copy())
qc.h(q[0])
qc.barrier()
figs.append(getBlochSphere(qc.copy()))
QCs.append(qc.copy())
# Final swap reverses the qubit order, completing the QFT.
qc.swap(q[0], q[2])
figs.append(getBlochSphere(qc.copy()))
QCs.append(qc.copy())
path = "plots/qftb/"
fname = "plots/qftb"
fps = 5
getBlochSequence(path, figs)
getBlochGif(figs,path,fname,fps,remove = False)
print("gif file is ready!")
path = "plots/qftp/"
fname = "plots/qftp"
fps = 5
getPhaseSequence(QCs,path,rotated=True)
#getPhaseSequence(QCs,path)
getPhaseGif(path,fname,fps=5,remove = False)
print("gif file is ready!")
|
https://github.com/anpaschool/qiskit-toolkit
|
anpaschool
|
%matplotlib inline
import numpy as np
import IPython
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit
from qiskit.tools.jupyter import *
from qiskit.visualization import *
import seaborn as sns
sns.set()
from helper import *
import os
import glob
import moviepy.editor as mpy
# Quick tour of the helper utilities: statevector, Bloch sphere and circuit
# drawings for a few small Rz/H circuits.
qc1 = QuantumCircuit(1)
qc1.rz(np.pi/4, 0)
style = {'backgroundcolor': 'lavender'}
qc1.draw(output='mpl', style = style)
getStateVector(qc1)
getBlochSphere(qc1)
qc2 = QuantumCircuit(2)
qc2.rz(np.pi/4, 0)
qc2.rz(np.pi/4, 1)
style = {'backgroundcolor': 'lavender'}
qc2.draw(output='mpl', style = style)
# H, ten Rz(pi/5) steps, H on a single qubit (same circuit as demo 1).
qc_e1 = QuantumCircuit(1)
qc_e1.h(0)
qc_e1.barrier()
for i in range(10):
    qc_e1.rz(np.pi/5, 0)
qc_e1.barrier()
qc_e1.h(0)
style = {'backgroundcolor': 'lavender'}
qc_e1.draw(output='mpl', style = style)
# Two-qubit version with u3 preparation and its inverse.
qc = QuantumCircuit(2)
qc.h(0)
qc.u3(np.pi/4,np.pi/4,0,1)
qc.barrier()
for i in range(8):
    qc.rz(np.pi/4, 0)
    qc.rz(np.pi/4, 1)
qc.barrier()
qc.h(0)
qc.u3(-np.pi/4,-np.pi/4,0,1)
style = {'backgroundcolor': 'lavender'}
qc.draw(output='mpl', style = style)
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# Load the preprocessed train/dev splits and drop rows with missing values.
train_df = pd.read_csv("data/preprocessed/train_new.csv", index_col=None)
dev_df = pd.read_csv("data/preprocessed/dev_new.csv", index_col=None)
print(len(train_df), len(dev_df))
train_df = train_df.dropna()
dev_df = dev_df.dropna()
print(len(train_df), len(dev_df))
train_df.info()
# FIX: the original used `==` (a comparison whose result was discarded)
# instead of `=`, so the Text columns were never actually cast to str.
train_df['Text'] = train_df['Text'].astype(str)
dev_df['Text'] = dev_df['Text'].astype(str)
train_df.head()
from sklearn.feature_extraction.text import TfidfVectorizer
def whitespace_tokenizer(text: str):
    """Tokenize *text* by splitting on any run of whitespace."""
    return str.split(text)
train_texts = train_df['Text']
train_labels = train_df['Target']
dev_texts = dev_df['Text']
dev_labels = dev_df['Target']
# get tf-idf vectors (tokenizing only on whitespace -- text is pre-cleaned)
tfidf_vectorizer = TfidfVectorizer(tokenizer=whitespace_tokenizer)
train_tfidf = tfidf_vectorizer.fit_transform(train_texts)
dev_tfidf = tfidf_vectorizer.transform(dev_texts)
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
# Regularization sweep for the linear SVM baseline.
for c in [0.001, 0.01, 0.1, 1, 10, 100, 1000]:
    lsvc = LinearSVC(C=c)
    lsvc.fit(train_tfidf, train_labels)
    preds = lsvc.predict(dev_tfidf)
    print(f"C={c:6}, acc: {accuracy_score(dev_labels, preds):.3f}")
# Same regularization sweep for logistic regression.
for c in [0.001, 0.01, 0.1, 1, 10, 100, 1000]:
    lr = LogisticRegression(C=c)
    lr.fit(train_tfidf, train_labels)
    preds = lr.predict(dev_tfidf)
    print(f"C={c:6}, acc: {accuracy_score(dev_labels, preds):.3f}")
# Smoothing sweep for multinomial naive Bayes.
for a in [0.001, 0.01, 0.1, 1, 10, 100, 1000]:
    nb = MultinomialNB(alpha=a)
    nb.fit(train_tfidf, train_labels)
    preds = nb.predict(dev_tfidf)
    print(f"alpha={a: 6}, acc: {accuracy_score(dev_labels, preds):.3f}")
# Sweep neighbour count and weighting scheme for the k-NN baseline.
for n_neighbors in range(1, 10):
    for weights in ['uniform', 'distance']:
        # FIX: `weights` was printed but never passed to the classifier, so
        # both rows per k silently reported the same (uniform) model.
        knn = KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights)
        knn.fit(train_tfidf, train_labels)
        preds = knn.predict(dev_tfidf)
        print(f"n_neighbors={n_neighbors: 2}, weights={weights:9}, acc: {accuracy_score(dev_labels, preds):.3f}")
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import re
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk import pos_tag, ne_chunk
from nltk.chunk import tree2conlltags
import os
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
import warnings
warnings.filterwarnings("ignore")
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=120
print(os.getcwd())
# Twitter sentiment dataset: keep only Text/Target and restrict to the two
# binary classes (Positive / Negative).
columns = ["Id","Entity","Target","Text"]
data = pd.read_csv("/app/data/twitter_training.csv", names=columns,header=None)
data.head()
df_train = data[["Text","Target"]]
df_train = df_train.loc[(df_train["Target"]=='Positive') | (df_train["Target"]=='Negative')]
df_train.head()
df_train.info()
df_train= df_train.drop_duplicates()
df_train.info()
sns.countplot(x="Target",data=df_train)
# Same preparation for the validation split (duplicates are kept here).
data_val = pd.read_csv("/app/data/twitter_validation.csv", names=columns,header=None)
data_val.head()
df_val = data_val[['Text', 'Target']]
df_val = df_val.loc[(df_val['Target'] == 'Positive') | (df_val['Target'] == 'Negative')]
df_val.head()
df_val.info()
sns.countplot(x="Target",data=df_val)
# Matches @mentions, URLs, and any run of non-alphanumeric characters;
# preprocess() replaces every match with a single space.
text_cleaning_re = "@\S+|https?:\S+|http?:\S|[^A-Za-z0-9]+"
emoji_pattern = re.compile("["
                           u"\U0001F600-\U0001F64F"  # emoticons
                           u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                           u"\U0001F680-\U0001F6FF"  # transport & map symbols
                           u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                           "]+", flags=re.UNICODE)
# NOTE(review): the stemmer is created but never used by preprocess().
stemmer = SnowballStemmer('english')
def preprocess(text):
    """Lowercase *text*, strip mentions/URLs/non-alphanumerics and emoji,
    and collapse the remaining tokens into a single-space-separated string."""
    cleaned = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    cleaned = emoji_pattern.sub(r'', cleaned)
    return " ".join(cleaned.split())
# Clean both splits, then expand the abbreviation "im" -> "i am".
df_train["Text"] = df_train["Text"].apply(preprocess)
# FIX: \bim\b replaces only the standalone word "im"; the original
# substring replace also mangled words containing "im" (e.g. "time" ->
# "ti ame"). regex=True is passed explicitly because the pandas default
# changed across versions.
df_train["Text"] = df_train["Text"].str.replace(r"\bim\b", "i am", regex=True)
df_train["Text"].head()
df_val["Text"] = df_val["Text"].apply(preprocess)
df_val["Text"] = df_val["Text"].str.replace(r"\bim\b", "i am", regex=True)
df_val["Text"].head()
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import tarfile
from urllib.request import urlretrieve
from depccg.instance_models import MODEL_DIRECTORY
URL = 'https://qnlp.cambridgequantum.com/models/tri_headfirst.tar.gz'
print('Please consider using Bobcat, the parser included with lambeq,\n'
      'instead of depccg.')
def print_progress(chunk: int, chunk_size: int, size: int) -> None:
    """urlretrieve reporthook: overwrite the line with percent downloaded.

    NOTE(review): percentage can slightly exceed 100% on the final chunk;
    it is display-only.
    """
    percentage = chunk * chunk_size / size
    mb_size = size / 10**6
    print(f'\rDownloading model... {percentage:.1%} of {mb_size:.1f} MB',
          end='')
print(MODEL_DIRECTORY)
print('Downloading model...', end='')
# With no filename, urlretrieve downloads to a temp file and returns its path.
download, _ = urlretrieve(URL, reporthook=print_progress)
print('\nExtracting model...')
tarfile.open(download).extractall(MODEL_DIRECTORY)
print('Download successful')
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import random
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
from discopy import grammar
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from jax import numpy as np
import numpy
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser, TreeReaderMode
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel
from lambeq import Rewriter
from pytket.extensions.qiskit import AerBackend
import seaborn as sns
import matplotlib.pyplot as plt
from pytket.circuit.display import render_circuit_jupyter
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Training hyperparameters and cumulative split fractions of the shuffled
# corpus: first 8% train, next 1% dev, next 1% test.
BATCH_SIZE = 50
EPOCHS = 200
SEED = 0
TRAIN_INDEX_RATIO = 0.08
VAL_INDEX_RATIO = TRAIN_INDEX_RATIO + 0.01
TEST_INDEX_RATIO = VAL_INDEX_RATIO + 0.01
assert TEST_INDEX_RATIO <= 1
def load_pickled_dict_to_df(filename):
    """Load a pickled dict into a DataFrame, shuffle it reproducibly (SEED),
    and add a binary 'Sentiment' column (1 = 'Positive', 0 = anything else).

    NOTE: only call on trusted files -- pickle executes arbitrary code.
    """
    # FIX: the original passed open(filename, 'rb') straight into
    # pickle.load and leaked the file handle; the context manager closes it.
    with open(filename, 'rb') as fh:
        saved_dict = pickle.load(fh)
    df = pd.DataFrame.from_dict(saved_dict)
    df = df.sample(frac=1, random_state=SEED).reset_index(drop=True)
    df["Sentiment"] = [1 if target == "Positive" else 0 for target in df['target']]
    return df
# Three variants of the cleaned corpus: raw-cleaned, lemmatized, and
# lemmatized+stemmed.
cleaned_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data.pkl')
cleaned_lemmatized_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_lematize.pkl')
cleaned_lemmatized_stemmed_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_stem_lematize.pkl')
cleaned_qnlp = load_pickled_dict_to_df(cleaned_qnlp_filename)
cleaned_lemmatized_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_qnlp_filename)
cleaned__lemmatized_stemmed_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_stemmed_qnlp_filename)
cleaned_qnlp.head(10)
cleaned_qnlp.info()
sns.countplot(x = "target", data = cleaned_qnlp)
cleaned_lemmatized_qnlp.head(10)
cleaned_lemmatized_qnlp.info()
sns.countplot(x='target', data = cleaned_lemmatized_qnlp)
cleaned__lemmatized_stemmed_qnlp.head(10)
cleaned__lemmatized_stemmed_qnlp.info()
sns.countplot(x='target', data = cleaned__lemmatized_stemmed_qnlp)
# Sentence-to-diagram reader; alternative parsers are kept commented out.
# parser = BobcatParser(verbose='text')
# parser = DepCCGParser(root_cats=['S[dcl]'])
# parser = spiders_reader
parser = TreeReader(mode=TreeReaderMode.RULE_TYPE)
NUM_DATA = 2578
loss = lambda y_hat, y: -np.sum(y * np.log(y_hat)) / len(y)  # binary cross-entropy loss
acc = lambda y_hat, y: np.sum(np.round(y_hat) == y) / len(y) / 2  # half due to double-counting
rewriter = Rewriter(['prepositional_phrase', 'determiner', 'auxiliary', 'connector',
                     'coordination', 'object_rel_pronoun', 'subject_rel_pronoun',
                     'postadverb', 'preadverb'])
def rewrite(diagram):
    '''Simplify a sentence diagram before circuit conversion.

    The rewriter pass is currently disabled; only cup removal is applied.'''
    # diagram = rewriter(diagram)
    return remove_cups(diagram)
def create_diagrams_and_labels(total_df, NUM_DATA = 2578):
    """Parse sentences into diagrams and split them into train/dev/test.

    Parameters
    ----------
    total_df : pd.DataFrame
        Must have a 'data' column of sentences and a "Sentiment" column
        of {0, 1} labels.
    NUM_DATA : int
        Row count the module-level split ratios are applied to.

    Returns
    -------
    tuple
        (train_diagrams, train_labels, dev_diagrams, dev_labels,
         test_diagrams, test_labels), labels one-hot encoded.
    """
    total_text = total_df['data'].tolist()
    # One-hot encode: [1, 0] for positive, [0, 1] for negative.
    total_labels = [[t, 1 - t] for t in total_df["Sentiment"].tolist()]
    # Hoist the split boundaries instead of recomputing round() six times.
    train_end = round(NUM_DATA * TRAIN_INDEX_RATIO)
    dev_end = round(NUM_DATA * VAL_INDEX_RATIO)
    test_end = round(NUM_DATA * TEST_INDEX_RATIO)
    train_diagrams = parser.sentences2diagrams(total_text[:train_end])
    train_labels = total_labels[:train_end]
    dev_diagrams = parser.sentences2diagrams(total_text[train_end:dev_end])
    dev_labels = total_labels[train_end:dev_end]
    test_diagrams = parser.sentences2diagrams(total_text[dev_end:test_end])
    test_labels = total_labels[dev_end:test_end]
    return train_diagrams, train_labels, dev_diagrams, dev_labels, test_diagrams, test_labels
# ---- Experiment: lemmatized + stemmed dataset ----
data = cleaned__lemmatized_stemmed_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
# Remove cups from every diagram before converting to circuits.
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
# For comparison, parse the first sentence with BobcatParser as well.
alternate_parser = BobcatParser(verbose='text')
dig_0 = alternate_parser.sentence2diagram(cleaned__lemmatized_stemmed_qnlp['data'].tolist()[0])
grammar.draw(dig_0, figsize=(14,3), fontsize=12)
# One qubit per atomic type, one IQP layer, three params per single-qubit word.
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
# train_circuits_1[0].draw(figsize=(9, 12))
render_circuit_jupyter(train_circuits_1[0].to_tk())
# Inspect the trainable symbols of the first circuit.
[(s, s.size) for s in train_circuits_1[0].free_symbols]
# The model needs every circuit so the symbol table covers all splits.
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
# model_1 = TketModel.from_diagrams(all_circuits_1, backend_config=backend_config)
trainer_1 = QuantumTrainer(
    model_1,
    loss_function=loss,
    epochs=EPOCHS,
    optimizer=SPSAOptimizer,
    optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
    evaluate_functions={'acc': acc},
    evaluate_on_train=True,
    verbose = 'text',
    seed=0
)
train_dataset_1 = Dataset(
            train_circuits_1,
            train_labels_1,
            batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
# Plot loss (top row) and accuracy (bottom row) for train/dev sets.
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
# ---- Experiment: lemmatized-only dataset (same pipeline as above) ----
data = cleaned_lemmatized_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
# Remove cups before circuit conversion.
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
render_circuit_jupyter(train_circuits_1[0].to_tk())
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
trainer_1 = QuantumTrainer(
    model_1,
    loss_function=loss,
    epochs=EPOCHS,
    optimizer=SPSAOptimizer,
    optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
    evaluate_functions={'acc': acc},
    evaluate_on_train=True,
    verbose = 'text',
    seed=0
)
train_dataset_1 = Dataset(
            train_circuits_1,
            train_labels_1,
            batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
# Loss (top) and accuracy (bottom) curves for train/dev.
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
# ---- Experiment: plain-cleaned dataset ----
data = cleaned_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
# NOTE(review): unlike the two runs above, no ansatz is applied to this
# dataset's diagrams — train/dev/test_circuits_1 below are stale values
# from the previous experiment, so this run trains on the wrong circuits.
# Presumably the ansatz/circuit-building cell was dropped; confirm and
# regenerate the circuits from train/dev/test_diagrams_1 here.
render_circuit_jupyter(train_circuits_1[0].to_tk())
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
trainer_1 = QuantumTrainer(
    model_1,
    loss_function=loss,
    epochs=EPOCHS,
    optimizer=SPSAOptimizer,
    optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
    evaluate_functions={'acc': acc},
    evaluate_on_train=True,
    verbose = 'text',
    seed=0
)
train_dataset_1 = Dataset(
            train_circuits_1,
            train_labels_1,
            batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
# Loss (top) and accuracy (bottom) curves for train/dev.
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import random
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
from discopy import grammar
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from jax import numpy as np
import numpy
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser, TreeReaderMode
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel
from lambeq import Rewriter
from pytket.extensions.qiskit import AerBackend
import seaborn as sns
import matplotlib.pyplot as plt
from pytket.circuit.display import render_circuit_jupyter
# Pandas display settings for notebook output.
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
# Silence HuggingFace tokenizer fork warnings.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Training hyperparameters and cumulative split ratios:
# train = [0, 0.08), dev = [0.08, 0.09), test = [0.09, 0.10).
BATCH_SIZE = 50
EPOCHS = 200
SEED = 0
TRAIN_INDEX_RATIO = 0.08
VAL_INDEX_RATIO = TRAIN_INDEX_RATIO + 0.01
TEST_INDEX_RATIO = VAL_INDEX_RATIO + 0.01
assert TEST_INDEX_RATIO <= 1
def load_pickled_dict_to_df(filename):
    """Read a pickled dict into a DataFrame, shuffle it reproducibly
    (module-level SEED) and add a binary "Sentiment" column:
    1 for "Positive" targets, 0 otherwise."""
    raw = pickle.load(open(filename, 'rb'))
    frame = pd.DataFrame.from_dict(raw)
    # Full reproducible shuffle, then renumber the index.
    frame = frame.sample(frac=1, random_state=SEED).reset_index(drop=True)
    frame["Sentiment"] = [1 if label == "Positive" else 0 for label in frame['target']]
    return frame
# Paths to the three pre-processed dataset variants.
cleaned_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data.pkl')
cleaned_lemmatized_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_lematize.pkl')
cleaned_lemmatized_stemmed_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_stem_lematize.pkl')
cleaned_qnlp = load_pickled_dict_to_df(cleaned_qnlp_filename)
cleaned_lemmatized_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_qnlp_filename)
cleaned__lemmatized_stemmed_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_stemmed_qnlp_filename)
# Quick EDA: preview, schema and class balance for each variant.
cleaned_qnlp.head(10)
cleaned_qnlp.info()
sns.countplot(x = "target", data = cleaned_qnlp)
cleaned_lemmatized_qnlp.head(10)
cleaned_lemmatized_qnlp.info()
sns.countplot(x='target', data = cleaned_lemmatized_qnlp)
cleaned__lemmatized_stemmed_qnlp.head(10)
cleaned__lemmatized_stemmed_qnlp.info()
sns.countplot(x='target', data = cleaned__lemmatized_stemmed_qnlp)
# Alternative parsers kept for reference; TreeReader (RULE_TYPE) is used.
# parser = BobcatParser(verbose='text')
# parser = DepCCGParser(root_cats=['S[dcl]'])
# parser = spiders_reader
parser = TreeReader(mode=TreeReaderMode.RULE_TYPE)
NUM_DATA = 2578
loss = lambda y_hat, y: -np.sum(y * np.log(y_hat)) / len(y) # binary cross-entropy loss
acc = lambda y_hat, y: np.sum(np.round(y_hat) == y) / len(y) / 2 # half due to double-counting
# Rewrite rules available to simplify diagrams (unused in rewrite() below).
rewriter = Rewriter(['prepositional_phrase', 'determiner', 'auxiliary', 'connector',
                     'coordination', 'object_rel_pronoun', 'subject_rel_pronoun',
                     'postadverb', 'preadverb'])
def rewrite(diagram):
    """Prepare a diagram for circuit conversion; currently only removes cups."""
    # diagram = rewriter(diagram)
    return remove_cups(diagram)
def create_diagrams_and_labels(total_df, NUM_DATA = 2578):
    """Parse the frame's sentences into diagrams and split everything
    into train/dev/test using the module-level split ratios.

    Labels are one-hot encoded: [1, 0] = positive, [0, 1] = negative.
    Returns (train_diagrams, train_labels, dev_diagrams, dev_labels,
    test_diagrams, test_labels).
    """
    sentences = total_df['data'].tolist()
    onehot = [[s, 1 - s] for s in total_df["Sentiment"].tolist()]
    # Named boundaries make the three slices below easier to follow.
    train_end = round(NUM_DATA * TRAIN_INDEX_RATIO)
    dev_end = round(NUM_DATA * VAL_INDEX_RATIO)
    test_end = round(NUM_DATA * TEST_INDEX_RATIO)
    train_diagrams = parser.sentences2diagrams(sentences[:train_end])
    dev_diagrams = parser.sentences2diagrams(sentences[train_end:dev_end])
    test_diagrams = parser.sentences2diagrams(sentences[dev_end:test_end])
    return (train_diagrams, onehot[:train_end],
            dev_diagrams, onehot[train_end:dev_end],
            test_diagrams, onehot[dev_end:test_end])
# ---- Experiment: lemmatized + stemmed dataset ----
data = cleaned__lemmatized_stemmed_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
# Remove cups from every diagram before converting to circuits.
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
# For comparison, parse the first sentence with BobcatParser as well.
alternate_parser = BobcatParser(verbose='text')
dig_0 = alternate_parser.sentence2diagram(cleaned__lemmatized_stemmed_qnlp['data'].tolist()[0])
grammar.draw(dig_0, figsize=(14,3), fontsize=12)
# One qubit per atomic type, single IQP layer, 3 params per single-qubit word.
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
# train_circuits_1[0].draw(figsize=(9, 12))
render_circuit_jupyter(train_circuits_1[0].to_tk())
# Inspect the trainable symbols of the first circuit.
[(s, s.size) for s in train_circuits_1[0].free_symbols]
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
# model_1 = TketModel.from_diagrams(all_circuits_1, backend_config=backend_config)
trainer_1 = QuantumTrainer(
    model_1,
    loss_function=loss,
    epochs=EPOCHS,
    optimizer=SPSAOptimizer,
    optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
    evaluate_functions={'acc': acc},
    evaluate_on_train=True,
    verbose = 'text',
    seed=0
)
train_dataset_1 = Dataset(
            train_circuits_1,
            train_labels_1,
            batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
# Loss (top) and accuracy (bottom) curves for train/dev.
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
# Final held-out evaluation for this variant.
test_acc_1 = acc(model_1(test_circuits_1), test_labels_1)
print('Test accuracy:', test_acc_1)
# ---- Experiment: lemmatized-only dataset (same pipeline as above) ----
data = cleaned_lemmatized_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
render_circuit_jupyter(train_circuits_1[0].to_tk())
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
trainer_1 = QuantumTrainer(
    model_1,
    loss_function=loss,
    epochs=EPOCHS,
    optimizer=SPSAOptimizer,
    optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
    evaluate_functions={'acc': acc},
    evaluate_on_train=True,
    verbose = 'text',
    seed=0
)
train_dataset_1 = Dataset(
            train_circuits_1,
            train_labels_1,
            batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
# Loss (top) and accuracy (bottom) curves for train/dev.
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
# Final held-out evaluation for this variant.
test_acc_1 = acc(model_1(test_circuits_1), test_labels_1)
print('Test accuracy:', test_acc_1)
# ---- Experiment: plain-cleaned dataset ----
data = cleaned_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
# NOTE(review): no ansatz is applied to this dataset's diagrams — the
# *_circuits_1 lists used below are stale values from the previous
# experiment, so this run trains and evaluates on the wrong circuits.
# Presumably the ansatz/circuit-building cell was dropped; confirm and
# regenerate circuits from train/dev/test_diagrams_1 here.
render_circuit_jupyter(train_circuits_1[0].to_tk())
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
trainer_1 = QuantumTrainer(
    model_1,
    loss_function=loss,
    epochs=EPOCHS,
    optimizer=SPSAOptimizer,
    optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
    evaluate_functions={'acc': acc},
    evaluate_on_train=True,
    verbose = 'text',
    seed=0
)
train_dataset_1 = Dataset(
            train_circuits_1,
            train_labels_1,
            batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
# Loss (top) and accuracy (bottom) curves for train/dev.
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
test_acc_1 = acc(model_1(test_circuits_1), test_labels_1)
print('Test accuracy:', test_acc_1)
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import re
import spacy
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from numpy import random, unique
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel
from lambeq import SpacyTokeniser
from pytket.extensions.qiskit import AerBackend
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk import pos_tag, ne_chunk
from nltk.chunk import tree2conlltags
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
import nltk
# Fetch the NLTK corpora/models needed by tokenization, lemmatization
# and tagging below.
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
nltk.download('omw-1.4')
# Pandas display settings for notebook output.
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
spacy.load('en_core_web_sm')
# Keep only sentences of at most MAX_LENGTH space-separated words.
MAX_LENGTH = 5
BATCH_SIZE = 30
EPOCHS = 100
SEED = 0
# NOTE: `random` here is numpy's random module (see `from numpy import
# random` above), not the stdlib module.
random.seed(SEED)
# Sentinel length returned for non-string values (e.g. NaN from pandas)
# so such rows always fail a "length <= MAX_LENGTH" filter.
_NON_STRING_LENGTH = 9999999

def get_sent_length(sent):
    """Return the number of space-separated tokens in ``sent``.

    Non-string input yields a huge sentinel length so downstream length
    filters exclude it. Note ``split(" ")`` (not ``split()``) is kept
    deliberately: consecutive spaces produce empty tokens, matching the
    original behaviour.
    """
    # isinstance replaces the non-idiomatic `type(sent) is not str`.
    if not isinstance(sent, str):
        return _NON_STRING_LENGTH
    return len(sent.split(" "))
# Load the headerless twitter CSVs with explicit column names.
columns = ["Id","Entity","Target","Text"]
data = pd.read_csv(os.path.join(os.getcwd(),"data/twitter_training.csv"), names=columns,header=None)
#data = data.sample(frac=1).reset_index(drop=True)
data_val = pd.read_csv(os.path.join(os.getcwd(), "data/twitter_validation.csv"), names=columns,header=None)
#data_val = data.sample(frac=1).reset_index(drop=True)
df_train = data[["Text","Target"]]
# NOTE(review): `&` binds tighter than `|`, so this mask reads
# Positive OR (Negative AND not-NaN AND short) — the NaN/length filters
# presumably were meant to apply to BOTH classes. Also `x != np.nan` is
# always True (NaN compares unequal to everything); `notna()` was likely
# intended. Confirm before changing, as it alters the corpus.
df_train = df_train.loc[(df_train["Target"]=='Positive') | (df_train["Target"]=='Negative') & (df_train["Text"]!=np.nan)&(df_train["Text"].map(get_sent_length)<=MAX_LENGTH)]
df_train= df_train.drop_duplicates()
df_val = data_val[['Text', 'Target']]
# NOTE(review): same precedence/NaN issue as the training filter above.
df_val = df_val.loc[(df_val['Target'] == 'Positive') | (df_val['Target'] == 'Negative') & (df_val["Text"]!=np.nan)&(df_val["Text"].map(get_sent_length)<=MAX_LENGTH)]
# Pattern stripping @mentions, URLs and runs of non-alphanumerics.
text_cleaning_re = "@\S+|https?:\S+|http?:\S|[^A-Za-z0-9]+"
# Pattern matching common emoji codepoint ranges.
emoji_pattern = re.compile("["
                           u"\U0001F600-\U0001F64F"  # emoticons
                           u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                           u"\U0001F680-\U0001F6FF"  # transport & map symbols
                           u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                           "]+", flags=re.UNICODE)
def preprocess(text):
    """Clean a raw tweet into a normalised token string.

    Steps: strip mentions/URLs/non-alphanumerics, drop emoji, tokenize,
    keep alphabetic tokens only, lemmatize, stem, and re-join with spaces.

    Parameters
    ----------
    text : any
        Coerced to str before cleaning.

    Returns
    -------
    str
        Space-joined cleaned tokens (may be empty).
    """
    text = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    without_emoji = emoji_pattern.sub(r'', text)
    tokens = word_tokenize(str(without_emoji).replace("'", "").lower())
    # Remove punctuation/number-only tokens.
    without_punc = [w for w in tokens if w.isalpha()]
    # Instantiate the lemmatizer/stemmer once per call instead of once per
    # token, as the original comprehensions did.
    lemmatizer = WordNetLemmatizer()
    stemmer = PorterStemmer()
    text_len = [lemmatizer.lemmatize(t) for t in without_punc]
    text_cleaned = [stemmer.stem(w) for w in text_len]
    return " ".join(text_cleaned)
# Expand common contractions before preprocessing.
# NOTE(review): pandas str.replace here substitutes SUBSTRINGS, so e.g.
# "im" also rewrites the inside of words like "time" -> "ti ame"; a
# word-boundary regex (r"\bim\b") would be safer — confirm before
# changing, since it alters the corpus the models were trained on.
df_train["Text"]= df_train["Text"].str.replace("im","i am")
df_train["Text"]= df_train["Text"].str.replace("it's","it is")
df_train["Text"]= df_train["Text"].str.replace("you're","you are")
df_train["Text"]= df_train["Text"].str.replace("hasn't","has not")
df_train["Text"]= df_train["Text"].str.replace("haven't","have not")
df_train["Text"]= df_train["Text"].str.replace("don't","do not")
df_train["Text"]= df_train["Text"].str.replace("doesn't","does not")
df_train["Text"]= df_train["Text"].str.replace("won't","will not")
df_train["Text"]= df_train["Text"].str.replace("shouldn't","should not")
df_train["Text"]= df_train["Text"].str.replace("can't","can not")
df_train["Text"]= df_train["Text"].str.replace("couldn't","could not")
# Same expansion for the validation frame.
df_val["Text"] = df_val["Text"].str.replace("im","i am")
df_val["Text"]= df_val["Text"].str.replace("it's","it is")
df_val["Text"]= df_val["Text"].str.replace("you're","you are")
df_val["Text"]= df_val["Text"].str.replace("hasn't","has not")
df_val["Text"]= df_val["Text"].str.replace("haven't","have not")
df_val["Text"] = df_val["Text"].str.replace("don't","do not")
df_val["Text"] = df_val["Text"].str.replace("doesn't","does not")
df_val["Text"] = df_val["Text"].str.replace("won't","will not")
df_val["Text"] = df_val["Text"].str.replace("shouldn't","should not")
df_val["Text"] = df_val["Text"].str.replace("can't","can not")
df_val["Text"] = df_val["Text"].str.replace("couldn't","could not")
# Apply the full cleaning pipeline to both frames.
df_train["Text"] = df_train["Text"].apply(preprocess)
df_val["Text"] = df_val["Text"].apply(preprocess)
df_train = df_train.dropna()
df_val = df_val.dropna()
# Balance classes by truncating the larger class to the smaller one's size.
negative_train_df = df_train.loc[df_train["Target"]=="Negative"]
positive_train_df = df_train.loc[df_train["Target"]=='Positive']
if len(positive_train_df)>=len(negative_train_df):
    positive_train_df = positive_train_df.head(len(negative_train_df))
else:
    negative_train_df = negative_train_df.head(len(positive_train_df))
negative_val_df = df_val.loc[df_val['Target'] == 'Negative']
positive_val_df = df_val.loc[df_val['Target'] == 'Positive']
if len(positive_val_df)>=len(negative_val_df):
    positive_val_df = positive_val_df.head(len(negative_val_df))
else:
    negative_val_df = negative_val_df.head(len(positive_val_df))
df_train = pd.concat([positive_train_df, negative_train_df])
df_val = pd.concat([positive_val_df, negative_val_df])
# Positive sentiment to [0,1], negative sentiment to [1,0]
sentiment_train = []
sentiment_val = []
for i in df_train["Target"]:
    if i == "Positive":
        sentiment_train.append([0,1])
    else:
        sentiment_train.append([1,0])
df_train["Sentiment"] = sentiment_train
for i in df_val["Target"]:
    if i == "Positive":
        sentiment_val.append([0,1])
    else:
        sentiment_val.append([1,0])
df_val["Sentiment"] = sentiment_val
# Sanity checks: schema, samples and class balance after balancing.
df_train.info()
df_val.info()
df_train.head()
df_val.head()
sns.countplot(x = "Target", data = df_train)
sns.countplot(x = "Target", data = df_val)
train_data_all, train_label_all = df_train["Text"].tolist(), df_train["Sentiment"].tolist()
dev_data, dev_labels = df_val["Text"].tolist(), df_val["Sentiment"].tolist()
# Pool both frames, then re-split 80/10/10 after shuffling.
data = train_data_all+dev_data
labels = train_label_all+dev_labels
pairs = []
# Keep non-empty sentences of at most 5 space-separated words.
for c in zip(labels, data):
    if len(c[1]) != 0 and len(c[1].split(" "))<=5:
        pairs.append(c)
# `random` is numpy's random module here (imported above).
random.seed(0)
random.shuffle(pairs)
N_EXAMPLES = len(pairs)
print("Total: {}".format(N_EXAMPLES))
# Cumulative split boundaries: train 80%, test 10%, dev 10%.
TRAIN_RATIO_INDEX = 0.8
TEST_RATIO_INDEX = TRAIN_RATIO_INDEX + 0.1
DEV_RATIO_INDEX = TEST_RATIO_INDEX + 0.1
train_labels, train_data = zip(*pairs[:round(N_EXAMPLES * TRAIN_RATIO_INDEX)])
dev_labels, dev_data = zip(*pairs[round(N_EXAMPLES * TRAIN_RATIO_INDEX):round(N_EXAMPLES * TEST_RATIO_INDEX)])
test_labels, test_data = zip(*pairs[round(N_EXAMPLES * TEST_RATIO_INDEX):round(N_EXAMPLES * DEV_RATIO_INDEX)])
print("Data selected for train: {}\nData selected for test: {}\nData selected for dev: {}".format(len(train_data), len(test_data), len(dev_data)))
# Function for replacing low occuring word(s) with <unk> token
# NOTE(review): with the `< 1` threshold a Word box is replaced only when
# its name never occurs in `dataset` — but `dataset` is built from these
# same sentences, so the condition presumably never fires; a threshold
# like `< 2` may have been intended. Confirm before changing.
def replace(box):
    # Applied via a Functor over each diagram; `dataset` is the
    # module-level list of all words (with repetition).
    if isinstance(box, Word) and dataset.count(box.name) < 1:
        return Word('unk', box.cod, box.dom)
    return box
# Tokenise each split with spaCy, then re-join tokens with single spaces
# so downstream word counting is consistent.
tokeniser = SpacyTokeniser()
train_data = tokeniser.tokenise_sentences(train_data)
dev_data = tokeniser.tokenise_sentences(dev_data)
test_data = tokeniser.tokenise_sentences(test_data)
for i in range(len(train_data)):
    train_data[i] = ' '.join(train_data[i])
for i in range(len(dev_data)):
    dev_data[i] = ' '.join(dev_data[i])
for i in range(len(test_data)):
    test_data[i] = ' '.join(test_data[i])
# training set words (with repetition)
train_data_string = ' '.join(train_data)
train_data_list = train_data_string.split(' ')
# validation set words (with repetition)
dev_data_string = ' '.join(dev_data)
dev_data_list = dev_data_string.split(' ')
# test set words (with repetition)
test_data_string = ' '.join(test_data)
test_data_list = test_data_string.split(' ')
# dataset words (with repetition)
dataset = train_data_list + dev_data_list + test_data_list
# list of all unique words in the dataset
unique_words = unique(dataset)
# frequency for each unique word
counter = collections.Counter(dataset)
#print(counter)
# Functor that maps objects unchanged and rewrites Word boxes via replace().
replace_functor = Functor(ob=lambda x: x, ar=replace)
# parser = BobcatParser(verbose='text')
print(BobcatParser.available_models())
# spiders_reader is used instead of a grammar parser for this run.
parser = spiders_reader
#parser = DepCCGParser()
#parser = cups_reader
# Diagrams that parse successfully, with their labels kept in sync.
raw_train_diagrams = []
new_train_labels = []
raw_dev_diagrams = []
new_dev_labels = []
raw_test_diagrams = []
new_test_labels = []

def _parse_split(sentences, labels, diagrams_out, labels_out, split_name):
    """Parse each sentence; keep only (diagram, label) pairs that succeed.

    Failures are reported but skipped, so diagrams_out and labels_out
    stay aligned.
    """
    for sent, label in zip(sentences, labels):
        # `except Exception` (not a bare except) so KeyboardInterrupt /
        # SystemExit still propagate, unlike the original.
        try:
            diag = parser.sentence2diagram(sent)
        except Exception:
            print("Cannot be parsed in {}: {}".format(split_name, sent))
        else:
            diagrams_out.append(diag)
            labels_out.append(label)

_parse_split(train_data, train_labels, raw_train_diagrams, new_train_labels, "train")
_parse_split(dev_data, dev_labels, raw_dev_diagrams, new_dev_labels, "dev")
_parse_split(test_data, test_labels, raw_test_diagrams, new_test_labels, "test")
# Replace the label lists with the filtered, aligned versions.
train_labels = new_train_labels
dev_labels = new_dev_labels
test_labels = new_test_labels
# # Tokenizing low occuring words in each dataset
# Apply the replace functor over every diagram in each split.
for i in range(len(raw_train_diagrams)):
    raw_train_diagrams[i] = replace_functor(raw_train_diagrams[i])
for i in range(len(raw_dev_diagrams)):
    raw_dev_diagrams[i] = replace_functor(raw_dev_diagrams[i])
for i in range(len(raw_test_diagrams)):
    raw_test_diagrams[i] = replace_functor(raw_test_diagrams[i])
# sample sentence diagram (entry 1)
raw_train_diagrams[0].draw()
# merging all diagrams into one for checking the new words
raw_all_diagrams = raw_train_diagrams + raw_dev_diagrams + raw_test_diagrams
# removing cups (after performing top-to-bottom scan of the word diagrams)
train_diagrams = [remove_cups(diagram) for diagram in raw_train_diagrams]
dev_diagrams = [remove_cups(diagram) for diagram in raw_dev_diagrams]
test_diagrams = [remove_cups(diagram) for diagram in raw_test_diagrams]
# sample sentence diagram (entry 1)
train_diagrams[0].draw()
# One qubit per atomic type, single IQP layer, 3 params per single-qubit word.
ansatz = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
# train/test circuits
train_circuits = [ansatz(diagram) for diagram in train_diagrams]
dev_circuits = [ansatz(diagram) for diagram in dev_diagrams]
test_circuits = [ansatz(diagram) for diagram in test_diagrams]
# sample circuit diagram
train_circuits[0].draw(figsize=(9, 12))
# The model needs every circuit so its symbol table covers all splits.
all_circuits = train_circuits + dev_circuits + test_circuits
model = NumpyModel.from_diagrams(all_circuits, use_jit=True)
loss = lambda y_hat, y: -np.sum(y * np.log(y_hat)) / len(y)  # binary cross-entropy loss
acc = lambda y_hat, y: np.sum(np.round(y_hat) == y) / len(y) / 2  # half due to double-counting
trainer = QuantumTrainer(
    model,
    loss_function=loss,
    epochs=EPOCHS,
    optimizer=SPSAOptimizer,
    optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
    evaluate_functions={'acc': acc},
    evaluate_on_train=True,
    verbose = 'text',
    seed=0
)
train_dataset = Dataset(
            train_circuits,
            train_labels,
            batch_size=BATCH_SIZE)
val_dataset = Dataset(dev_circuits, dev_labels, shuffle=False)
trainer.fit(train_dataset, val_dataset, logging_step=12)
# Loss (top) and accuracy (bottom) curves for train/dev.
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(10, 6))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer.train_results['acc'], color=next(colours))
ax_tr.plot(trainer.val_costs, color=next(colours))
ax_br.plot(trainer.val_results['acc'], color=next(colours))
# Final held-out evaluation.
test_acc = acc(model(test_circuits), test_labels)
print('Test accuracy:', test_acc)
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import random
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
from discopy import grammar
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from jax import numpy as jnp
import numpy as np
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel
from lambeq import Rewriter
from pytket.extensions.qiskit import AerBackend
import seaborn as sns
import matplotlib.pyplot as plt
from pytket.circuit.display import render_circuit_jupyter
# Pandas display settings for notebook output.
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Smaller run than earlier sections: 2% train, 0.1% dev, 0.1% test.
BATCH_SIZE = 20
EPOCHS = 50
SEED = 0
TRAIN_INDEX_RATIO = 0.02
VAL_INDEX_RATIO = TRAIN_INDEX_RATIO + 0.001
TEST_INDEX_RATIO = VAL_INDEX_RATIO + 0.001
assert TEST_INDEX_RATIO <= 1
def load_pickled_dict_to_df(filename):
    """Load a pickled dict into a DataFrame, shuffle it reproducibly
    (module-level SEED) and add an integer "Sentiment" column:
    1 for "Positive" targets, 0 otherwise.
    """
    saved_dict = pickle.load(open(filename, 'rb'))
    df = pd.DataFrame.from_dict(saved_dict)
    # Full reproducible shuffle, then renumber the index.
    df = df.sample(frac=1, random_state=SEED).reset_index(drop=True)
    sentiment = []
    for i in df['target']:
        if i == "Positive":
            sentiment.append(1)
        else:
            sentiment.append(0)
    df["Sentiment"] = sentiment
    return df
# Paths to the three preprocessed variants of the tweet dataset.
cleaned_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data.pkl')
cleaned_lemmatized_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_lematize.pkl')
cleaned_lemmatized_stemmed_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_stem_lematize.pkl')
#cleaned_qnlp = load_pickled_dict_to_df(cleaned_qnlp_filename)
cleaned_lemmatized_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_qnlp_filename)
cleaned__lemmatized_stemmed_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_stemmed_qnlp_filename)
#cleaned_qnlp.head(10)
#cleaned_qnlp.info()
#sns.countplot(x = "target", data = cleaned_qnlp)
# Quick sanity checks and class-balance plots for the two loaded variants.
cleaned_lemmatized_qnlp.head(10)
cleaned_lemmatized_qnlp.info()
sns.countplot(x='target', data = cleaned_lemmatized_qnlp)
cleaned__lemmatized_stemmed_qnlp.head(10)
cleaned__lemmatized_stemmed_qnlp.info()
sns.countplot(x='target', data = cleaned__lemmatized_stemmed_qnlp)
# parser = BobcatParser(verbose='text')
# parser = DepCCGParser(root_cats=['S[dcl]'])
# parser = spiders_reader
# TreeReader chosen over the alternatives above (kept for reference).
parser = TreeReader()
NUM_DATA_1 = 2578
# Rewriter is currently unused by rewrite() below; kept for experimentation.
rewriter = Rewriter(['prepositional_phrase', 'determiner', 'auxiliary', 'connector',
                     'coordination', 'object_rel_pronoun', 'subject_rel_pronoun',
                     'postadverb', 'preadverb'])
def rewrite(diagram):
    """Normalise a DisCoCat diagram before ansatz conversion.

    The Rewriter pass is currently disabled (kept as a comment); only cup
    removal is applied, which shrinks the resulting circuits.
    """
    # diagram = rewriter(diagram)
    return remove_cups(diagram)
def create_diagrams_and_labels(total_df, NUM_DATA = 2578):
    """Parse the sentences in *total_df* and slice train/dev/test splits.

    Parameters
    ----------
    total_df : pd.DataFrame
        Must contain a 'data' column of sentences and a 0/1 'Sentiment' column.
    NUM_DATA : int
        Nominal dataset size combined with the module-level *_INDEX_RATIO
        constants to compute the split boundaries.

    Returns
    -------
    tuple
        (train_diagrams, train_labels, dev_diagrams, dev_labels,
         test_diagrams, test_labels) — labels are one-hot pairs.
    """
    total_text = total_df['data'].tolist()
    total_labels = total_df["Sentiment"].tolist()
    # [1, 0] for positive, [0, 1] for negative
    total_labels = [[t, 1-t] for t in total_labels]
    # Hoist the split boundaries instead of recomputing round() six times.
    train_end = round(NUM_DATA*TRAIN_INDEX_RATIO)
    dev_end = round(NUM_DATA*VAL_INDEX_RATIO)
    test_end = round(NUM_DATA*TEST_INDEX_RATIO)
    train_diagrams = parser.sentences2diagrams(total_text[:train_end])
    train_labels = total_labels[:train_end]
    dev_diagrams = parser.sentences2diagrams(total_text[train_end:dev_end])
    dev_labels = total_labels[train_end:dev_end]
    test_diagrams = parser.sentences2diagrams(total_text[dev_end:test_end])
    test_labels = total_labels[dev_end:test_end]
    return train_diagrams, train_labels, dev_diagrams, dev_labels, test_diagrams, test_labels
# Build the diagram splits from the stemmed+lemmatized dataset.
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(cleaned__lemmatized_stemmed_qnlp, NUM_DATA_1)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
# Remove cups from every diagram to reduce circuit width.
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
# Illustrative only: parse one sentence with Bobcat for a grammar drawing.
alternate_parser = BobcatParser(verbose='text')
dig_0 = alternate_parser.sentence2diagram(cleaned__lemmatized_stemmed_qnlp['data'].tolist()[0])
grammar.draw(dig_0, figsize=(14,3), fontsize=12)
# One qubit per atomic type, single IQP layer, 3 params per single-qubit word.
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
# train_circuits_1[0].draw(figsize=(9, 12))
render_circuit_jupyter(train_circuits_1[0].to_tk())
[(s, s.size) for s in train_circuits_1[0].free_symbols]
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
# Deterministic parameter ordering: collect every free symbol and sort it.
from sympy import default_sort_key
vocab_1 = sorted(
    {sym for circ in all_circuits_1 for sym in circ.free_symbols},
    key=default_sort_key
)
print(len(vocab_1))
# Random initial parameter vector, one entry per symbol in vocab_1.
params_1 = jnp.array(np.random.rand(len(vocab_1)))
from tqdm.notebook import tqdm
# Smoke test: bind parameters into each train circuit and evaluate it once.
np_circuits = []
for c in tqdm(train_circuits_1):
    np_circuits.append(c.lambdify(*vocab_1)(*params_1))
for c in tqdm(np_circuits):
    print(c.eval().array)
def sigmoid(x):
    """Elementwise logistic function, 1 / (1 + e^(-x))."""
    return jnp.reciprocal(1.0 + jnp.exp(-x))
def loss_1(tensors):
    """Binary cross-entropy of the training circuits at parameters *tensors*.

    Parameters
    ----------
    tensors : jnp.ndarray
        Flat parameter vector ordered like the module-level vocab_1.

    Returns
    -------
    jnp.ndarray
        Scalar mean cross-entropy (base 2) over the training set.
    """
    # Bind the symbolic circuit parameters to concrete values.
    np_circuits = [c.lambdify(*vocab_1)(*tensors) for c in train_circuits_1]
    # Evaluate each circuit ONCE (the original called c.eval() four times per
    # circuit) and convert the two output amplitudes to Born-rule weights.
    states = [c.eval().array for c in np_circuits]
    predictions = sigmoid(jnp.array(
        [[jnp.real(jnp.conjugate(s[0]) * s[0]),
          jnp.real(jnp.conjugate(s[1]) * s[1])] for s in states]))
    # Fix: the original referenced `train_targets_1`, which is never defined
    # in this script — the one-hot labels live in `train_labels_1`.
    targets = jnp.array(train_labels_1)
    # binary cross-entropy loss
    cost = -jnp.sum(targets * jnp.log2(predictions)) / len(train_labels_1)
    return cost
from jax import jit, grad
# JIT-compile the loss and its gradient for fast repeated evaluation.
training_loss = jit(loss_1)
gradient = jit(grad(loss_1))
training_losses = []
LR = 1.0
# Plain full-batch gradient descent over the circuit parameters.
for i in range(EPOCHS):
    gr = gradient(params_1)
    params_1 = params_1 - LR*gr
    # NOTE: loss is logged AFTER the update, so entry i is the post-step loss.
    training_losses.append(float(training_loss(params_1)))
    if (i + 1) % 1 == 0:  # modulus 1 logs every epoch; raise it to thin output
        print(f"Epoch {i + 1} - loss {training_losses[-1]}")
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import random
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
from discopy import grammar
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from jax import numpy as jnp
import torch
import numpy as np
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader,SpiderAnsatz
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel, PytorchModel, PytorchTrainer
from lambeq import Rewriter
from pytket.extensions.qiskit import AerBackend
import seaborn as sns
import matplotlib.pyplot as plt
from pytket.circuit.display import render_circuit_jupyter
# Display/environment setup for the PyTorch run.
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Hyper-parameters and cumulative split fractions for the PytorchTrainer run.
BATCH_SIZE = 20
EPOCHS = 100
SEED = 0
LEARNING_RATE = 3e-2
TRAIN_INDEX_RATIO = 0.08  # first 8% of the shuffled rows form the train set
VAL_INDEX_RATIO = TRAIN_INDEX_RATIO + 0.01
TEST_INDEX_RATIO = VAL_INDEX_RATIO + 0.01
assert TEST_INDEX_RATIO <= 1
def load_pickled_dict_to_df(filename):
    """Load a pickled dict into a shuffled DataFrame with a 0/1 Sentiment column.

    Parameters
    ----------
    filename : str
        Path to a pickle file holding a dict with a 'target' key of
        "Positive"/"Negative" strings.

    Returns
    -------
    pd.DataFrame
        Rows shuffled deterministically with the module-level SEED, plus an
        integer "Sentiment" column: 1 for "Positive", 0 otherwise.
    """
    # Context manager closes the handle; the original `pickle.load(open(...))`
    # leaked it.
    with open(filename, 'rb') as fh:
        saved_dict = pickle.load(fh)
    df = pd.DataFrame.from_dict(saved_dict)
    df = df.sample(frac=1, random_state=SEED).reset_index(drop=True)
    # Encode the textual target as a numeric label.
    df["Sentiment"] = [1 if t == "Positive" else 0 for t in df['target']]
    return df
# Paths to the three preprocessed dataset variants; only the
# stemmed+lemmatized one is used in this run.
cleaned_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data.pkl')
cleaned_lemmatized_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_lematize.pkl')
cleaned_lemmatized_stemmed_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_stem_lematize.pkl')
#cleaned_qnlp = load_pickled_dict_to_df(cleaned_qnlp_filename)
#cleaned_lemmatized_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_qnlp_filename)
cleaned__lemmatized_stemmed_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_stemmed_qnlp_filename)
#cleaned_qnlp.head(10)
#cleaned_qnlp.info()
#sns.countplot(x = "target", data = cleaned_qnlp)
#cleaned_lemmatized_qnlp.head(10)
#cleaned_lemmatized_qnlp.info()
#sns.countplot(x='target', data = cleaned_lemmatized_qnlp)
cleaned__lemmatized_stemmed_qnlp.head(10)
cleaned__lemmatized_stemmed_qnlp.info()
sns.countplot(x='target', data = cleaned__lemmatized_stemmed_qnlp)
parser = BobcatParser(verbose='text')
# parser = DepCCGParser(root_cats=['S[dcl]'])
# parser = spiders_reader
NUM_DATA = 2578
# Short alias used by accuracy() below.
sig = torch.sigmoid
def accuracy(y_hat, y):
    """Fraction of correctly-predicted labels.

    Logits are squashed through a sigmoid and rounded to {0, 1}; the match
    count is halved because the two-column one-hot labels double-count
    every example.
    """
    predicted = torch.round(torch.sigmoid(y_hat))
    matches = torch.eq(predicted, y)
    return torch.sum(matches) / len(y) / 2  # half due to double-counting
# Rewriter is currently unused by rewrite() below; kept for experimentation.
rewriter = Rewriter(['prepositional_phrase', 'determiner', 'auxiliary', 'connector',
                     'coordination', 'object_rel_pronoun', 'subject_rel_pronoun',
                     'postadverb', 'preadverb'])
def rewrite(diagram):
    """Normalise a DisCoCat diagram before ansatz conversion.

    The Rewriter pass is currently disabled (kept as a comment); only cup
    removal is applied, which shrinks the resulting circuits.
    """
    # diagram = rewriter(diagram)
    return remove_cups(diagram)
def create_diagrams_and_labels(total_df, NUM_DATA = 2578):
    """Parse the 'data' sentences of *total_df* into diagrams and slice
    train/dev/test splits per the module-level *_INDEX_RATIO constants.

    Returns (train_diagrams, train_labels, dev_diagrams, dev_labels,
    test_diagrams, test_labels); labels are one-hot pairs.
    """
    sentences = total_df['data'].tolist()
    raw_labels = total_df["Sentiment"].tolist()
    # [1, 0] for positive, [0, 1] for negative
    onehot = [[lbl, 1 - lbl] for lbl in raw_labels]
    cut_train = round(NUM_DATA * TRAIN_INDEX_RATIO)
    cut_dev = round(NUM_DATA * VAL_INDEX_RATIO)
    cut_test = round(NUM_DATA * TEST_INDEX_RATIO)
    train_diagrams = parser.sentences2diagrams(sentences[:cut_train])
    dev_diagrams = parser.sentences2diagrams(sentences[cut_train:cut_dev])
    test_diagrams = parser.sentences2diagrams(sentences[cut_dev:cut_test])
    return (train_diagrams, onehot[:cut_train],
            dev_diagrams, onehot[cut_train:cut_dev],
            test_diagrams, onehot[cut_dev:cut_test])
# Build the diagram splits and convert them to tensor networks.
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(cleaned__lemmatized_stemmed_qnlp)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
# ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
# Classical tensor ansatz (dimension 2 per atomic type) for PytorchModel.
ansatz_1 = SpiderAnsatz({AtomicType.NOUN: 2, AtomicType.SENTENCE: 2, AtomicType.PREPOSITIONAL_PHRASE: 2, AtomicType.NOUN_PHRASE:2, AtomicType.CONJUNCTION:2})
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
# Initialise the model over every circuit so all symbols get weights.
model_1 = PytorchModel.from_diagrams(all_circuits_1)
# Fix: the original had a stray extra ')' after this call, which made the
# whole script a SyntaxError.
trainer_1 = PytorchTrainer(
    model=model_1,
    loss_function=torch.nn.BCEWithLogitsLoss(),
    optimizer=torch.optim.AdamW, # type: ignore
    learning_rate=LEARNING_RATE,
    epochs=EPOCHS,
    evaluate_functions={"acc": accuracy},
    evaluate_on_train=True,
    verbose='text',
    seed=SEED)
train_dataset_1 = Dataset(
    train_circuits_1,
    train_labels_1,
    batch_size=BATCH_SIZE)
# Dev set is evaluated in order, so no shuffling.
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=5)
# 2x2 grid: losses on top, accuracies below; training left, dev right.
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
# Fix: the metric defined above is `accuracy`; `acc` was undefined here.
# Labels are wrapped in a tensor because accuracy() compares elementwise
# with torch.eq, which does not accept a plain list.
test_acc_1 = accuracy(model_1(test_circuits_1), torch.tensor(test_labels_1))
print('Test accuracy:', test_acc_1)
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
from tqdm.notebook import tqdm
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import re
import spacy
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from numpy import random, unique
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel
from lambeq import SpacyTokeniser
from pytket.extensions.qiskit import AerBackend
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk import pos_tag, ne_chunk
from nltk.chunk import tree2conlltags
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
import nltk
# One-off NLTK resource downloads needed by tokenisation/lemmatisation below.
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
nltk.download('omw-1.4')
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# NOTE(review): return value discarded — presumably only verifies the spaCy
# model is installed; confirm it is needed at all.
spacy.load('en_core_web_sm')
TOTAL_DATA_RATIO = 0.1 # only use part of the data
MAX_LENGTH = 10 # only use short tweets
def get_sent_length(sent):
    """Number of space-separated words in *sent*.

    Non-string input (e.g. NaN floats coming out of pandas) gets a huge
    sentinel length so the `<= MAX_LENGTH` filter always rejects it.
    """
    if not isinstance(sent, str):  # isinstance is the idiomatic type check
        return 9999999999999
    return len(sent.split(" "))
# Raw twitter sentiment CSVs have no header row; impose column names.
columns = ["Id","Entity","Target","Text"]
data = pd.read_csv(os.path.join(os.getcwd(),"data/twitter_training.csv"), names=columns,header=None)
#data = data.sample(frac=1).reset_index(drop=True)
data_val = pd.read_csv(os.path.join(os.getcwd(), "data/twitter_validation.csv"), names=columns,header=None)
#data_val = data.sample(frac=1).reset_index(drop=True)
df_train = data[["Text","Target"]]
# NOTE(review): `&` binds tighter than `|`, so the NaN/length filters only
# constrain the Negative branch — Positive rows bypass them. Also
# `!= np.nan` is ALWAYS True (NaN never compares equal); use .notna().
df_train = df_train.loc[(df_train["Target"]=='Positive') | (df_train["Target"]=='Negative') & (df_train["Text"]!=np.nan)&(df_train["Text"].map(get_sent_length)<=MAX_LENGTH)]
df_train= df_train.drop_duplicates()
df_val = data_val[['Text', 'Target']]
# NOTE(review): same precedence / NaN-comparison issues as above.
df_val = df_val.loc[(df_val['Target'] == 'Positive') | (df_val['Target'] == 'Negative') & (df_val["Text"]!=np.nan)&(df_val["Text"].map(get_sent_length)<=MAX_LENGTH)]
# NOTE(review): `http?:\S` matches "htt" + optional "p" — likely a typo; the
# preceding `https?:` alternative already covers http/https URLs.
text_cleaning_re = "@\S+|https?:\S+|http?:\S|[^A-Za-z0-9]+"
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
# Construct the (stateless) NLTK helpers once instead of once per token —
# the original rebuilt WordNetLemmatizer/PorterStemmer inside the
# comprehensions for every single word.
_LEMMATIZER = WordNetLemmatizer()
_STEMMER = PorterStemmer()
def preprocess(text):
    """Lower-case, strip handles/URLs/emoji, tokenize, lemmatize and stem.

    Returns the cleaned tokens re-joined with single spaces.
    """
    text = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    without_emoji = emoji_pattern.sub(r'', text)
    tokens = word_tokenize(str(without_emoji).replace("'", "").lower())
    # Keep alphabetic tokens only (drops punctuation and numbers).
    without_punc = [w for w in tokens if w.isalpha()]
    # Lemmatize, then stem.
    text_len = [_LEMMATIZER.lemmatize(t) for t in without_punc]
    text_cleaned = [_STEMMER.stem(w) for w in text_len]
    return " ".join(text_cleaned)
# Expand common contractions with one table instead of 26 near-identical
# statements. NOTE(review): these are substring replacements, so "im" also
# rewrites the inside of words like "time" — kept for parity with the
# original behaviour.
CONTRACTIONS = [
    ("im", "i am"), ("i'm", "i am"), ("I'm", "i am"), ("it's", "it is"),
    ("you're", "you are"), ("hasn't", "has not"), ("haven't", "have not"),
    ("don't", "do not"), ("doesn't", "does not"), ("won't", "will not"),
    ("shouldn't", "should not"), ("can't", "can not"), ("couldn't", "could not"),
]
for _old, _new in CONTRACTIONS:
    df_train["Text"] = df_train["Text"].str.replace(_old, _new)
for _old, _new in CONTRACTIONS:
    df_val["Text"] = df_val["Text"].str.replace(_old, _new)
df_train["Text"] = df_train["Text"].apply(preprocess)
df_val["Text"] = df_val["Text"].apply(preprocess)
df_train = df_train.dropna()
df_val = df_val.dropna()
# Positive sentiment to [0,1], negative sentiment to [1,0]
sentiment_train = []
sentiment_val = []
for i in df_train["Target"]:
    if i == "Positive":
        sentiment_train.append([0,1])
    else:
        sentiment_train.append([1,0])
df_train["Sentiment"] = sentiment_train
for i in df_val["Target"]:
    if i == "Positive":
        sentiment_val.append([0,1])
    else:
        sentiment_val.append([1,0])
df_val["Sentiment"] = sentiment_val
df_train.info()
df_val.info()
df_train.head()
df_val.head()
sns.countplot(x = "Target", data = df_train)
sns.countplot(x = "Target", data = df_val)
# Merge train and validation rows into a single pool of (label, text, target).
train_data_all, train_label_all, train_target_all = df_train["Text"].tolist(), df_train["Sentiment"].tolist(), df_train['Target'].tolist()
dev_data, dev_labels, dev_target = df_val["Text"].tolist(), df_val["Sentiment"].tolist(), df_val['Target'].tolist()
data = train_data_all+dev_data
labels = train_label_all+dev_labels
targets = train_target_all+dev_target
pairs = []
for c in zip(labels, data, targets):
    if len(c[1]) > 0:  # drop rows whose cleaned text is empty
        pairs.append(c)
# NOTE(review): `random` here is numpy.random (from `from numpy import
# random` above), which shadows the stdlib module AND the earlier
# `from random import shuffle`.
random.seed(0)
random.shuffle(pairs)
N_EXAMPLES = len(pairs)
print("Total: {}".format(N_EXAMPLES))
new_data = []
new_label = []
new_target = []
i = 0 # accepted positive examples
j = 0 # accepted negative examples
# Per-class quota: half of the requested subset size (hoisted out of the
# loop — it is loop-invariant).
quota = round(N_EXAMPLES*TOTAL_DATA_RATIO)//2
# Keep a class-balanced subset of sentences the CCG parser can handle.
for label, sent, target in tqdm(pairs):
    try:
        diag = parser.sentence2diagram(sent)
    except Exception:
        # Unparseable sentence: skip it. Narrowed from a bare `except:` so
        # KeyboardInterrupt/SystemExit still propagate.
        pass
    else:
        if i > quota and j > quota:
            break
        if target == "Positive" and i <= quota:
            new_data.append(sent)
            new_label.append(label)
            new_target.append(target)
            i = i + 1
        if target == 'Negative' and j <= quota:
            new_data.append(sent)
            new_label.append(label)
            new_target.append(target)
            j = j + 1
cleaned_qnlp_data = {"data":new_data, "label":new_label, "target":new_target}
# Context manager flushes and closes the file (the original leaked it).
with open("cleaned_qnlp_data_stem_lematize.pkl", "wb") as f:
    pickle.dump(cleaned_qnlp_data, f)
def get_sent_length(sent):
    """Word count of *sent*; non-strings (e.g. NaN) get a huge sentinel."""
    if type(sent) is not str:
        return 9999999999999
    return len(sent.split(" "))
# Same loading/filtering as the stem+lemmatize pipeline above.
columns = ["Id","Entity","Target","Text"]
data = pd.read_csv(os.path.join(os.getcwd(),"data/twitter_training.csv"), names=columns,header=None)
#data = data.sample(frac=1).reset_index(drop=True)
data_val = pd.read_csv(os.path.join(os.getcwd(), "data/twitter_validation.csv"), names=columns,header=None)
#data_val = data.sample(frac=1).reset_index(drop=True)
df_train = data[["Text","Target"]]
# NOTE(review): `&` binds tighter than `|` (length/NaN filters miss the
# Positive branch) and `!= np.nan` is always True — use .notna().
df_train = df_train.loc[(df_train["Target"]=='Positive') | (df_train["Target"]=='Negative') & (df_train["Text"]!=np.nan)&(df_train["Text"].map(get_sent_length)<=MAX_LENGTH)]
df_train= df_train.drop_duplicates()
df_val = data_val[['Text', 'Target']]
df_val = df_val.loc[(df_val['Target'] == 'Positive') | (df_val['Target'] == 'Negative') & (df_val["Text"]!=np.nan)&(df_val["Text"].map(get_sent_length)<=MAX_LENGTH)]
text_cleaning_re = "@\S+|https?:\S+|http?:\S|[^A-Za-z0-9]+"
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
# Construct the lemmatizer once — the original built a fresh
# WordNetLemmatizer for every token inside the comprehension.
_LEMMATIZER = WordNetLemmatizer()
def preprocess(text):
    """Lower-case, strip handles/URLs/emoji, tokenize and lemmatize
    (no stemming in this variant). Returns space-joined tokens."""
    text = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    without_emoji = emoji_pattern.sub(r'', text)
    tokens = word_tokenize(str(without_emoji).replace("'", "").lower())
    # Remove Puncs
    without_punc = [w for w in tokens if w.isalpha()]
    text_len = [_LEMMATIZER.lemmatize(t) for t in without_punc]
    return " ".join(text_len)
# Expand common contractions with one table instead of 26 near-identical
# statements. NOTE(review): substring replacements — "im" also rewrites the
# inside of words like "time"; kept for parity with the original behaviour.
CONTRACTIONS = [
    ("im", "i am"), ("i'm", "i am"), ("I'm", "i am"), ("it's", "it is"),
    ("you're", "you are"), ("hasn't", "has not"), ("haven't", "have not"),
    ("don't", "do not"), ("doesn't", "does not"), ("won't", "will not"),
    ("shouldn't", "should not"), ("can't", "can not"), ("couldn't", "could not"),
]
for _old, _new in CONTRACTIONS:
    df_train["Text"] = df_train["Text"].str.replace(_old, _new)
for _old, _new in CONTRACTIONS:
    df_val["Text"] = df_val["Text"].str.replace(_old, _new)
df_train["Text"] = df_train["Text"].apply(preprocess)
df_val["Text"] = df_val["Text"].apply(preprocess)
df_train = df_train.dropna()
df_val = df_val.dropna()
# Positive sentiment to [0,1], negative sentiment to [1,0]
sentiment_train = []
sentiment_val = []
for i in df_train["Target"]:
    if i == "Positive":
        sentiment_train.append([0,1])
    else:
        sentiment_train.append([1,0])
df_train["Sentiment"] = sentiment_train
for i in df_val["Target"]:
    if i == "Positive":
        sentiment_val.append([0,1])
    else:
        sentiment_val.append([1,0])
df_val["Sentiment"] = sentiment_val
# Pool train+validation rows into shuffled (label, text, target) triples.
train_data_all, train_label_all, train_target_all = df_train["Text"].tolist(), df_train["Sentiment"].tolist(), df_train['Target'].tolist()
dev_data, dev_labels, dev_target = df_val["Text"].tolist(), df_val["Sentiment"].tolist(), df_val['Target'].tolist()
data = train_data_all+dev_data
labels = train_label_all+dev_labels
targets = train_target_all+dev_target
pairs = []
for c in zip(labels, data, targets):
    if len(c[1]) > 0:  # drop rows whose cleaned text is empty
        pairs.append(c)
# NOTE(review): `random` is numpy.random here (shadows the stdlib module).
random.seed(0)
random.shuffle(pairs)
N_EXAMPLES = len(pairs)
print("Total: {}".format(N_EXAMPLES))
new_data = []
new_label = []
new_target = []
i = 0 # accepted positive examples
j = 0 # accepted negative examples
parser = BobcatParser(verbose='text')
# Per-class quota: half of the requested subset size (loop-invariant).
quota = round(N_EXAMPLES*TOTAL_DATA_RATIO)//2
# Keep a class-balanced subset of sentences the CCG parser can handle.
for label, sent, target in tqdm(pairs):
    try:
        diag = parser.sentence2diagram(sent)
    except Exception:
        # Unparseable sentence: skip it. Narrowed from a bare `except:` so
        # KeyboardInterrupt/SystemExit still propagate.
        pass
    else:
        if i > quota and j > quota:
            break
        if target == "Positive" and i <= quota:
            new_data.append(sent)
            new_label.append(label)
            new_target.append(target)
            i = i + 1
        if target == 'Negative' and j <= quota:
            new_data.append(sent)
            new_label.append(label)
            new_target.append(target)
            j = j + 1
cleaned_qnlp_data = {"data":new_data, "label":new_label, "target":new_target}
# Context manager flushes and closes the file (the original leaked it).
with open("cleaned_qnlp_data_lematize.pkl", "wb") as f:
    pickle.dump(cleaned_qnlp_data, f)
def get_sent_length(sent):
    """Return the space-separated token count, or a huge sentinel for
    non-string input (e.g. NaN)."""
    return len(sent.split(" ")) if type(sent) is str else 9999999999999
# Same loading/filtering as the two pipelines above.
columns = ["Id","Entity","Target","Text"]
data = pd.read_csv(os.path.join(os.getcwd(),"data/twitter_training.csv"), names=columns,header=None)
#data = data.sample(frac=1).reset_index(drop=True)
data_val = pd.read_csv(os.path.join(os.getcwd(), "data/twitter_validation.csv"), names=columns,header=None)
#data_val = data.sample(frac=1).reset_index(drop=True)
df_train = data[["Text","Target"]]
# NOTE(review): `&` binds tighter than `|` (length/NaN filters miss the
# Positive branch) and `!= np.nan` is always True — use .notna().
df_train = df_train.loc[(df_train["Target"]=='Positive') | (df_train["Target"]=='Negative') & (df_train["Text"]!=np.nan)&(df_train["Text"].map(get_sent_length)<=MAX_LENGTH)]
df_train= df_train.drop_duplicates()
df_val = data_val[['Text', 'Target']]
df_val = df_val.loc[(df_val['Target'] == 'Positive') | (df_val['Target'] == 'Negative') & (df_val["Text"]!=np.nan)&(df_val["Text"].map(get_sent_length)<=MAX_LENGTH)]
text_cleaning_re = "@\S+|https?:\S+|http?:\S|[^A-Za-z0-9]+"
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
def preprocess(text):
    """Lower-case *text*, strip handles/URLs/emoji and keep only alphabetic
    tokens (no lemmatisation in this variant). Returns space-joined tokens."""
    cleaned = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    no_emoji = emoji_pattern.sub(r'', cleaned)
    words = word_tokenize(str(no_emoji).replace("'", "").lower())
    # Drop punctuation and numeric tokens.
    alpha_only = [w for w in words if w.isalpha()]
    return " ".join(alpha_only)
# Expand common contractions with one table instead of 26 near-identical
# statements. NOTE(review): substring replacements — "im" also rewrites the
# inside of words like "time"; kept for parity with the original behaviour.
CONTRACTIONS = [
    ("im", "i am"), ("i'm", "i am"), ("I'm", "i am"), ("it's", "it is"),
    ("you're", "you are"), ("hasn't", "has not"), ("haven't", "have not"),
    ("don't", "do not"), ("doesn't", "does not"), ("won't", "will not"),
    ("shouldn't", "should not"), ("can't", "can not"), ("couldn't", "could not"),
]
for _old, _new in CONTRACTIONS:
    df_train["Text"] = df_train["Text"].str.replace(_old, _new)
for _old, _new in CONTRACTIONS:
    df_val["Text"] = df_val["Text"].str.replace(_old, _new)
df_train["Text"] = df_train["Text"].apply(preprocess)
df_val["Text"] = df_val["Text"].apply(preprocess)
df_train = df_train.dropna()
df_val = df_val.dropna()
# Positive sentiment to [0,1], negative sentiment to [1,0]
sentiment_train = []
sentiment_val = []
for i in df_train["Target"]:
    if i == "Positive":
        sentiment_train.append([0,1])
    else:
        sentiment_train.append([1,0])
df_train["Sentiment"] = sentiment_train
for i in df_val["Target"]:
    if i == "Positive":
        sentiment_val.append([0,1])
    else:
        sentiment_val.append([1,0])
df_val["Sentiment"] = sentiment_val
# Pool train+validation rows into shuffled (label, text, target) triples.
train_data_all, train_label_all, train_target_all = df_train["Text"].tolist(), df_train["Sentiment"].tolist(), df_train['Target'].tolist()
dev_data, dev_labels, dev_target = df_val["Text"].tolist(), df_val["Sentiment"].tolist(), df_val['Target'].tolist()
data = train_data_all+dev_data
labels = train_label_all+dev_labels
targets = train_target_all+dev_target
pairs = []
for c in zip(labels, data, targets):
    if len(c[1]) > 0:  # drop rows whose cleaned text is empty
        pairs.append(c)
# NOTE(review): `random` is numpy.random here (shadows the stdlib module).
random.seed(0)
random.shuffle(pairs)
N_EXAMPLES = len(pairs)
print("Total: {}".format(N_EXAMPLES))
new_data = []
new_label = []
new_target = []
i = 0 # accepted positive examples
j = 0 # accepted negative examples
parser = BobcatParser(verbose='text')
# Per-class quota: half of the requested subset size (loop-invariant).
quota = round(N_EXAMPLES*TOTAL_DATA_RATIO)//2
# Keep a class-balanced subset of sentences the CCG parser can handle.
for label, sent, target in tqdm(pairs):
    try:
        diag = parser.sentence2diagram(sent)
    except Exception:
        # Unparseable sentence: skip it. Narrowed from a bare `except:` so
        # KeyboardInterrupt/SystemExit still propagate.
        pass
    else:
        if i > quota and j > quota:
            break
        if target == "Positive" and i <= quota:
            new_data.append(sent)
            new_label.append(label)
            new_target.append(target)
            i = i + 1
        if target == 'Negative' and j <= quota:
            new_data.append(sent)
            new_label.append(label)
            new_target.append(target)
            j = j + 1
cleaned_qnlp_data = {"data":new_data, "label":new_label, "target":new_target}
# Context manager flushes and closes the file (the original leaked it).
with open("cleaned_qnlp_data.pkl", "wb") as f:
    pickle.dump(cleaned_qnlp_data, f)
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# Load the preprocessed splits for the classical baselines.
train_df = pd.read_csv("data/preprocessed/train_new.csv", index_col=None)
dev_df = pd.read_csv("data/preprocessed/dev_new.csv", index_col=None)
print(len(train_df), len(dev_df))
# Drop rows with missing text/labels before vectorising.
train_df = train_df.dropna()
dev_df = dev_df.dropna()
print(len(train_df), len(dev_df))
train_df.info()
# Fix: the original used `==` (a discarded element-wise comparison) where an
# assignment was clearly intended, so the str casts never took effect.
train_df['Text'] = train_df['Text'].astype(str)
dev_df['Text'] = dev_df['Text'].astype(str)
train_df.head()
from sklearn.feature_extraction.text import TfidfVectorizer
def whitespace_tokenizer(text: str):
    """Split *text* on runs of whitespace (used as the TF-IDF tokenizer)."""
    return text.split()
train_texts = train_df['Text']
train_labels = train_df['Target']
dev_texts = dev_df['Text']
dev_labels = dev_df['Target']
# get tf-idf vectors
tfidf_vectorizer = TfidfVectorizer(tokenizer=whitespace_tokenizer)
train_tfidf = tfidf_vectorizer.fit_transform(train_texts)
dev_tfidf = tfidf_vectorizer.transform(dev_texts)
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
# Grid-search the regularisation strength of a linear SVM.
for c in [0.001, 0.01, 0.1, 1, 10, 100, 1000]:
    lsvc = LinearSVC(C=c)
    lsvc.fit(train_tfidf, train_labels)
    preds = lsvc.predict(dev_tfidf)
    print(f"C={c:6}, acc: {accuracy_score(dev_labels, preds):.3f}")
# Grid-search the inverse regularisation strength of logistic regression.
for c in [0.001, 0.01, 0.1, 1, 10, 100, 1000]:
    lr = LogisticRegression(C=c)
    lr.fit(train_tfidf, train_labels)
    preds = lr.predict(dev_tfidf)
    print(f"C={c:6}, acc: {accuracy_score(dev_labels, preds):.3f}")
# Grid-search the additive smoothing of multinomial naive Bayes.
for a in [0.001, 0.01, 0.1, 1, 10, 100, 1000]:
    nb = MultinomialNB(alpha=a)
    nb.fit(train_tfidf, train_labels)
    preds = nb.predict(dev_tfidf)
    print(f"alpha={a: 6}, acc: {accuracy_score(dev_labels, preds):.3f}")
# Grid-search neighbour count and weighting scheme for k-NN.
for n_neighbors in range(1, 10):
    for weights in ['uniform', 'distance']:
        # Fix: `weights` was iterated but never passed to the classifier,
        # so both printed rows per k were identical ('uniform' behaviour).
        knn = KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights)
        knn.fit(train_tfidf, train_labels)
        preds = knn.predict(dev_tfidf)
        print(f"n_neighbors={n_neighbors: 2}, weights={weights:9}, acc: {accuracy_score(dev_labels, preds):.3f}")
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import re
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk import pos_tag, ne_chunk
from nltk.chunk import tree2conlltags
import os
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
import warnings
warnings.filterwarnings("ignore")
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=120
print(os.getcwd())
# Raw twitter sentiment CSVs have no header row; impose column names.
columns = ["Id","Entity","Target","Text"]
data = pd.read_csv("/app/data/twitter_training.csv", names=columns,header=None)
data.head()
# Keep only the binary-sentiment rows.
df_train = data[["Text","Target"]]
df_train = df_train.loc[(df_train["Target"]=='Positive') | (df_train["Target"]=='Negative')]
df_train.head()
df_train.info()
df_train= df_train.drop_duplicates()
df_train.info()
sns.countplot(x="Target",data=df_train)
data_val = pd.read_csv("/app/data/twitter_validation.csv", names=columns,header=None)
data_val.head()
df_val = data_val[['Text', 'Target']]
df_val = df_val.loc[(df_val['Target'] == 'Positive') | (df_val['Target'] == 'Negative')]
df_val.head()
df_val.info()
sns.countplot(x="Target",data=df_val)
# NOTE(review): `http?:\S` matches "htt" + optional "p" — likely a typo; the
# `https?:` alternative already covers http/https URLs.
text_cleaning_re = "@\S+|https?:\S+|http?:\S|[^A-Za-z0-9]+"
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
# NOTE(review): this stemmer is created but never applied in preprocess()
# below — confirm whether stemming was intended.
stemmer = SnowballStemmer('english')
def preprocess(text):
    """Lower-case *text*, strip mentions/URLs/non-alphanumerics and emoji,
    then collapse all whitespace runs to single spaces."""
    cleaned = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    cleaned = emoji_pattern.sub(r'', cleaned)
    # split()/join collapses whitespace — equivalent to the token loop it replaces.
    return " ".join(cleaned.split())
# Clean both splits, then expand the bare "im" token to "i am".
df_train["Text"] = df_train["Text"].apply(preprocess)
# BUG FIX: a plain substring replace rewrote "im" inside words too
# (e.g. "time" -> "ti ame", "him" -> "hi am"); match whole words only.
df_train["Text"] = df_train["Text"].str.replace(r"\bim\b", "i am", regex=True)
df_train["Text"].head()
df_val["Text"] = df_val["Text"].apply(preprocess)
df_val["Text"] = df_val["Text"].str.replace(r"\bim\b", "i am", regex=True)
df_val["Text"].head()
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import tarfile
from urllib.request import urlretrieve
from depccg.instance_models import MODEL_DIRECTORY
URL = 'https://qnlp.cambridgequantum.com/models/tri_headfirst.tar.gz'
print('Please consider using Bobcat, the parser included with lambeq,\n'
'instead of depccg.')
def print_progress(chunk: int, chunk_size: int, size: int) -> None:
    """urlretrieve reporthook: overwrite one status line with download progress."""
    done_fraction = chunk * chunk_size / size
    total_mb = size / 10**6
    # '\r' rewinds to the start of the line so each call overwrites the last.
    print(f'\rDownloading model... {done_fraction:.1%} of {total_mb:.1f} MB',
          end='')
print(MODEL_DIRECTORY)
print('Downloading model...', end='')
download, _ = urlretrieve(URL, reporthook=print_progress)
print('\nExtracting model...')
tarfile.open(download).extractall(MODEL_DIRECTORY)
print('Download successful')
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import random
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
from discopy import grammar
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from jax import numpy as np
import numpy
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser, TreeReaderMode
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel
from lambeq import Rewriter
from pytket.extensions.qiskit import AerBackend
import seaborn as sns
import matplotlib.pyplot as plt
from pytket.circuit.display import render_circuit_jupyter
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Training hyperparameters.
BATCH_SIZE = 50
EPOCHS = 200
SEED = 0
# Cumulative dataset fractions marking the train / dev / test boundaries
# (only a small slice of the data is used here).
TRAIN_INDEX_RATIO = 0.08
VAL_INDEX_RATIO = TRAIN_INDEX_RATIO + 0.01
TEST_INDEX_RATIO = VAL_INDEX_RATIO + 0.01
assert TEST_INDEX_RATIO <= 1
def load_pickled_dict_to_df(filename):
    """Load a pickled dict into a shuffled DataFrame with a binary label column.

    Args:
        filename: path to a pickle file containing a dict with (at least) a
            'target' key holding "Positive"/"Negative" strings.

    Returns:
        A row-shuffled (seeded by module-level SEED) DataFrame with an added
        'Sentiment' column: 1 for "Positive", 0 for everything else.
    """
    # Context manager so the file handle is closed promptly (the original
    # relied on GC to close the handle opened inline).
    with open(filename, 'rb') as fh:
        saved_dict = pickle.load(fh)
    df = pd.DataFrame.from_dict(saved_dict)
    df = df.sample(frac=1, random_state=SEED).reset_index(drop=True)
    df["Sentiment"] = [1 if t == "Positive" else 0 for t in df['target']]
    return df
cleaned_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data.pkl')
cleaned_lemmatized_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_lematize.pkl')
cleaned_lemmatized_stemmed_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_stem_lematize.pkl')
cleaned_qnlp = load_pickled_dict_to_df(cleaned_qnlp_filename)
cleaned_lemmatized_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_qnlp_filename)
cleaned__lemmatized_stemmed_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_stemmed_qnlp_filename)
cleaned_qnlp.head(10)
cleaned_qnlp.info()
sns.countplot(x = "target", data = cleaned_qnlp)
cleaned_lemmatized_qnlp.head(10)
cleaned_lemmatized_qnlp.info()
sns.countplot(x='target', data = cleaned_lemmatized_qnlp)
cleaned__lemmatized_stemmed_qnlp.head(10)
cleaned__lemmatized_stemmed_qnlp.info()
sns.countplot(x='target', data = cleaned__lemmatized_stemmed_qnlp)
# parser = BobcatParser(verbose='text')
# parser = DepCCGParser(root_cats=['S[dcl]'])
# parser = spiders_reader
parser = TreeReader(mode=TreeReaderMode.RULE_TYPE)
NUM_DATA = 2578
loss = lambda y_hat, y: -np.sum(y * np.log(y_hat)) / len(y) # binary cross-entropy loss
acc = lambda y_hat, y: np.sum(np.round(y_hat) == y) / len(y) / 2 # half due to double-counting
rewriter = Rewriter(['prepositional_phrase', 'determiner', 'auxiliary', 'connector',
'coordination', 'object_rel_pronoun', 'subject_rel_pronoun',
'postadverb', 'preadverb'])
def rewrite(diagram):
    """Simplify a diagram before circuit conversion.

    Only `remove_cups` is applied; the Rewriter pass defined above is
    currently disabled (left commented out).
    """
    # diagram = rewriter(diagram)
    return remove_cups(diagram)
def create_diagrams_and_labels(total_df, NUM_DATA = 2578):
    """Split texts/labels into train/dev/test slices and parse them to diagrams.

    Slice boundaries come from the module-level *_INDEX_RATIO constants,
    applied to the first NUM_DATA rows; parsing uses the module-level `parser`.

    Args:
        total_df: DataFrame with 'data' (text) and 'Sentiment' (0/1) columns.
        NUM_DATA: nominal dataset size used to scale the split ratios.

    Returns:
        (train_diagrams, train_labels, dev_diagrams, dev_labels,
         test_diagrams, test_labels); labels are one-hot pairs.
    """
    total_text = total_df['data'].tolist()
    # One-hot encode: [1, 0] for positive, [0, 1] for negative.
    total_labels = [[t, 1 - t] for t in total_df["Sentiment"].tolist()]
    # Compute each boundary once instead of re-rounding inline six times.
    train_end = round(NUM_DATA * TRAIN_INDEX_RATIO)
    dev_end = round(NUM_DATA * VAL_INDEX_RATIO)
    test_end = round(NUM_DATA * TEST_INDEX_RATIO)
    train_diagrams = parser.sentences2diagrams(total_text[:train_end])
    train_labels = total_labels[:train_end]
    dev_diagrams = parser.sentences2diagrams(total_text[train_end:dev_end])
    dev_labels = total_labels[train_end:dev_end]
    test_diagrams = parser.sentences2diagrams(total_text[dev_end:test_end])
    test_labels = total_labels[dev_end:test_end]
    return train_diagrams, train_labels, dev_diagrams, dev_labels, test_diagrams, test_labels
data = cleaned__lemmatized_stemmed_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
alternate_parser = BobcatParser(verbose='text')
dig_0 = alternate_parser.sentence2diagram(cleaned__lemmatized_stemmed_qnlp['data'].tolist()[0])
grammar.draw(dig_0, figsize=(14,3), fontsize=12)
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
# train_circuits_1[0].draw(figsize=(9, 12))
render_circuit_jupyter(train_circuits_1[0].to_tk())
[(s, s.size) for s in train_circuits_1[0].free_symbols]
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
# model_1 = TketModel.from_diagrams(all_circuits_1, backend_config=backend_config)
trainer_1 = QuantumTrainer(
model_1,
loss_function=loss,
epochs=EPOCHS,
optimizer=SPSAOptimizer,
optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
evaluate_functions={'acc': acc},
evaluate_on_train=True,
verbose = 'text',
seed=0
)
train_dataset_1 = Dataset(
train_circuits_1,
train_labels_1,
batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
data = cleaned_lemmatized_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
render_circuit_jupyter(train_circuits_1[0].to_tk())
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
trainer_1 = QuantumTrainer(
model_1,
loss_function=loss,
epochs=EPOCHS,
optimizer=SPSAOptimizer,
optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
evaluate_functions={'acc': acc},
evaluate_on_train=True,
verbose = 'text',
seed=0
)
train_dataset_1 = Dataset(
train_circuits_1,
train_labels_1,
batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
# Third experiment: the un-lemmatized/un-stemmed dataset.
data = cleaned_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
# BUG FIX: this run previously reused train/dev/test_circuits_1 built from the
# *previous* dataset — the ansatz/circuit-building step was missing — so the
# model trained on stale circuits. Rebuild circuits from this run's diagrams,
# mirroring the earlier runs.
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
render_circuit_jupyter(train_circuits_1[0].to_tk())
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
trainer_1 = QuantumTrainer(
model_1,
loss_function=loss,
epochs=EPOCHS,
optimizer=SPSAOptimizer,
optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
evaluate_functions={'acc': acc},
evaluate_on_train=True,
verbose = 'text',
seed=0
)
train_dataset_1 = Dataset(
train_circuits_1,
train_labels_1,
batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import random
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
from discopy import grammar
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from jax import numpy as np
import numpy
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser, TreeReaderMode
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel
from lambeq import Rewriter
from pytket.extensions.qiskit import AerBackend
import seaborn as sns
import matplotlib.pyplot as plt
from pytket.circuit.display import render_circuit_jupyter
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
BATCH_SIZE = 50
EPOCHS = 200
SEED = 0
TRAIN_INDEX_RATIO = 0.08
VAL_INDEX_RATIO = TRAIN_INDEX_RATIO + 0.01
TEST_INDEX_RATIO = VAL_INDEX_RATIO + 0.01
assert TEST_INDEX_RATIO <= 1
def load_pickled_dict_to_df(filename):
    """Load a pickled dict into a shuffled DataFrame with a binary label column.

    Args:
        filename: path to a pickle file containing a dict with (at least) a
            'target' key holding "Positive"/"Negative" strings.

    Returns:
        A row-shuffled (seeded by module-level SEED) DataFrame with an added
        'Sentiment' column: 1 for "Positive", 0 for everything else.
    """
    # Context manager so the file handle is closed promptly (the original
    # relied on GC to close the handle opened inline).
    with open(filename, 'rb') as fh:
        saved_dict = pickle.load(fh)
    df = pd.DataFrame.from_dict(saved_dict)
    df = df.sample(frac=1, random_state=SEED).reset_index(drop=True)
    df["Sentiment"] = [1 if t == "Positive" else 0 for t in df['target']]
    return df
cleaned_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data.pkl')
cleaned_lemmatized_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_lematize.pkl')
cleaned_lemmatized_stemmed_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_stem_lematize.pkl')
cleaned_qnlp = load_pickled_dict_to_df(cleaned_qnlp_filename)
cleaned_lemmatized_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_qnlp_filename)
cleaned__lemmatized_stemmed_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_stemmed_qnlp_filename)
cleaned_qnlp.head(10)
cleaned_qnlp.info()
sns.countplot(x = "target", data = cleaned_qnlp)
cleaned_lemmatized_qnlp.head(10)
cleaned_lemmatized_qnlp.info()
sns.countplot(x='target', data = cleaned_lemmatized_qnlp)
cleaned__lemmatized_stemmed_qnlp.head(10)
cleaned__lemmatized_stemmed_qnlp.info()
sns.countplot(x='target', data = cleaned__lemmatized_stemmed_qnlp)
# parser = BobcatParser(verbose='text')
# parser = DepCCGParser(root_cats=['S[dcl]'])
# parser = spiders_reader
parser = TreeReader(mode=TreeReaderMode.RULE_TYPE)
NUM_DATA = 2578
loss = lambda y_hat, y: -np.sum(y * np.log(y_hat)) / len(y) # binary cross-entropy loss
acc = lambda y_hat, y: np.sum(np.round(y_hat) == y) / len(y) / 2 # half due to double-counting
rewriter = Rewriter(['prepositional_phrase', 'determiner', 'auxiliary', 'connector',
'coordination', 'object_rel_pronoun', 'subject_rel_pronoun',
'postadverb', 'preadverb'])
def rewrite(diagram):
    """Simplify a diagram before circuit conversion.

    Only `remove_cups` is applied; the Rewriter pass defined above is
    currently disabled (left commented out).
    """
    # diagram = rewriter(diagram)
    return remove_cups(diagram)
def create_diagrams_and_labels(total_df, NUM_DATA = 2578):
    """Split texts/labels into train/dev/test slices and parse them to diagrams.

    Slice boundaries come from the module-level *_INDEX_RATIO constants,
    applied to the first NUM_DATA rows; parsing uses the module-level `parser`.

    Args:
        total_df: DataFrame with 'data' (text) and 'Sentiment' (0/1) columns.
        NUM_DATA: nominal dataset size used to scale the split ratios.

    Returns:
        (train_diagrams, train_labels, dev_diagrams, dev_labels,
         test_diagrams, test_labels); labels are one-hot pairs.
    """
    total_text = total_df['data'].tolist()
    # One-hot encode: [1, 0] for positive, [0, 1] for negative.
    total_labels = [[t, 1 - t] for t in total_df["Sentiment"].tolist()]
    # Compute each boundary once instead of re-rounding inline six times.
    train_end = round(NUM_DATA * TRAIN_INDEX_RATIO)
    dev_end = round(NUM_DATA * VAL_INDEX_RATIO)
    test_end = round(NUM_DATA * TEST_INDEX_RATIO)
    train_diagrams = parser.sentences2diagrams(total_text[:train_end])
    train_labels = total_labels[:train_end]
    dev_diagrams = parser.sentences2diagrams(total_text[train_end:dev_end])
    dev_labels = total_labels[train_end:dev_end]
    test_diagrams = parser.sentences2diagrams(total_text[dev_end:test_end])
    test_labels = total_labels[dev_end:test_end]
    return train_diagrams, train_labels, dev_diagrams, dev_labels, test_diagrams, test_labels
data = cleaned__lemmatized_stemmed_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
alternate_parser = BobcatParser(verbose='text')
dig_0 = alternate_parser.sentence2diagram(cleaned__lemmatized_stemmed_qnlp['data'].tolist()[0])
grammar.draw(dig_0, figsize=(14,3), fontsize=12)
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
# train_circuits_1[0].draw(figsize=(9, 12))
render_circuit_jupyter(train_circuits_1[0].to_tk())
[(s, s.size) for s in train_circuits_1[0].free_symbols]
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
# model_1 = TketModel.from_diagrams(all_circuits_1, backend_config=backend_config)
trainer_1 = QuantumTrainer(
model_1,
loss_function=loss,
epochs=EPOCHS,
optimizer=SPSAOptimizer,
optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
evaluate_functions={'acc': acc},
evaluate_on_train=True,
verbose = 'text',
seed=0
)
train_dataset_1 = Dataset(
train_circuits_1,
train_labels_1,
batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
test_acc_1 = acc(model_1(test_circuits_1), test_labels_1)
print('Test accuracy:', test_acc_1)
data = cleaned_lemmatized_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
render_circuit_jupyter(train_circuits_1[0].to_tk())
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
trainer_1 = QuantumTrainer(
model_1,
loss_function=loss,
epochs=EPOCHS,
optimizer=SPSAOptimizer,
optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
evaluate_functions={'acc': acc},
evaluate_on_train=True,
verbose = 'text',
seed=0
)
train_dataset_1 = Dataset(
train_circuits_1,
train_labels_1,
batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
test_acc_1 = acc(model_1(test_circuits_1), test_labels_1)
print('Test accuracy:', test_acc_1)
# Third experiment: the un-lemmatized/un-stemmed dataset.
data = cleaned_qnlp
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(data)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
# BUG FIX: this run previously reused train/dev/test_circuits_1 built from the
# *previous* dataset — the ansatz/circuit-building step was missing — so the
# model trained (and reported test accuracy) on stale circuits. Rebuild the
# circuits from this run's diagrams, mirroring the earlier runs.
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
render_circuit_jupyter(train_circuits_1[0].to_tk())
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = NumpyModel.from_diagrams(all_circuits_1, use_jit=True)
trainer_1 = QuantumTrainer(
model_1,
loss_function=loss,
epochs=EPOCHS,
optimizer=SPSAOptimizer,
optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
evaluate_functions={'acc': acc},
evaluate_on_train=True,
verbose = 'text',
seed=0
)
train_dataset_1 = Dataset(
train_circuits_1,
train_labels_1,
batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=1)
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
test_acc_1 = acc(model_1(test_circuits_1), test_labels_1)
print('Test accuracy:', test_acc_1)
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import re
import spacy
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from numpy import random, unique
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel
from lambeq import SpacyTokeniser
from pytket.extensions.qiskit import AerBackend
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk import pos_tag, ne_chunk
from nltk.chunk import tree2conlltags
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
nltk.download('omw-1.4')
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
spacy.load('en_core_web_sm')
MAX_LENGTH = 5
BATCH_SIZE = 30
EPOCHS = 100
SEED = 0
random.seed(SEED)
def get_sent_length(sent):
    """Return the number of space-separated words in *sent*.

    Non-string input (e.g. NaN floats coming out of pandas) gets a huge
    sentinel length so the `<= MAX_LENGTH` filter always rejects it.
    """
    # isinstance instead of `type(...) is not str` — also accepts str subclasses.
    if not isinstance(sent, str):
        return 9999999
    return len(sent.split(" "))
columns = ["Id","Entity","Target","Text"]
data = pd.read_csv(os.path.join(os.getcwd(),"data/twitter_training.csv"), names=columns,header=None)
#data = data.sample(frac=1).reset_index(drop=True)
data_val = pd.read_csv(os.path.join(os.getcwd(), "data/twitter_validation.csv"), names=columns,header=None)
#data_val = data.sample(frac=1).reset_index(drop=True)
# Keep only short Positive/Negative tweets with actual text.
df_train = data[["Text","Target"]]
# BUG FIX: `&` binds tighter than `|`, so the original expression
# `A | B & C & D` kept *all* Positive rows regardless of the NaN/length
# filters; parenthesize so both sentiments get the same filters.
# Also `x != np.nan` is always True (NaN != NaN) — use .notna() instead.
df_train = df_train.loc[
    ((df_train["Target"] == 'Positive') | (df_train["Target"] == 'Negative'))
    & df_train["Text"].notna()
    & (df_train["Text"].map(get_sent_length) <= MAX_LENGTH)
]
df_train = df_train.drop_duplicates()
df_val = data_val[['Text', 'Target']]
df_val = df_val.loc[
    ((df_val['Target'] == 'Positive') | (df_val['Target'] == 'Negative'))
    & df_val["Text"].notna()
    & (df_val["Text"].map(get_sent_length) <= MAX_LENGTH)
]
text_cleaning_re = "@\S+|https?:\S+|http?:\S|[^A-Za-z0-9]+"
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
def preprocess(text):
    """Clean a tweet for parsing.

    Steps: lower-case and strip mentions/URLs/non-alphanumerics, remove
    emoji, tokenize, drop non-alphabetic tokens, then lemmatize and stem
    each remaining word. Returns the cleaned words joined by spaces.
    """
    text = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    without_emoji = emoji_pattern.sub(r'', text)
    tokens = word_tokenize(str(without_emoji).replace("'", "").lower())
    # PERF: instantiate once per call — the original built a brand-new
    # WordNetLemmatizer/PorterStemmer for every single token.
    lemmatizer = WordNetLemmatizer()
    stemmer = PorterStemmer()
    # Drop punctuation/number tokens.
    words = [w for w in tokens if w.isalpha()]
    lemmas = [lemmatizer.lemmatize(t) for t in words]
    text_cleaned = [stemmer.stem(w) for w in lemmas]
    return " ".join(text_cleaned)
# Expand common contractions before cleaning (preprocess strips apostrophes).
# BUG FIX: replacing "im" as a plain substring also rewrote words such as
# "time" and "him"; match it only as a standalone word.
df_train["Text"] = df_train["Text"].str.replace(r"\bim\b", "i am", regex=True)
df_train["Text"]= df_train["Text"].str.replace("it's","it is")
df_train["Text"]= df_train["Text"].str.replace("you're","you are")
df_train["Text"]= df_train["Text"].str.replace("hasn't","has not")
df_train["Text"]= df_train["Text"].str.replace("haven't","have not")
df_train["Text"]= df_train["Text"].str.replace("don't","do not")
df_train["Text"]= df_train["Text"].str.replace("doesn't","does not")
df_train["Text"]= df_train["Text"].str.replace("won't","will not")
df_train["Text"]= df_train["Text"].str.replace("shouldn't","should not")
df_train["Text"]= df_train["Text"].str.replace("can't","can not")
df_train["Text"]= df_train["Text"].str.replace("couldn't","could not")
df_val["Text"] = df_val["Text"].str.replace(r"\bim\b", "i am", regex=True)
df_val["Text"]= df_val["Text"].str.replace("it's","it is")
df_val["Text"]= df_val["Text"].str.replace("you're","you are")
df_val["Text"]= df_val["Text"].str.replace("hasn't","has not")
df_val["Text"]= df_val["Text"].str.replace("haven't","have not")
df_val["Text"] = df_val["Text"].str.replace("don't","do not")
df_val["Text"] = df_val["Text"].str.replace("doesn't","does not")
df_val["Text"] = df_val["Text"].str.replace("won't","will not")
df_val["Text"] = df_val["Text"].str.replace("shouldn't","should not")
df_val["Text"] = df_val["Text"].str.replace("can't","can not")
df_val["Text"] = df_val["Text"].str.replace("couldn't","could not")
df_train["Text"] = df_train["Text"].apply(preprocess)
df_val["Text"] = df_val["Text"].apply(preprocess)
df_train = df_train.dropna()
df_val = df_val.dropna()
negative_train_df = df_train.loc[df_train["Target"]=="Negative"]
positive_train_df = df_train.loc[df_train["Target"]=='Positive']
if len(positive_train_df)>=len(negative_train_df):
positive_train_df = positive_train_df.head(len(negative_train_df))
else:
negative_train_df = negative_train_df.head(len(positive_train_df))
negative_val_df = df_val.loc[df_val['Target'] == 'Negative']
positive_val_df = df_val.loc[df_val['Target'] == 'Positive']
if len(positive_val_df)>=len(negative_val_df):
positive_val_df = positive_val_df.head(len(negative_val_df))
else:
negative_val_df = negative_val_df.head(len(positive_val_df))
df_train = pd.concat([positive_train_df, negative_train_df])
df_val = pd.concat([positive_val_df, negative_val_df])
# Positive sentiment to [0,1], negative sentiment to [1,0]
sentiment_train = []
sentiment_val = []
for i in df_train["Target"]:
if i == "Positive":
sentiment_train.append([0,1])
else:
sentiment_train.append([1,0])
df_train["Sentiment"] = sentiment_train
for i in df_val["Target"]:
if i == "Positive":
sentiment_val.append([0,1])
else:
sentiment_val.append([1,0])
df_val["Sentiment"] = sentiment_val
df_train.info()
df_val.info()
df_train.head()
df_val.head()
sns.countplot(x = "Target", data = df_train)
sns.countplot(x = "Target", data = df_val)
train_data_all, train_label_all = df_train["Text"].tolist(), df_train["Sentiment"].tolist()
dev_data, dev_labels = df_val["Text"].tolist(), df_val["Sentiment"].tolist()
data = train_data_all+dev_data
labels = train_label_all+dev_labels
pairs = []
for c in zip(labels, data):
if len(c[1]) != 0 and len(c[1].split(" "))<=5:
pairs.append(c)
random.seed(0)
random.shuffle(pairs)
N_EXAMPLES = len(pairs)
print("Total: {}".format(N_EXAMPLES))
TRAIN_RATIO_INDEX = 0.8
TEST_RATIO_INDEX = TRAIN_RATIO_INDEX + 0.1
DEV_RATIO_INDEX = TEST_RATIO_INDEX + 0.1
train_labels, train_data = zip(*pairs[:round(N_EXAMPLES * TRAIN_RATIO_INDEX)])
dev_labels, dev_data = zip(*pairs[round(N_EXAMPLES * TRAIN_RATIO_INDEX):round(N_EXAMPLES * TEST_RATIO_INDEX)])
test_labels, test_data = zip(*pairs[round(N_EXAMPLES * TEST_RATIO_INDEX):round(N_EXAMPLES * DEV_RATIO_INDEX)])
print("Data selected for train: {}\nData selected for test: {}\nData selected for dev: {}".format(len(train_data), len(test_data), len(dev_data)))
# Function for replacing low occuring word(s) with <unk> token
def replace(box):
    """Functor arrow-map: substitute an 'unk' Word box for rare words.

    NOTE(review): `dataset.count(box.name) < 1` is only true when the word is
    completely absent from `dataset`; since the diagrams are parsed from the
    same sentences that built `dataset`, this condition likely never fires
    (a threshold such as `< 2` was probably intended) — confirm with author.
    """
    if isinstance(box, Word) and dataset.count(box.name) < 1:
        return Word('unk', box.cod, box.dom)
    return box
tokeniser = SpacyTokeniser()
train_data = tokeniser.tokenise_sentences(train_data)
dev_data = tokeniser.tokenise_sentences(dev_data)
test_data = tokeniser.tokenise_sentences(test_data)
for i in range(len(train_data)):
train_data[i] = ' '.join(train_data[i])
for i in range(len(dev_data)):
dev_data[i] = ' '.join(dev_data[i])
for i in range(len(test_data)):
test_data[i] = ' '.join(test_data[i])
# training set words (with repetition)
train_data_string = ' '.join(train_data)
train_data_list = train_data_string.split(' ')
# validation set words (with repetition)
dev_data_string = ' '.join(dev_data)
dev_data_list = dev_data_string.split(' ')
# test set words (with repetition)
test_data_string = ' '.join(test_data)
test_data_list = test_data_string.split(' ')
# dataset words (with repetition)
dataset = train_data_list + dev_data_list + test_data_list
# list of all unique words in the dataset
unique_words = unique(dataset)
# frequency for each unique word
counter = collections.Counter(dataset)
#print(counter)
replace_functor = Functor(ob=lambda x: x, ar=replace)
# parser = BobcatParser(verbose='text')
print(BobcatParser.available_models())
parser = spiders_reader
#parser = DepCCGParser()
#parser = cups_reader
raw_train_diagrams = []
new_train_labels = []
raw_dev_diagrams = []
new_dev_labels = []
raw_test_diagrams = []
new_test_labels = []
for sent, label in zip(train_data, train_labels):
try:
diag = parser.sentence2diagram(sent)
raw_train_diagrams.append(diag)
new_train_labels.append(label)
except:
print("Cannot be parsed in train: {}".format(sent))
for sent, label in zip(dev_data, dev_labels):
try:
diag = parser.sentence2diagram(sent)
raw_dev_diagrams.append(diag)
new_dev_labels.append(label)
except:
print("Cannot be parsed in dev: {}".format(sent))
for sent, label in zip(test_data, test_labels):
try:
diag = parser.sentence2diagram(sent)
raw_test_diagrams.append(diag)
new_test_labels.append(label)
except:
print("Cannot be parsed in test: {}".format(sent))
train_labels = new_train_labels
dev_labels = new_dev_labels
test_labels = new_test_labels
# Replacing low-occurring words in each dataset with <unk> via the functor
for i in range(len(raw_train_diagrams)):
raw_train_diagrams[i] = replace_functor(raw_train_diagrams[i])
for i in range(len(raw_dev_diagrams)):
raw_dev_diagrams[i] = replace_functor(raw_dev_diagrams[i])
for i in range(len(raw_test_diagrams)):
raw_test_diagrams[i] = replace_functor(raw_test_diagrams[i])
# sample sentence diagram (entry 1)
raw_train_diagrams[0].draw()
# merging all diagrams into one for checking the new words
raw_all_diagrams = raw_train_diagrams + raw_dev_diagrams + raw_test_diagrams
# removing cups (after performing top-to-bottom scan of the word diagrams)
train_diagrams = [remove_cups(diagram) for diagram in raw_train_diagrams]
dev_diagrams = [remove_cups(diagram) for diagram in raw_dev_diagrams]
test_diagrams = [remove_cups(diagram) for diagram in raw_test_diagrams]
# sample sentence diagram (entry 1)
train_diagrams[0].draw()
ansatz = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
# train/test circuits
train_circuits = [ansatz(diagram) for diagram in train_diagrams]
dev_circuits = [ansatz(diagram) for diagram in dev_diagrams]
test_circuits = [ansatz(diagram) for diagram in test_diagrams]
# sample circuit diagram
train_circuits[0].draw(figsize=(9, 12))
all_circuits = train_circuits + dev_circuits + test_circuits
model = NumpyModel.from_diagrams(all_circuits, use_jit=True)
loss = lambda y_hat, y: -np.sum(y * np.log(y_hat)) / len(y) # binary cross-entropy loss
acc = lambda y_hat, y: np.sum(np.round(y_hat) == y) / len(y) / 2 # half due to double-counting
trainer = QuantumTrainer(
model,
loss_function=loss,
epochs=EPOCHS,
optimizer=SPSAOptimizer,
optim_hyperparams={'a': 0.2, 'c': 0.06, 'A':0.01*EPOCHS},
evaluate_functions={'acc': acc},
evaluate_on_train=True,
verbose = 'text',
seed=0
)
train_dataset = Dataset(
train_circuits,
train_labels,
batch_size=BATCH_SIZE)
val_dataset = Dataset(dev_circuits, dev_labels, shuffle=False)
trainer.fit(train_dataset, val_dataset, logging_step=12)
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(10, 6))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer.train_results['acc'], color=next(colours))
ax_tr.plot(trainer.val_costs, color=next(colours))
ax_br.plot(trainer.val_results['acc'], color=next(colours))
test_acc = acc(model(test_circuits), test_labels)
print('Test accuracy:', test_acc)
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import random
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
from discopy import grammar
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from jax import numpy as jnp
import numpy as np
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel
from lambeq import Rewriter
from pytket.extensions.qiskit import AerBackend
import seaborn as sns
import matplotlib.pyplot as plt
from pytket.circuit.display import render_circuit_jupyter
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
BATCH_SIZE = 20
EPOCHS = 50
SEED = 0
TRAIN_INDEX_RATIO = 0.02
VAL_INDEX_RATIO = TRAIN_INDEX_RATIO + 0.001
TEST_INDEX_RATIO = VAL_INDEX_RATIO + 0.001
assert TEST_INDEX_RATIO <= 1
def load_pickled_dict_to_df(filename):
    """Load a pickled column-dict into a shuffled DataFrame with a binary
    ``Sentiment`` column.

    Rows are shuffled deterministically (``random_state=SEED``) and the
    ``target`` column is mapped to 1 for "Positive" and 0 for anything else.

    SECURITY NOTE: ``pickle.load`` can execute arbitrary code; only call
    this on trusted, locally produced pickle files.
    """
    # Context manager closes the handle; the original `pickle.load(open(...))`
    # leaked the file descriptor.
    with open(filename, 'rb') as fh:
        saved_dict = pickle.load(fh)
    df = pd.DataFrame.from_dict(saved_dict)
    df = df.sample(frac=1, random_state=SEED).reset_index(drop=True)
    # Numeric sentiment flag derived from the text label.
    sentiment = [1 if target == "Positive" else 0 for target in df['target']]
    df["Sentiment"] = sentiment
    return df
cleaned_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data.pkl')
cleaned_lemmatized_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_lematize.pkl')
cleaned_lemmatized_stemmed_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_stem_lematize.pkl')
#cleaned_qnlp = load_pickled_dict_to_df(cleaned_qnlp_filename)
cleaned_lemmatized_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_qnlp_filename)
cleaned__lemmatized_stemmed_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_stemmed_qnlp_filename)
#cleaned_qnlp.head(10)
#cleaned_qnlp.info()
#sns.countplot(x = "target", data = cleaned_qnlp)
cleaned_lemmatized_qnlp.head(10)
cleaned_lemmatized_qnlp.info()
sns.countplot(x='target', data = cleaned_lemmatized_qnlp)
cleaned__lemmatized_stemmed_qnlp.head(10)
cleaned__lemmatized_stemmed_qnlp.info()
sns.countplot(x='target', data = cleaned__lemmatized_stemmed_qnlp)
# parser = BobcatParser(verbose='text')
# parser = DepCCGParser(root_cats=['S[dcl]'])
# parser = spiders_reader
parser = TreeReader()
NUM_DATA_1 = 2578
rewriter = Rewriter(['prepositional_phrase', 'determiner', 'auxiliary', 'connector',
'coordination', 'object_rel_pronoun', 'subject_rel_pronoun',
'postadverb', 'preadverb'])
def rewrite(diagram):
    """Normalization applied to each parsed diagram.

    Currently only removes cups; the rewriter pass is deliberately
    disabled (commented out) in this experiment.
    """
    # diagram = rewriter(diagram)
    return remove_cups(diagram)
def create_diagrams_and_labels(total_df, NUM_DATA = 2578):
    """Split the dataframe into train/dev/test slices and parse each into
    diagrams.

    Cut points come from the module-level *_INDEX_RATIO constants applied
    to ``NUM_DATA``; sentences are parsed with the module-level ``parser``.
    Labels are two-element one-hot lists: [1, 0] for positive sentiment,
    [0, 1] for negative.

    Returns (train_diagrams, train_labels, dev_diagrams, dev_labels,
    test_diagrams, test_labels).
    """
    texts = total_df['data'].tolist()
    onehot_labels = [[s, 1 - s] for s in total_df["Sentiment"].tolist()]
    # Cumulative slice boundaries.
    train_end = round(NUM_DATA * TRAIN_INDEX_RATIO)
    dev_end = round(NUM_DATA * VAL_INDEX_RATIO)
    test_end = round(NUM_DATA * TEST_INDEX_RATIO)
    # Parse each slice in the same order as before (train, dev, test).
    train_diagrams = parser.sentences2diagrams(texts[:train_end])
    dev_diagrams = parser.sentences2diagrams(texts[train_end:dev_end])
    test_diagrams = parser.sentences2diagrams(texts[dev_end:test_end])
    return (train_diagrams, onehot_labels[:train_end],
            dev_diagrams, onehot_labels[train_end:dev_end],
            test_diagrams, onehot_labels[dev_end:test_end])
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(cleaned__lemmatized_stemmed_qnlp, NUM_DATA_1)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
alternate_parser = BobcatParser(verbose='text')
dig_0 = alternate_parser.sentence2diagram(cleaned__lemmatized_stemmed_qnlp['data'].tolist()[0])
grammar.draw(dig_0, figsize=(14,3), fontsize=12)
ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
# train_circuits_1[0].draw(figsize=(9, 12))
render_circuit_jupyter(train_circuits_1[0].to_tk())
[(s, s.size) for s in train_circuits_1[0].free_symbols]
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
from sympy import default_sort_key
vocab_1 = sorted(
{sym for circ in all_circuits_1 for sym in circ.free_symbols},
key=default_sort_key
)
print(len(vocab_1))
params_1 = jnp.array(np.random.rand(len(vocab_1)))
from tqdm.notebook import tqdm
np_circuits = []
for c in tqdm(train_circuits_1):
np_circuits.append(c.lambdify(*vocab_1)(*params_1))
for c in tqdm(np_circuits):
print(c.eval().array)
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x) on JAX arrays."""
    neg_exp = jnp.exp(-x)
    return 1 / (1 + neg_exp)
def loss_1(tensors):
    """Binary cross-entropy of the training circuits evaluated at *tensors*.

    NOTE(review): `train_targets_1` is not defined anywhere in this script
    (only `train_labels_1` is) -- confirm the intended name before running,
    otherwise this raises NameError.
    """
    # Lambdify
    np_circuits = [c.lambdify(*vocab_1)(*tensors) for c in train_circuits_1]
    # Compute predictions
    # NOTE(review): each circuit is contracted four times here (c.eval()
    # twice per component); hoisting `c.eval().array` into a local would
    # avoid the repeated work.
    predictions = sigmoid(jnp.array([[jnp.real(jnp.conjugate(c.eval().array[0])*c.eval().array[0]), jnp.real(jnp.conjugate(c.eval().array[1])*c.eval().array[1])] for c in np_circuits]))
    # binary cross-entropy loss
    cost = -jnp.sum(train_targets_1 * jnp.log2(predictions)) / len(train_targets_1)
    return cost
from jax import jit, grad
training_loss = jit(loss_1)
gradient = jit(grad(loss_1))
training_losses = []
LR = 1.0
for i in range(EPOCHS):
gr = gradient(params_1)
params_1 = params_1 - LR*gr
training_losses.append(float(training_loss(params_1)))
if (i + 1) % 1 == 0:
print(f"Epoch {i + 1} - loss {training_losses[-1]}")
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import random
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
from discopy import grammar
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from jax import numpy as jnp
import torch
import numpy as np
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader,SpiderAnsatz
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel, PytorchModel, PytorchTrainer
from lambeq import Rewriter
from pytket.extensions.qiskit import AerBackend
import seaborn as sns
import matplotlib.pyplot as plt
from pytket.circuit.display import render_circuit_jupyter
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
BATCH_SIZE = 20
EPOCHS = 100
SEED = 0
LEARNING_RATE = 3e-2
TRAIN_INDEX_RATIO = 0.08
VAL_INDEX_RATIO = TRAIN_INDEX_RATIO + 0.01
TEST_INDEX_RATIO = VAL_INDEX_RATIO + 0.01
assert TEST_INDEX_RATIO <= 1
def load_pickled_dict_to_df(filename):
    """Load a pickled column-dict into a shuffled DataFrame with a binary
    ``Sentiment`` column.

    Rows are shuffled deterministically (``random_state=SEED``) and the
    ``target`` column is mapped to 1 for "Positive" and 0 for anything else.

    SECURITY NOTE: ``pickle.load`` can execute arbitrary code; only call
    this on trusted, locally produced pickle files.
    """
    # Context manager closes the handle; the original `pickle.load(open(...))`
    # leaked the file descriptor.
    with open(filename, 'rb') as fh:
        saved_dict = pickle.load(fh)
    df = pd.DataFrame.from_dict(saved_dict)
    df = df.sample(frac=1, random_state=SEED).reset_index(drop=True)
    # Numeric sentiment flag derived from the text label.
    sentiment = [1 if target == "Positive" else 0 for target in df['target']]
    df["Sentiment"] = sentiment
    return df
cleaned_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data.pkl')
cleaned_lemmatized_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_lematize.pkl')
cleaned_lemmatized_stemmed_qnlp_filename = os.path.join(os.getcwd(), 'cleaned_qnlp_data_stem_lematize.pkl')
#cleaned_qnlp = load_pickled_dict_to_df(cleaned_qnlp_filename)
#cleaned_lemmatized_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_qnlp_filename)
cleaned__lemmatized_stemmed_qnlp = load_pickled_dict_to_df(cleaned_lemmatized_stemmed_qnlp_filename)
#cleaned_qnlp.head(10)
#cleaned_qnlp.info()
#sns.countplot(x = "target", data = cleaned_qnlp)
#cleaned_lemmatized_qnlp.head(10)
#cleaned_lemmatized_qnlp.info()
#sns.countplot(x='target', data = cleaned_lemmatized_qnlp)
cleaned__lemmatized_stemmed_qnlp.head(10)
cleaned__lemmatized_stemmed_qnlp.info()
sns.countplot(x='target', data = cleaned__lemmatized_stemmed_qnlp)
parser = BobcatParser(verbose='text')
# parser = DepCCGParser(root_cats=['S[dcl]'])
# parser = spiders_reader
NUM_DATA = 2578
sig = torch.sigmoid
def accuracy(y_hat, y):
    """Fraction of correctly predicted one-hot components.

    Logits are squashed with a sigmoid and rounded to {0, 1}; the count of
    matches is halved because each example contributes two components.
    """
    predictions = torch.round(torch.sigmoid(y_hat))
    matches = torch.sum(torch.eq(predictions, y))
    return matches / len(y) / 2
rewriter = Rewriter(['prepositional_phrase', 'determiner', 'auxiliary', 'connector',
'coordination', 'object_rel_pronoun', 'subject_rel_pronoun',
'postadverb', 'preadverb'])
def rewrite(diagram):
    """Normalization applied to each parsed diagram.

    Currently only removes cups; the rewriter pass is deliberately
    disabled (commented out) in this experiment.
    """
    # diagram = rewriter(diagram)
    return remove_cups(diagram)
def create_diagrams_and_labels(total_df, NUM_DATA = 2578):
    """Split the dataframe into train/dev/test slices and parse each into
    diagrams with the module-level ``parser``.

    Cut points come from the module-level *_INDEX_RATIO constants applied
    to ``NUM_DATA``. Returns (train_diagrams, train_labels, dev_diagrams,
    dev_labels, test_diagrams, test_labels).
    """
    total_text = total_df['data'].tolist()
    total_labels = total_df["Sentiment"].tolist()
    total_labels = [[t, 1-t] for t in total_labels] # [1, 0] for positive, [0, 1] for negative
    train_diagrams = parser.sentences2diagrams(total_text[:round(NUM_DATA*TRAIN_INDEX_RATIO)])
    train_labels = total_labels[:round(NUM_DATA*TRAIN_INDEX_RATIO)]
    dev_diagrams = parser.sentences2diagrams(total_text[round(NUM_DATA*TRAIN_INDEX_RATIO):round(NUM_DATA*VAL_INDEX_RATIO)])
    dev_labels = total_labels[round(NUM_DATA*TRAIN_INDEX_RATIO):round(NUM_DATA*VAL_INDEX_RATIO)]
    test_diagrams = parser.sentences2diagrams(total_text[round(NUM_DATA*VAL_INDEX_RATIO):round(NUM_DATA*TEST_INDEX_RATIO)])
    test_labels = total_labels[round(NUM_DATA*VAL_INDEX_RATIO):round(NUM_DATA*TEST_INDEX_RATIO)]
    return train_diagrams, train_labels, dev_diagrams, dev_labels, test_diagrams, test_labels
raw_train_diagrams_1, train_labels_1, raw_dev_diagrams_1, dev_labels_1, raw_test_diagrams_1, test_labels_1 = create_diagrams_and_labels(cleaned__lemmatized_stemmed_qnlp)
print(len(raw_train_diagrams_1))
raw_train_diagrams_1[0].draw(figsize=(12,3))
train_diagrams_1 = [rewrite(diagram) for diagram in raw_train_diagrams_1]
dev_diagrams_1 = [rewrite(diagram) for diagram in raw_dev_diagrams_1]
test_diagrams_1 = [rewrite(diagram) for diagram in raw_test_diagrams_1]
train_diagrams_1[0].draw(figsize=(6,5))
# ansatz_1 = IQPAnsatz({AtomicType.NOUN: 1, AtomicType.SENTENCE: 1, AtomicType.PREPOSITIONAL_PHRASE: 1, AtomicType.NOUN_PHRASE:1, AtomicType.CONJUNCTION:1}, n_layers=1, n_single_qubit_params=3)
ansatz_1 = SpiderAnsatz({AtomicType.NOUN: 2, AtomicType.SENTENCE: 2, AtomicType.PREPOSITIONAL_PHRASE: 2, AtomicType.NOUN_PHRASE:2, AtomicType.CONJUNCTION:2})
train_circuits_1 = [ansatz_1(diagram) for diagram in train_diagrams_1]
dev_circuits_1 = [ansatz_1(diagram) for diagram in dev_diagrams_1]
test_circuits_1 = [ansatz_1(diagram) for diagram in test_diagrams_1]
train_circuits_1[0].draw(figsize=(9, 12))
all_circuits_1 = train_circuits_1 + dev_circuits_1 + test_circuits_1
model_1 = PytorchModel.from_diagrams(all_circuits_1)
# Build the PyTorch trainer for the SpiderAnsatz circuits.
# fix: the original had a stray ')' after `seed=SEED)` which made this
# statement a SyntaxError.
trainer_1 = PytorchTrainer(
    model=model_1,
    loss_function=torch.nn.BCEWithLogitsLoss(),
    optimizer=torch.optim.AdamW,  # type: ignore
    learning_rate=LEARNING_RATE,
    epochs=EPOCHS,
    evaluate_functions={"acc": accuracy},
    evaluate_on_train=True,
    verbose='text',
    seed=SEED)
train_dataset_1 = Dataset(
train_circuits_1,
train_labels_1,
batch_size=BATCH_SIZE)
val_dataset_1 = Dataset(dev_circuits_1, dev_labels_1, shuffle=False)
trainer_1.fit(train_dataset_1, val_dataset_1, logging_step=5)
fig, ((ax_tl, ax_tr), (ax_bl, ax_br)) = plt.subplots(2, 2, sharex=True, sharey='row', figsize=(12, 8))
ax_tl.set_title('Training set')
ax_tr.set_title('Development set')
ax_bl.set_xlabel('Iterations')
ax_br.set_xlabel('Iterations')
ax_bl.set_ylabel('Accuracy')
ax_tl.set_ylabel('Loss')
colours = iter(plt.rcParams['axes.prop_cycle'].by_key()['color'])
ax_tl.plot(trainer_1.train_epoch_costs, color=next(colours))
ax_bl.plot(trainer_1.train_results['acc'], color=next(colours))
ax_tr.plot(trainer_1.val_costs, color=next(colours))
ax_br.plot(trainer_1.val_results['acc'], color=next(colours))
# fix: `acc` is not defined in this script -- the metric above is named
# `accuracy`. Labels are plain lists, so wrap them in a tensor for the
# elementwise comparison inside `accuracy`.
test_acc_1 = accuracy(model_1(test_circuits_1), torch.tensor(test_labels_1))
print('Test accuracy:', test_acc_1)
|
https://github.com/peiyong-addwater/Hackathon-QNLP
|
peiyong-addwater
|
import collections
import pickle
from tqdm.notebook import tqdm
import warnings
warnings.filterwarnings("ignore")
import os
from random import shuffle
import re
import spacy
from discopy.tensor import Tensor
from discopy import Word
from discopy.rigid import Functor
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from numpy import random, unique
from lambeq import AtomicType, IQPAnsatz, remove_cups, NumpyModel, spiders_reader
from lambeq import BobcatParser, TreeReader, cups_reader, DepCCGParser
from lambeq import Dataset
from lambeq import QuantumTrainer, SPSAOptimizer
from lambeq import TketModel
from lambeq import SpacyTokeniser
from pytket.extensions.qiskit import AerBackend
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk import pos_tag, ne_chunk
from nltk.chunk import tree2conlltags
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
nltk.download('omw-1.4')
pd.set_option('display.width', 1000)
pd.options.display.max_colwidth=80
print(os.getcwd())
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
spacy.load('en_core_web_sm')
TOTAL_DATA_RATIO = 0.1 # only use part of the data
MAX_LENGTH = 10 # only use short tweets
def get_sent_length(sent):
    """Return the number of space-separated tokens in *sent*.

    Non-string values (e.g. NaN rows from the CSV) map to an effectively
    infinite length so the MAX_LENGTH filter below drops them.
    """
    # isinstance is the idiomatic type check and also accepts str subclasses.
    if not isinstance(sent, str):
        return 9999999999999
    return len(sent.split(" "))
columns = ["Id","Entity","Target","Text"]
data = pd.read_csv(os.path.join(os.getcwd(),"data/twitter_training.csv"), names=columns,header=None)
#data = data.sample(frac=1).reset_index(drop=True)
data_val = pd.read_csv(os.path.join(os.getcwd(), "data/twitter_validation.csv"), names=columns,header=None)
#data_val = data.sample(frac=1).reset_index(drop=True)
df_train = data[["Text","Target"]]
df_train = df_train.loc[(df_train["Target"]=='Positive') | (df_train["Target"]=='Negative') & (df_train["Text"]!=np.nan)&(df_train["Text"].map(get_sent_length)<=MAX_LENGTH)]
df_train= df_train.drop_duplicates()
df_val = data_val[['Text', 'Target']]
df_val = df_val.loc[(df_val['Target'] == 'Positive') | (df_val['Target'] == 'Negative') & (df_val["Text"]!=np.nan)&(df_val["Text"].map(get_sent_length)<=MAX_LENGTH)]
text_cleaning_re = "@\S+|https?:\S+|http?:\S|[^A-Za-z0-9]+"
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
def preprocess(text):
    """Normalize a raw tweet for parsing.

    Steps: lowercase and strip handles/URLs/non-alphanumerics (module-level
    ``text_cleaning_re``) and emoji (``emoji_pattern``), tokenize, keep only
    alphabetic tokens, lemmatize, then stem. Returns the cleaned tokens
    re-joined with single spaces.
    """
    text = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    without_emoji = emoji_pattern.sub(r'', text)
    tokens = word_tokenize(str(without_emoji).replace("'", "").lower())
    # Remove punctuation / numeric tokens
    without_punc = [w for w in tokens if w.isalpha()]
    # Hoisted: the original constructed a fresh WordNetLemmatizer and
    # PorterStemmer for every token inside the comprehensions.
    lemmatizer = WordNetLemmatizer()
    lemmas = [lemmatizer.lemmatize(t) for t in without_punc]
    stemmer = PorterStemmer()
    stems = [stemmer.stem(w) for w in lemmas]
    return " ".join(stems)
df_train["Text"]= df_train["Text"].str.replace("im","i am")
df_train["Text"]= df_train["Text"].str.replace("i'm","i am")
df_train["Text"]= df_train["Text"].str.replace("I'm","i am")
df_train["Text"]= df_train["Text"].str.replace("it's","it is")
df_train["Text"]= df_train["Text"].str.replace("you're","you are")
df_train["Text"]= df_train["Text"].str.replace("hasn't","has not")
df_train["Text"]= df_train["Text"].str.replace("haven't","have not")
df_train["Text"]= df_train["Text"].str.replace("don't","do not")
df_train["Text"]= df_train["Text"].str.replace("doesn't","does not")
df_train["Text"]= df_train["Text"].str.replace("won't","will not")
df_train["Text"]= df_train["Text"].str.replace("shouldn't","should not")
df_train["Text"]= df_train["Text"].str.replace("can't","can not")
df_train["Text"]= df_train["Text"].str.replace("couldn't","could not")
df_val["Text"] = df_val["Text"].str.replace("im","i am")
df_val["Text"] = df_val["Text"].str.replace("i'm","i am")
df_val["Text"] = df_val["Text"].str.replace("I'm","i am")
df_val["Text"]= df_val["Text"].str.replace("it's","it is")
df_val["Text"]= df_val["Text"].str.replace("you're","you are")
df_val["Text"]= df_val["Text"].str.replace("hasn't","has not")
df_val["Text"]= df_val["Text"].str.replace("haven't","have not")
df_val["Text"] = df_val["Text"].str.replace("don't","do not")
df_val["Text"] = df_val["Text"].str.replace("doesn't","does not")
df_val["Text"] = df_val["Text"].str.replace("won't","will not")
df_val["Text"] = df_val["Text"].str.replace("shouldn't","should not")
df_val["Text"] = df_val["Text"].str.replace("can't","can not")
df_val["Text"] = df_val["Text"].str.replace("couldn't","could not")
df_train["Text"] = df_train["Text"].apply(preprocess)
df_val["Text"] = df_val["Text"].apply(preprocess)
df_train = df_train.dropna()
df_val = df_val.dropna()
# Positive sentiment to [0,1], negative sentiment to [1,0]
sentiment_train = []
sentiment_val = []
for i in df_train["Target"]:
if i == "Positive":
sentiment_train.append([0,1])
else:
sentiment_train.append([1,0])
df_train["Sentiment"] = sentiment_train
for i in df_val["Target"]:
if i == "Positive":
sentiment_val.append([0,1])
else:
sentiment_val.append([1,0])
df_val["Sentiment"] = sentiment_val
df_train.info()
df_val.info()
df_train.head()
df_val.head()
sns.countplot(x = "Target", data = df_train)
sns.countplot(x = "Target", data = df_val)
train_data_all, train_label_all, train_target_all = df_train["Text"].tolist(), df_train["Sentiment"].tolist(), df_train['Target'].tolist()
dev_data, dev_labels, dev_target = df_val["Text"].tolist(), df_val["Sentiment"].tolist(), df_val['Target'].tolist()
data = train_data_all+dev_data
labels = train_label_all+dev_labels
targets = train_target_all+dev_target
pairs = []
for c in zip(labels, data, targets):
if len(c[1]) > 0:
pairs.append(c)
random.seed(0)
random.shuffle(pairs)
N_EXAMPLES = len(pairs)
print("Total: {}".format(N_EXAMPLES))
parser = BobcatParser(verbose='text')
new_data = []
new_label = []
new_target = []
i = 0 # positive
j = 0 # negative
for label, sent, target in tqdm(pairs):
try:
diag = parser.sentence2diagram(sent)
except:
pass
else:
sent_length = len(sent.split(" "))
if i>round(N_EXAMPLES*TOTAL_DATA_RATIO)//2 and j>round(N_EXAMPLES*TOTAL_DATA_RATIO)//2:
break
if target == "Positive" and i<=round(N_EXAMPLES*TOTAL_DATA_RATIO)//2:
new_data.append(sent)
new_label.append(label)
new_target.append(target)
i = i + 1
if target == 'Negative' and j<=round(N_EXAMPLES*TOTAL_DATA_RATIO)//2:
new_data.append(sent)
new_label.append(label)
new_target.append(target)
j = j + 1
cleaned_qnlp_data = {"data":new_data, "label":new_label, "target":new_target}
pickle.dump(cleaned_qnlp_data, open("cleaned_qnlp_data_stem_lematize.pkl", "wb" ))
def get_sent_length(sent):
    """Token count of *sent*, splitting on single spaces.

    Non-string values (e.g. NaN rows) get an effectively infinite length
    so downstream MAX_LENGTH filters drop them.
    """
    if type(sent) is not str:
        return 9999999999999
    return len(sent.split(" "))
columns = ["Id","Entity","Target","Text"]
data = pd.read_csv(os.path.join(os.getcwd(),"data/twitter_training.csv"), names=columns,header=None)
#data = data.sample(frac=1).reset_index(drop=True)
data_val = pd.read_csv(os.path.join(os.getcwd(), "data/twitter_validation.csv"), names=columns,header=None)
#data_val = data.sample(frac=1).reset_index(drop=True)
df_train = data[["Text","Target"]]
df_train = df_train.loc[(df_train["Target"]=='Positive') | (df_train["Target"]=='Negative') & (df_train["Text"]!=np.nan)&(df_train["Text"].map(get_sent_length)<=MAX_LENGTH)]
df_train= df_train.drop_duplicates()
df_val = data_val[['Text', 'Target']]
df_val = df_val.loc[(df_val['Target'] == 'Positive') | (df_val['Target'] == 'Negative') & (df_val["Text"]!=np.nan)&(df_val["Text"].map(get_sent_length)<=MAX_LENGTH)]
text_cleaning_re = "@\S+|https?:\S+|http?:\S|[^A-Za-z0-9]+"
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
def preprocess(text):
    """Normalize a raw tweet (lemmatize-only variant, no stemming).

    Steps: lowercase and strip handles/URLs/non-alphanumerics (module-level
    ``text_cleaning_re``) and emoji (``emoji_pattern``), tokenize, keep only
    alphabetic tokens, lemmatize. Returns the tokens joined with spaces.
    """
    text = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    without_emoji = emoji_pattern.sub(r'', text)
    tokens = word_tokenize(str(without_emoji).replace("'", "").lower())
    # Remove Puncs
    without_punc = [w for w in tokens if w.isalpha()]
    # Hoisted: the original built a fresh WordNetLemmatizer per token.
    lemmatizer = WordNetLemmatizer()
    lemmas = [lemmatizer.lemmatize(t) for t in without_punc]
    return " ".join(lemmas)
df_train["Text"]= df_train["Text"].str.replace("im","i am")
df_train["Text"]= df_train["Text"].str.replace("i'm","i am")
df_train["Text"]= df_train["Text"].str.replace("I'm","i am")
df_train["Text"]= df_train["Text"].str.replace("it's","it is")
df_train["Text"]= df_train["Text"].str.replace("you're","you are")
df_train["Text"]= df_train["Text"].str.replace("hasn't","has not")
df_train["Text"]= df_train["Text"].str.replace("haven't","have not")
df_train["Text"]= df_train["Text"].str.replace("don't","do not")
df_train["Text"]= df_train["Text"].str.replace("doesn't","does not")
df_train["Text"]= df_train["Text"].str.replace("won't","will not")
df_train["Text"]= df_train["Text"].str.replace("shouldn't","should not")
df_train["Text"]= df_train["Text"].str.replace("can't","can not")
df_train["Text"]= df_train["Text"].str.replace("couldn't","could not")
df_val["Text"] = df_val["Text"].str.replace("im","i am")
df_val["Text"] = df_val["Text"].str.replace("i'm","i am")
df_val["Text"] = df_val["Text"].str.replace("I'm","i am")
df_val["Text"]= df_val["Text"].str.replace("it's","it is")
df_val["Text"]= df_val["Text"].str.replace("you're","you are")
df_val["Text"]= df_val["Text"].str.replace("hasn't","has not")
df_val["Text"]= df_val["Text"].str.replace("haven't","have not")
df_val["Text"] = df_val["Text"].str.replace("don't","do not")
df_val["Text"] = df_val["Text"].str.replace("doesn't","does not")
df_val["Text"] = df_val["Text"].str.replace("won't","will not")
df_val["Text"] = df_val["Text"].str.replace("shouldn't","should not")
df_val["Text"] = df_val["Text"].str.replace("can't","can not")
df_val["Text"] = df_val["Text"].str.replace("couldn't","could not")
df_train["Text"] = df_train["Text"].apply(preprocess)
df_val["Text"] = df_val["Text"].apply(preprocess)
df_train = df_train.dropna()
df_val = df_val.dropna()
# Positive sentiment to [0,1], negative sentiment to [1,0]
sentiment_train = []
sentiment_val = []
for i in df_train["Target"]:
if i == "Positive":
sentiment_train.append([0,1])
else:
sentiment_train.append([1,0])
df_train["Sentiment"] = sentiment_train
for i in df_val["Target"]:
if i == "Positive":
sentiment_val.append([0,1])
else:
sentiment_val.append([1,0])
df_val["Sentiment"] = sentiment_val
train_data_all, train_label_all, train_target_all = df_train["Text"].tolist(), df_train["Sentiment"].tolist(), df_train['Target'].tolist()
dev_data, dev_labels, dev_target = df_val["Text"].tolist(), df_val["Sentiment"].tolist(), df_val['Target'].tolist()
data = train_data_all+dev_data
labels = train_label_all+dev_labels
targets = train_target_all+dev_target
pairs = []
for c in zip(labels, data, targets):
if len(c[1]) > 0:
pairs.append(c)
random.seed(0)
random.shuffle(pairs)
N_EXAMPLES = len(pairs)
print("Total: {}".format(N_EXAMPLES))
new_data = []
new_label = []
new_target = []
i = 0 # positive
j = 0 # negative
parser = BobcatParser(verbose='text')
for label, sent, target in tqdm(pairs):
try:
diag = parser.sentence2diagram(sent)
except:
pass
else:
sent_length = len(sent.split(" "))
if i>round(N_EXAMPLES*TOTAL_DATA_RATIO)//2 and j>round(N_EXAMPLES*TOTAL_DATA_RATIO)//2:
break
if target == "Positive" and i<=round(N_EXAMPLES*TOTAL_DATA_RATIO)//2:
new_data.append(sent)
new_label.append(label)
new_target.append(target)
i = i + 1
if target == 'Negative' and j<=round(N_EXAMPLES*TOTAL_DATA_RATIO)//2:
new_data.append(sent)
new_label.append(label)
new_target.append(target)
j = j + 1
cleaned_qnlp_data = {"data":new_data, "label":new_label, "target":new_target}
pickle.dump(cleaned_qnlp_data, open("cleaned_qnlp_data_lematize.pkl", "wb" ))
def get_sent_length(sent):
    """Number of space-separated tokens in *sent*; anything that is not a
    str (NaN, numbers) is reported as a huge length so length filters
    exclude it."""
    if type(sent) is not str:
        return 9999999999999
    tokens = sent.split(" ")
    return len(tokens)
columns = ["Id","Entity","Target","Text"]
data = pd.read_csv(os.path.join(os.getcwd(),"data/twitter_training.csv"), names=columns,header=None)
#data = data.sample(frac=1).reset_index(drop=True)
data_val = pd.read_csv(os.path.join(os.getcwd(), "data/twitter_validation.csv"), names=columns,header=None)
#data_val = data.sample(frac=1).reset_index(drop=True)
df_train = data[["Text","Target"]]
df_train = df_train.loc[(df_train["Target"]=='Positive') | (df_train["Target"]=='Negative') & (df_train["Text"]!=np.nan)&(df_train["Text"].map(get_sent_length)<=MAX_LENGTH)]
df_train= df_train.drop_duplicates()
df_val = data_val[['Text', 'Target']]
df_val = df_val.loc[(df_val['Target'] == 'Positive') | (df_val['Target'] == 'Negative') & (df_val["Text"]!=np.nan)&(df_val["Text"].map(get_sent_length)<=MAX_LENGTH)]
text_cleaning_re = "@\S+|https?:\S+|http?:\S|[^A-Za-z0-9]+"
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
def preprocess(text):
    """Lower-case *text*, strip URLs/mentions/emoji/punctuation, and return
    the remaining purely-alphabetic tokens joined by single spaces."""
    cleaned = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    no_emoji = emoji_pattern.sub(r'', cleaned)
    tokens = word_tokenize(str(no_emoji).replace("'", "").lower())
    # keep only alphabetic tokens (drops punctuation marks and numbers)
    alpha_tokens = [tok for tok in tokens if tok.isalpha()]
    return " ".join(alpha_tokens)
# Expand common contractions before tokenization so that e.g. "don't"
# survives the punctuation stripper as "do not".
# NOTE(review): str.replace matches raw substrings, so "im" also rewrites the
# interior of words like "time" -> "ti ame"; a word-boundary regex would be
# safer -- confirm intent before changing.
df_train["Text"]= df_train["Text"].str.replace("im","i am")
df_train["Text"]= df_train["Text"].str.replace("i'm","i am")
df_train["Text"]= df_train["Text"].str.replace("I'm","i am")
df_train["Text"]= df_train["Text"].str.replace("it's","it is")
df_train["Text"]= df_train["Text"].str.replace("you're","you are")
df_train["Text"]= df_train["Text"].str.replace("hasn't","has not")
df_train["Text"]= df_train["Text"].str.replace("haven't","have not")
df_train["Text"]= df_train["Text"].str.replace("don't","do not")
df_train["Text"]= df_train["Text"].str.replace("doesn't","does not")
df_train["Text"]= df_train["Text"].str.replace("won't","will not")
df_train["Text"]= df_train["Text"].str.replace("shouldn't","should not")
df_train["Text"]= df_train["Text"].str.replace("can't","can not")
df_train["Text"]= df_train["Text"].str.replace("couldn't","could not")
# Same expansions applied to the validation split.
df_val["Text"] = df_val["Text"].str.replace("im","i am")
df_val["Text"] = df_val["Text"].str.replace("i'm","i am")
df_val["Text"] = df_val["Text"].str.replace("I'm","i am")
df_val["Text"]= df_val["Text"].str.replace("it's","it is")
df_val["Text"]= df_val["Text"].str.replace("you're","you are")
df_val["Text"]= df_val["Text"].str.replace("hasn't","has not")
df_val["Text"]= df_val["Text"].str.replace("haven't","have not")
df_val["Text"] = df_val["Text"].str.replace("don't","do not")
df_val["Text"] = df_val["Text"].str.replace("doesn't","does not")
df_val["Text"] = df_val["Text"].str.replace("won't","will not")
df_val["Text"] = df_val["Text"].str.replace("shouldn't","should not")
df_val["Text"] = df_val["Text"].str.replace("can't","can not")
df_val["Text"] = df_val["Text"].str.replace("couldn't","could not")
df_train["Text"] = df_train["Text"].apply(preprocess)
df_val["Text"] = df_val["Text"].apply(preprocess)
df_train = df_train.dropna()
df_val = df_val.dropna()
# Positive sentiment to [0,1], negative sentiment to [1,0]
sentiment_train = []
sentiment_val = []
for i in df_train["Target"]:
    if i == "Positive":
        sentiment_train.append([0,1])
    else:
        sentiment_train.append([1,0])
df_train["Sentiment"] = sentiment_train
for i in df_val["Target"]:
    if i == "Positive":
        sentiment_val.append([0,1])
    else:
        sentiment_val.append([1,0])
df_val["Sentiment"] = sentiment_val
# Merge train and validation splits into a single shuffled pool of
# (one-hot label, text, target-string) triples, dropping empty texts.
train_data_all, train_label_all, train_target_all = df_train["Text"].tolist(), df_train["Sentiment"].tolist(), df_train['Target'].tolist()
dev_data, dev_labels, dev_target = df_val["Text"].tolist(), df_val["Sentiment"].tolist(), df_val['Target'].tolist()
data = train_data_all+dev_data
labels = train_label_all+dev_labels
targets = train_target_all+dev_target
pairs = []
for c in zip(labels, data, targets):
    if len(c[1]) > 0:
        pairs.append(c)
random.seed(0)  # fixed seed keeps the shuffle reproducible
random.shuffle(pairs)
N_EXAMPLES = len(pairs)
print("Total: {}".format(N_EXAMPLES))
# ---- balance and persist the parsed dataset ----
# Keep at most round(N_EXAMPLES*TOTAL_DATA_RATIO)//2 sentences per class,
# and only sentences that the Bobcat parser can turn into a diagram.
new_data = []
new_label = []
new_target = []
i = 0 # positive
j = 0 # negative
parser = BobcatParser(verbose='text')
for label, sent, target in tqdm(pairs):
    try:
        diag = parser.sentence2diagram(sent)
    except:
        # NOTE(review): bare except silently drops unparseable sentences
        pass
    else:
        sent_length = len(sent.split(" "))  # computed but never used
        if i>round(N_EXAMPLES*TOTAL_DATA_RATIO)//2 and j>round(N_EXAMPLES*TOTAL_DATA_RATIO)//2:
            break
        if target == "Positive" and i<=round(N_EXAMPLES*TOTAL_DATA_RATIO)//2:
            new_data.append(sent)
            new_label.append(label)
            new_target.append(target)
            i = i + 1
        if target == 'Negative' and j<=round(N_EXAMPLES*TOTAL_DATA_RATIO)//2:
            new_data.append(sent)
            new_label.append(label)
            new_target.append(target)
            j = j + 1
cleaned_qnlp_data = {"data":new_data, "label":new_label, "target":new_target}
pickle.dump(cleaned_qnlp_data, open("cleaned_qnlp_data.pkl", "wb" ))
|
https://github.com/Kairos-T/QRNG
|
Kairos-T
|
from flask import Flask, render_template, request, jsonify
from qiskit import QuantumCircuit, Aer, transpile, assemble
import matplotlib.pyplot as plt
from io import BytesIO
import base64
import numpy as np
from scipy import stats
app = Flask(__name__)
# Global variables shared by all routes
number_counts = {}   # generated value -> number of times it was produced
total_generated = 0  # total numbers generated since startup / last clear
def generate_random_number(min_value, max_value):
    """Generate one quantum random integer in [min_value, max_value].

    A Hadamard layer puts num_bits qubits in uniform superposition and a
    measurement yields a random bitstring.  Out-of-range bitstrings are
    re-drawn (rejection sampling); the original clamping to min/max piled
    probability mass onto the range endpoints.  Updates the module-level
    histogram and counter as a side effect.

    Raises ValueError when min_value > max_value.
    """
    global total_generated
    if min_value > max_value:
        raise ValueError(
            "Invalid range: Minimum value should be less than or equal to the maximum value")
    num_bits = len(bin(max_value)) - 2
    backend = Aer.get_backend('qasm_simulator')
    while True:
        circuit = QuantumCircuit(num_bits, num_bits)
        circuit.h(range(num_bits))
        circuit.measure(range(num_bits), range(num_bits))
        # shots=1 so the single observed bitstring is the sample
        result = backend.run(
            assemble(transpile(circuit, backend=backend), shots=1)).result()
        counts = result.get_counts(circuit)
        random_number = int(list(counts.keys())[0], 2)
        # rejection sampling: at least max_value itself is representable,
        # so this loop terminates with high probability per draw
        if min_value <= random_number <= max_value:
            break
    # Update the count of the generated number in the dictionary
    number_counts[random_number] = number_counts.get(random_number, 0) + 1
    total_generated += 1
    return random_number
def generate_numbers(min_value, max_value, num_samples=1):
    """Generate `num_samples` quantum random integers in [min_value, max_value].

    Same scheme as generate_random_number: each sample comes from measuring
    Hadamard-prepared qubits, and out-of-range bitstrings are re-drawn
    (rejection sampling) instead of being clamped to the endpoints.  Updates
    the module-level histogram and counter as a side effect.

    Raises ValueError when min_value > max_value.
    """
    global total_generated
    if min_value > max_value:
        raise ValueError(
            "Invalid range: Minimum value should be less than or equal to the maximum value")
    num_bits = len(bin(max_value)) - 2
    backend = Aer.get_backend('qasm_simulator')
    generated_numbers = []
    for _ in range(num_samples):
        while True:
            circuit = QuantumCircuit(num_bits, num_bits)
            circuit.h(range(num_bits))
            circuit.measure(range(num_bits), range(num_bits))
            # shots=1 so the single observed bitstring is the sample
            result = backend.run(
                assemble(transpile(circuit, backend=backend), shots=1)).result()
            counts = result.get_counts(circuit)
            candidate = int(list(counts.keys())[0], 2)
            if min_value <= candidate <= max_value:
                break
        # Update the count of the generated number in the dictionary
        number_counts[candidate] = number_counts.get(candidate, 0) + 1
        total_generated += 1
        generated_numbers.append(candidate)
    return generated_numbers
def remove_outliers(data, z_threshold=3):
    """Return *data* without the entries whose |z-score| exceeds *z_threshold*.

    Order of the surviving entries is preserved.  When the input has zero
    variance the z-scores are NaN and every entry is kept (NaN comparisons
    are False), matching the original behaviour.
    """
    z_scores = np.abs(stats.zscore(data))
    return [value for value, z in zip(data, z_scores) if not z > z_threshold]
def plot_bar_chart(remove_outliers_flag=False):
    """Render the histogram of generated numbers and return it as base64 PNG.

    When remove_outliers_flag is True, bars whose occurrence count has
    |z-score| > 3 are dropped, and the module-level histogram is rebuilt
    from the surviving entries (preserving the original side effect).
    """
    global number_counts
    data_keys = list(number_counts.keys())
    data_values = list(number_counts.values())
    if remove_outliers_flag and data_values:
        # BUGFIX: filter keys and values *together* by z-score. The original
        # removed outlier values anywhere in the list but then truncated the
        # key list from the front, pairing counts with the wrong numbers.
        z_scores = np.abs(stats.zscore(data_values))
        kept = [(k, v) for k, v, z in zip(data_keys, data_values, z_scores)
                if not z > 3]
        data_keys = [k for k, _ in kept]
        data_values = [v for _, v in kept]
        number_counts = dict(kept)
    plt.bar(data_keys, data_values)
    plt.xlabel('Number')
    plt.ylabel('Occurrences')
    plt.title('Distribution of Numbers')
    plt.grid(axis='y')
    img = BytesIO()
    plt.savefig(img, format='png')
    img.seek(0)
    plt.close()
    return base64.b64encode(img.getvalue()).decode()
@app.route('/', methods=['GET', 'POST'])
def home():
    """Main page: single-number generation, or a batch of 100 via the
    'generate_100' form field.  Range errors are shown inline."""
    random_number = None
    error_message = None
    if request.method == 'POST':
        try:
            lo = int(request.form['min_value'])
            hi = int(request.form['max_value'])
            if 'generate_100' in request.form:
                batch = generate_numbers(lo, hi, num_samples=100)
                return render_template('index.html', generated_numbers=batch, number_counts=number_counts, total_generated=total_generated)
            random_number = generate_random_number(lo, hi)
        except ValueError as e:
            # covers both non-integer form input and inverted ranges
            error_message = str(e)
    return render_template('index.html', random_number=random_number, number_counts=number_counts, total_generated=total_generated, error_message=error_message)
@app.route('/generate_100_numbers', methods=['POST'])
def generate_100_numbers_route():
    """POST endpoint producing a batch of 100 numbers; JSON error on bad input."""
    try:
        lo = int(request.form['min_value'])
        hi = int(request.form['max_value'])
        batch = generate_numbers(lo, hi, num_samples=100)
        return render_template('index.html', generated_numbers=batch, number_counts=number_counts, total_generated=total_generated)
    except ValueError as e:
        # non-integer form values or an inverted range
        return jsonify({'error': str(e)})
@app.route('/clear')
def clear_numbers():
    """Reset the histogram and counter, then re-render the main page."""
    global number_counts, total_generated
    number_counts, total_generated = {}, 0
    return render_template('index.html', random_number=None, number_counts=number_counts, total_generated=total_generated)
@app.route('/generate_graph', methods=['GET', 'POST'])
def generate_graph():
    """Render the page with the current distribution plot embedded as base64."""
    strip_outliers = request.method == 'POST' and 'remove_outliers' in request.form
    plot = plot_bar_chart(strip_outliers)
    return render_template('index.html', plot=plot, random_number=None, number_counts=number_counts, total_generated=total_generated)
if __name__ == '__main__':
    # debug=True enables the auto-reloader; not suitable for production
    app.run(debug=True)
|
https://github.com/SultanMS/Quantum-Neural-Network
|
SultanMS
|
# Quantum NNN: Ver 0.1, By Sultan Almuhammadi and Sarah Alghamdi
# Date: Dec 1, 2021
from sklearn import model_selection, datasets, svm
from qiskit import QuantumCircuit, Aer, IBMQ, QuantumRegister, ClassicalRegister
import qiskit
import numpy as np
import copy
import matplotlib.pyplot as plt1
import matplotlib.pyplot as plt2
# Binary-classification subset of Iris: the first 100 samples cover
# exactly two classes (labels 0 and 1), all four features.
iris = datasets.load_iris()
X = iris.data[0:100] # first 100 samples (two classes)
Y = iris.target[0:100]
X_train, X_test, Y_train, Y_test, = model_selection.train_test_split(X, Y, test_size=0.33, random_state=42)
print(Y_train)
print(X_train[0])
print (len(Y_train), len(X_train), len(X_test), len(Y_test))
print (X_test)
N = 4  # number of qubits / features used by the circuits below
myX_train = [[0 for i in range(N)] for j in range(len(X_train))]
myX_test = [[0 for i in range(N)] for j in range(len(X_test))]
# Correct the size of the features based on N: truncate to N columns,
# or pad missing columns with 1.0
for i in range(len(X_train)):
    for j in range(N):
        if (j<len(X_train[i])):
            myX_train[i][j]=X_train[i][j]
        else:
            myX_train[i][j] = 1.0
for i in range(len(X_test)):
    for j in range(N):
        if (j<len(X_test[i])):
            myX_test[i][j]=X_test[i][j]
        else:
            myX_test[i][j] = 1.0
print(myX_test)
def feature_map(X):
    """Angle-encode the feature vector X: one RX(x_i) rotation per qubit.

    Returns the circuit together with its single-bit classical register
    (used later for the measurement of qubit 0).
    """
    qreg = QuantumRegister(N)
    creg = ClassicalRegister(1)
    circuit = QuantumCircuit(qreg, creg)
    for qubit_idx, feature in enumerate(X):
        circuit.rx(feature, qubit_idx)
    return circuit, creg
def variational_circuit(qc, theta):
    """Append the variational layer: a CNOT ring followed by RY(theta[i]) on
    each of the N qubits.  Returns the (mutated) circuit."""
    # ring entanglement: 0 -> 1 -> ... -> N-1 -> 0
    for ctrl in range(N - 1):
        qc.cnot(ctrl, ctrl + 1)
    qc.cnot(N - 1, 0)
    for qubit_idx in range(N):
        qc.ry(theta[qubit_idx], qubit_idx)
    return qc
# SSultan Test
# Smoke test: build and draw one example circuit (theta = ~pi/4 per qubit).
X = [5.2, 3.4, 1.4,0.2]
theta = [0.785]*N
myqc, myc = feature_map(X)
myqc = variational_circuit(myqc, theta)
myqc.measure(0, myc)
myqc.draw()
def quantum_nn(X, theta, simulator=True):
    """Run feature map + variational circuit; return P(qubit0 measured as 1).

    - X: feature vector of length N
    - theta: variational angles of length N
    - simulator: if False, run on the IBMQ 'ibmq_bogota' backend (1000 shots)
      instead of the local qasm simulator (10000 shots)
    """
    qc, c = feature_map(X)
    qc = variational_circuit(qc, theta)
    qc.measure(0, c)
    shots= 10000
    backend = Aer.get_backend('qasm_simulator')
    if simulator == False:
        shots = 1000
        provider= IBMQ.load_account()
        backend= provider.get_backend('ibmq_bogota')
    job = qiskit.execute(qc, backend, shots=shots)
    result = job.result()
    counts = result.get_counts(qc)
    # BUGFIX: '1' may be absent from counts when qubit 0 never measures 1
    # (counts['1'] would raise KeyError); default to 0 in that case.
    return counts.get('1', 0) / shots
def loss(prediction, target):
    """Squared error between a prediction and its target."""
    diff = prediction - target
    return diff * diff
def gradient(X, Y, theta):
    """Finite-difference gradient of the loss w.r.t. each theta[i].

    Forward difference with step `delta`.  The baseline prediction at theta
    does not depend on i, so it is evaluated ONCE instead of once per
    parameter -- each evaluation costs a full circuit simulation (the
    original re-ran it inside the loop).
    """
    delta = 0.01
    base_pred = quantum_nn(X, theta)
    base_loss = loss(base_pred, Y)
    grad = []
    for i in range(len(theta)):
        dtheta = copy.copy(theta)
        dtheta[i] += delta
        pred_shift = quantum_nn(X, dtheta)
        grad.append((loss(pred_shift, Y) - base_loss) / delta)
    return np.array(grad)
def accuracy(X, Y, theta):
    """Percentage of samples where the 0.5-thresholded prediction matches
    the 0/1 label."""
    correct = 0
    for idx in range(len(X)):
        prob_one = quantum_nn(X[idx], theta)
        label = Y[idx]
        # threshold at 0.5: below -> class 0, at/above -> class 1
        if (prob_one < 0.5 and label == 0) or (prob_one >= 0.5 and label == 1):
            correct += 1
    return 100.0 * correct / len(Y)
# main program
# Per-sample SGD: forward pass + finite-difference gradient step, 10 epochs.
eta = 0.05  # learning rate
loss_list = []
acc_list = []
theta = np.ones(N)
print(' Epoch\t Loss\t Accuracy')
for k in range(10):
    loss_tmp = []
    for i in range(len(myX_train)):
        X_i = myX_train[i]
        Y_i = Y_train[i]
        prediction = quantum_nn(X_i, theta)
        loss_tmp.append(loss(prediction,Y_i))
        theta =theta - eta * gradient(X_i, Y_i, theta)
    loss_list.append(np.mean(loss_tmp))
    acc = accuracy(myX_train, Y_train, theta)
    acc_list.append(acc)
    print(f'\t{k}\t {loss_list[-1]:.3f}\t {acc:.1f}')
plt1.plot(loss_list)
plt1.xlabel('Epoch')
plt1.ylabel('Loss')
plt1.show()
plt2.plot(acc_list)
plt2.xlabel('Epoch')
plt2.ylabel('Accuracy (%)')
plt2.show()
accuracy(myX_test, Y_test, theta)  # NOTE(review): return value is discarded
X_sample = myX_test[6]
print('The predection of the test sample on a simulator:')
quantum_nn(X_sample, theta)
print('The predection of the test sample (for N = 4) on IBM-Q quantum computer (ibm_bogota):')
quantum_nn(X_sample, theta, simulator = False)
|
https://github.com/baronefr/perceptron-dqa
|
baronefr
|
import tensorflow as tf
import tensorflow.linalg as tfl
import numpy as np
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
# Enable memory growth so TF does not grab all GPU memory up front.
physical_devices = tf.config.list_physical_devices('GPU')
print('GPU devices:', physical_devices)
if physical_devices:
    for device in physical_devices:
        tf.config.experimental.set_memory_growth(device, True)
# Single-qubit operators used to assemble many-qubit Hamiltonians below.
# NOTE: `id` shadows the builtin of the same name.
sigma_x = tfl.LinearOperatorFullMatrix([[0., 1.], [1., 0.]])
sigma_z = tfl.LinearOperatorFullMatrix([[1., 0.], [0., -1.]])
id = tfl.LinearOperatorIdentity(2)
def H_perc(data, labels):
    """Dense perceptron Hamiltonian from +-1 data and labels (batched build).

    data: (n_data, n) array of +-1 features; labels: (n_data,) of +-1.
    Returns a (2**n, 2**n) tensor: sum over data of
    ReLU(-label * sum_i x_i * sigma_z^(i)).
    """
    n_data, n = data.shape
    # define a batch of identity operators where batch is the #data
    id_batch = tfl.LinearOperatorIdentity(2, batch_shape=[n_data])
    # define a batch of sigma_z operators where batch is the #data
    sigma_z_batch = tf.tile(tf.expand_dims(tf.constant([[1., 0.], [0., -1.]]), axis=0), [n_data, 1, 1])
    sigma_z_op = tfl.LinearOperatorFullMatrix(sigma_z_batch)
    # define a batch of data operators where batch is the #data.
    # each coordinate of a single datum is cast into a 2x2 diagonal operator,
    # which will be composed with the corresponding sigma_z operator.
    # result is a list of n [n_data, 2, 2] operators
    data_ops = [tfl.LinearOperatorDiag(tf.repeat(data, [2], axis=1)[:,i:i+2]) for i in range(0, n*2, 2)]
    ops = [tfl.LinearOperatorKronecker([id_batch]*i+[tfl.LinearOperatorComposition([data_ops[i], sigma_z_op])]+[id_batch]*(n-i-1)).to_dense() for i in range(n)]
    # operators are first stacked on the n dimension and reduced, then labels
    # are applied, then the Heaviside step is approximated via relu, and
    # finally everything is reduced over n_data
    return tf.reduce_sum(tf.nn.relu(-tf.reshape(labels, (-1, 1, 1))*tf.reduce_sum(tf.stack(ops), axis=0)), axis=0)# / tf.sqrt(n+0.)
# reimplementation of the previous function with an eye over ram usage
def H_perc_nobatch(data, labels):
    """Memory-friendly H_perc: accumulates one datum at a time instead of
    batching, at the cost of a Python loop.  Same result as H_perc."""
    n_data, n = data.shape
    h_perc = tf.zeros((2**n, 2**n), dtype='float32')
    for i in tqdm(range(n_data), desc='Constructing H_perc'):
        op = tf.zeros((2**n, 2**n), dtype='float32')
        for j in range(n):
            # 2x2 diagonal operator holding the j-th coordinate of datum i
            data_op = tfl.LinearOperatorDiag(tf.repeat(data[i, :], [2], axis=0)[2*j:2*(j+1)])
            op += tfl.LinearOperatorKronecker([id]*j+[tfl.LinearOperatorComposition([data_op, sigma_z])]+[id]*(n-j-1)).to_dense()
            del data_op
        h_perc += tf.nn.relu(-labels[i]*op)
        del op  # free the per-datum dense matrix before the next iteration
    return h_perc #/ tf.sqrt(n+0.)
def H_x(n):
    """Transverse-field Hamiltonian -sum_i sigma_x^(i) on n qubits (dense)."""
    terms = []
    for site in range(n):
        factors = [id] * site + [sigma_x] + [id] * (n - site - 1)
        terms.append(tfl.LinearOperatorKronecker(factors).to_dense())
    return -tf.reduce_sum(tf.stack(terms), axis=0)
def H_z(n):
    """Longitudinal Hamiltonian sum_i sigma_z^(i) on n qubits (dense)."""
    terms = []
    for site in range(n):
        factors = [id] * site + [sigma_z] + [id] * (n - site - 1)
        terms.append(tfl.LinearOperatorKronecker(factors).to_dense())
    return tf.reduce_sum(tf.stack(terms), axis=0)
def H_QA(p, P, Hz, Hx):
    """Annealing Hamiltonian H(s) = s*Hz + (1-s)*Hx with s = p/P, cast to complex128."""
    s = p / P
    interpolated = s * Hz + (1 - s) * Hx
    return tf.cast(interpolated, dtype='complex128')
def init_state(n):
    """Uniform superposition over all 2**n basis states, as complex128."""
    amplitudes = tf.ones((2**n,), dtype='complex128')
    return amplitudes / tf.sqrt(2.**n + 0.j)
# ---- notebook exploration: sanity checks on a 2-qubit toy problem ----
data = tf.constant([[1., -1.], [-1., -1.]])
labels = tf.constant([1., -1.])
h_perc = H_perc(data, labels)
h_perc
# The cell below repeats the internals of H_perc step by step for inspection.
n_data, n = data.shape
# define a batch of identity operators where batch is the #data
id_batch = tfl.LinearOperatorIdentity(2, batch_shape=[n_data])
# define a batch of sigma_z operators where batch is the #data
sigma_z_batch = tf.tile(tf.expand_dims(tf.constant([[1., 0.], [0., -1.]]), axis=0), [n_data, 1, 1])
sigma_z_op = tfl.LinearOperatorFullMatrix(sigma_z_batch)
# define a batch of data operators where batch is the #data.
# each coordinate of a single datum is cast into a 2x2 operator,
# which will be composed with the corresponding sigma_z operator
# result is a list of n [n_data, 2, 2] operators
data_ops = [tfl.LinearOperatorDiag(tf.repeat(data, [2], axis=1)[:,i:i+2]) for i in range(0, n*2, 2)]
ops = [tfl.LinearOperatorKronecker([id_batch]*i+[tfl.LinearOperatorComposition([data_ops[i], sigma_z_op])]+[id_batch]*(n-i-1)).to_dense() for i in range(n)]
# Kronecker-product sanity checks on small matrices
tfl.LinearOperatorKronecker([tfl.LinearOperatorFullMatrix(np.array([[0., 0.], [0., 1.]], 'float32')), id]).to_dense()
tfl.LinearOperatorKronecker([id, tfl.LinearOperatorFullMatrix(np.array([[0., 0.], [0., 1.]], 'float32'))]).to_dense()
n = 10
hadamard = tfl.LinearOperatorFullMatrix(np.array([[1., 1.], [1., -1.]], 'float32') / np.sqrt(2.))
ops = [tfl.LinearOperatorKronecker([id]*i+[hadamard]+[id]*(n-i-1)).to_dense() for i in range(n)]
np.diag(tfl.LinearOperatorKronecker([sigma_z]*10).to_dense())[:4]
tf.reduce_sum(tf.stack(ops), axis=0)[:,0]
# define a batch of sigma_z operators where batch is the #data
sigma_z_batch = tf.tile(tf.expand_dims(tf.constant([[1., 0.], [0., -1.]]), axis=0), [n_data, 1, 1])
sigma_z_op = tfl.LinearOperatorFullMatrix(sigma_z_batch)
# define a batch of data operators where batch is the #data.
# each coordinate of a single datum is cast into a 2x2 operator,
# which will be composed with the corresponding sigma_z operator
# result is a list of n [n_data, 2, 2] operators
data_ops = [tfl.LinearOperatorDiag(tf.repeat(data, [2], axis=1)[:,i:i+2]) for i in range(0, n*2, 2)]
ops = [tfl.LinearOperatorKronecker([id_batch]*i+[tfl.LinearOperatorComposition([data_ops[i], sigma_z_op])]+[id_batch]*(n-i-1)).to_dense() for i in range(n)]
H_perc_nobatch(data, labels)
tfl.eigh(h_perc)
h_x = H_x(10)
h_x = tf.cast(h_x, 'complex64')
eigvals, eigvecs = tfl.eigh(h_x)
# U = V exp(-i Lambda) V^dagger: eigendecomposition-based time evolution
tfl.matmul(eigvecs, tfl.matmul(tfl.diag(tf.exp(-1.j*eigvals)), eigvecs, adjoint_b=True), adjoint_a=False)
datum_ops = [tfl.LinearOperatorDiag(tf.repeat(data[0,:], [2], axis=0)[i:i+2]) for i in range(0, 2*2, 2)]
datum_ops[1].to_dense()
init_state(2)
h_t = H_QA(1, 1_000, h_perc, h_x)
h_t
def ed_qa_step(state, Ht, dt):
    """Exact time-evolution step: return exp(-i*Ht*dt) applied to `state`,
    computed through the eigendecomposition of Ht."""
    eigvals, eigvecs = tfl.eigh(Ht)
    phases = tf.exp(-1.j * eigvals * dt)
    # project onto the eigenbasis, apply the phases, project back
    coeffs = tfl.matvec(eigvecs, state, adjoint_a=True)
    return tfl.matvec(eigvecs, phases * coeffs)
def create_dataset(N : int, features : int):
    """Create dataset as described by ref. paper, i.e. random +-1 values."""
    bits = np.random.randint(2, size=(N, features))
    # map {0, 1} -> {-1, +1}
    return 2 * bits - 1
# ---- exact-diagonalization quantum annealing run ----
tau = 1_000  # total annealing time
P = 1_000    # number of discretization steps
dt = tau/P
N_data = 8
N_feat = 10
data = create_dataset(N_data, N_feat).astype('float32')
labels = tf.ones((N_data), 'float32')
# overwrite the random patterns with the saved reference ones
data = np.load('patterns8_10.npy')
#h_perc = tf.cast(H_perc_nobatch(data, labels), 'complex128')
h_perc = tf.cast(H_perc_nobatch(data, labels), 'complex128')
E0 = tfl.eigh(h_perc)[0][0]  # ground-state energy, used as the loss offset
h_x = tf.cast(H_x(data.shape[1]), 'complex128')
#h_z = tf.cast(H_z(data.shape[1]), 'complex128')
state = init_state(data.shape[1])
loss = []
pbar = tqdm(range(P), desc='ED QA')
for i in pbar:
    h_t = H_QA(i+1, P, h_perc, h_x)
    state = ed_qa_step(state, h_t, dt)
    # residual energy density epsilon(s) = (<H_perc> - E0) / N_feat
    loss.append(tf.cast((tf.tensordot(tf.math.conj(state), tfl.matvec(tf.cast(h_perc, 'complex128'), state), axes=1)-E0)/N_feat, 'float32').numpy())
    pbar.set_postfix({'loss':loss[-1], 'norm':tf.cast(tf.norm(state), 'float32').numpy()})
state
np.save('result', state.numpy())
which_down = tf.where(tf.cast(tfl.eigh(h_perc)[1][0], 'float32') > 0.).numpy()
which_down
state.numpy()[which_down[0]]
plt.plot((np.array(range(P))+1)/P, abs(np.asarray(loss)))
plt.yscale('log')
plt.ylabel(r'$\epsilon(s)$')
plt.xlabel(r'$s$')
plt.grid(alpha=0.2)
# comparison with dQA approach
psi_dqa = np.load('stato2.npy')
psi_qa = np.load('result.npy')
# overlap between the two final states
tf.cast(tf.norm(tfl.matvec(tf.cast(psi_dqa, 'complex128'), psi_qa, adjoint_a=True )), 'float32')
# ---- spectral analysis on a small instance (3 patterns, 4 qubits) ----
# NOTE(review): this cell uses H_perc_diag and ncp, which appear further down
# in this file (originally separate notebook cells) -- execution order matters.
tau = 500
P = 1_000
dt = tau/P
N_data = 3
N_feat = 4
data = create_dataset(N_data, N_feat)
labels = np.ones((N_data), 'float32')
h_perc_diag = H_perc_diag(data, labels)
E0 = np.sort(h_perc_diag)[0]  # ground-state energy
h_x = H_x(data.shape[1])
h_perc = np.diag(h_perc_diag)
eigvals_array = np.empty((P, 2**N_feat))
pbar = tqdm(range(P), desc='Spectral analysis')
for i in pbar:
    h_t = H_QA(i+1, P, h_perc, h_x)
    eigvals, eigvecs = ncp.linalg.eigh(h_t)
    eigvals_array[i] = np.real_if_close(eigvals.get())
np.save('eigvals_3-4', eigvals_array)
plt.plot(np.linspace(0, 1, P), eigvals_array[:,::1], lw=1.8)
plt.xlabel(r'$s(t)$', fontsize=14)
plt.ylabel(r'Energy [$a.u.$]', fontsize=14)
plt.tick_params('both', which='major', labelsize=12)
plt.title('Quantum Annealing\neigenvalues evolution', fontsize=16)
#plt.grid(alpha=0.3)
plt.savefig('qa_eigevolution.svg')
plt.savefig('qa_eigevolution.svg')
# residual energy density of the dQA final state (psi_dqa loaded earlier)
state = tf.cast(psi_dqa.squeeze(), 'complex128')
epsilon = (tf.cast(tf.tensordot(tf.math.conj(state), tfl.matvec(h_perc, state), axes=1), 'float32').numpy()-E0)/N_data
epsilon
state
import matplotlib.pyplot as plt
import numpy as np
import cupy
import math
from datetime import datetime
from tqdm.notebook import tqdm
from importlib.util import find_spec
# Select the numerical backend: cupy on GPU, numpy otherwise.
# NOTE(review): the unconditional `import cupy` above already raises
# ImportError on machines without cupy, defeating this fallback.
GPU = True
if GPU:
    if find_spec('cupy') is not None:
        import cupy as ncp
    else:
        print('Selected device is GPU but cupy is not installed, falling back to numpy')
        import numpy as ncp
else:
    import numpy as ncp
def kronecker_prod(operators):
    """Fold a sequence of arrays into their Kronecker product.

    Builds right-to-left; the Kronecker product is associative, so the
    result equals a left-to-right fold.
    """
    product = operators[-1]
    for factor in reversed(operators[:-1]):
        product = ncp.kron(factor, product)
    return product
def ReLU(x):
    """Elementwise rectifier: x where x > 0, else 0 (scalars and arrays)."""
    return (x > 0) * x
def H_perc_diag(data, labels):
    """Return only the diagonal of the perceptron Hamiltonian (length 2**n).

    Since sigma_z is diagonal, H_perc is diagonal in the computational basis;
    working with 1-D diagonals avoids allocating 2**n x 2**n matrices.
    Note: this version divides by sqrt(n), unlike the dense TF builders.
    """
    ## ASSUMING H_PERC IS DIAGONAL, WHICH IS IN THE COMPUTATIONAL BASIS ##
    n_data, n = data.shape
    identity = ncp.ones((2,), 'float64')   # diagonal of the 2x2 identity
    sigma_z = ncp.array([1., -1.])         # diagonal of sigma_z
    h_perc = ncp.zeros((2**n,), dtype='float64')
    for i in range(n_data):
        op = ncp.zeros((2**n,), dtype='float64')
        for j in range(n):
            op += kronecker_prod([identity]*j+[data[i, j] * sigma_z]+[identity]*(n-j-1))
        h_perc += ReLU(-labels[i]*op)
        del op  # free the per-datum vector before the next iteration
    return h_perc / ncp.sqrt(n)
def H_x(n):
    """Dense transverse-field Hamiltonian -sum_j sigma_x^(j) on n qubits."""
    identity = ncp.diag([1., 1.])
    sigma_x = ncp.array([[0., 1.], [1., 0.]])
    total = ncp.zeros((2**n, 2**n), dtype='float64')
    for site in range(n):
        factors = [identity] * site + [sigma_x] + [identity] * (n - site - 1)
        total += kronecker_prod(factors)
    return -total
def H_QA(p, P, Hz, Hx):
    """Interpolated annealing Hamiltonian (p/P)*Hz + (1-p/P)*Hx, complex128."""
    s = p / P
    interpolated = s * Hz + (1 - s) * Hx
    return interpolated.astype('complex128')
def init_state(n):
    """Uniform superposition over all 2**n basis states, as complex128."""
    dim = 2 ** n
    return (ncp.ones((dim,)) / ncp.sqrt(dim)).astype('complex128')
def ed_qa_step(state, Ht, dt):
    """One exact-diagonalization step: apply exp(-i*Ht*dt) to `state`."""
    eigvals, eigvecs = ncp.linalg.eigh(Ht)
    phases = ncp.exp(-1.j * eigvals * dt)
    # eigenbasis coefficients -> phase rotation -> back to computational basis
    coeffs = ncp.dot(eigvecs.transpose().conjugate(), state)
    return ncp.dot(eigvecs, phases * coeffs)
def create_dataset(N : int, features : int):
    """Create dataset as described by ref. paper, i.e. random +-1 values."""
    bits = np.random.randint(2, size=(N, features))
    # map {0, 1} -> {-1, +1}
    return 2 * bits - 1
# ---- scaling benchmark: RAM and runtime of ED-QA vs #qubits / #patterns ----
tau = 1_000
P = 5    # few steps: enough to time a single annealing step reliably
dt = 0.5
N_feats = np.array([i for i in range(1, 14)])
N_datas = np.arange(1, np.floor(0.83*N_feats[[-1]]), dtype='int64')
memory = cupy.get_default_memory_pool()  # sampled to track GPU memory usage
#data = np.load('../data/patterns8_10.npy')
N_datas
# used_ram_tot: 4 checkpoints per step; times: last slot holds the setup time
used_ram_tot = np.empty((N_feats.shape[0], N_datas.shape[0], P, 4))
times = np.empty((N_feats.shape[0], N_datas.shape[0], P+1))
data_pbar = tqdm(total=len(N_datas), desc='testing N_data', leave=True)
p_pbar = tqdm(total=P, desc='ED QA', leave=True)
for m, N_feat in enumerate(tqdm(N_feats, desc='testing N_feat')):
    for n, N_data in enumerate(N_datas):
        # setup phase (timed separately into the last slot of `times`)
        start = datetime.now()
        data = create_dataset(N_data, N_feat)
        labels = np.ones((N_data), 'float32')
        h_perc_diag = H_perc_diag(data, labels)
        E0 = np.sort(h_perc_diag)[0]
        h_x = H_x(data.shape[1])
        h_perc = np.diag(h_perc_diag)
        #h_z = tf.cast(H_z(data.shape[1]), 'complex128')
        state = init_state(data.shape[1])
        end = datetime.now()
        # NOTE(review): .microseconds is only the sub-second component;
        # .total_seconds() would be robust for longer runs.
        times[m, n, -1] = (end-start).microseconds
        loss = []
        for i in range(P):
            start = datetime.now()
            h_t = H_QA(i+1, P, h_perc, h_x)
            used_ram_tot[m, n, i, 0] = memory.used_bytes()
            state = ed_qa_step(state, h_t, dt)
            used_ram_tot[m, n, i, 1] = memory.used_bytes()
            loss.append(ncp.real_if_close((ncp.tensordot(state.conjugate(), ncp.dot(h_perc, state), axes=1)-E0)/N_feat))
            end = datetime.now()
            times[m, n, i] = (end-start).microseconds
            # NOTE(review): `pbar` is a leftover name from an earlier cell;
            # `p_pbar` is probably intended here.
            pbar.set_postfix({'loss':ncp.real_if_close(loss[-1]), 'norm':ncp.real_if_close(ncp.linalg.norm(state))})
            used_ram_tot[m, n, i, 2] = memory.used_bytes()
            del h_t
            used_ram_tot[m, n, i, 3] = memory.used_bytes()
            p_pbar.update()
        del state
        p_pbar.refresh()
        p_pbar.reset()
        data_pbar.update()
        memory.free_all_blocks()
    data_pbar.refresh()
    data_pbar.reset()
# ---- load saved benchmark results and plot heatmaps / line plots ----
used_ram_tot = np.load('perf_ram.npy')
times = np.load('perf_time.npy')
FONTSIZE=12
fig, (ax_ram, ax_time) = plt.subplots(2, 1, figsize=(8, 7), tight_layout=True, sharex=True)
ram_cmap = ax_ram.imshow(used_ram_tot[:,:,:,2].mean(axis=2).T/1e3, cmap='plasma', norm='log', origin='lower')
time_cmap = ax_time.imshow((times[:,:,:-1].mean(axis=2) + times[:,:,-1]).T/1e3, cmap='viridis', norm='log', origin='lower')
ax_ram.set_ylabel('#patterns', fontsize=FONTSIZE+2)
ax_ram.tick_params(axis='y', which='major', labelsize=FONTSIZE)
ax_ram.set_yticks(range(0,len(N_datas),2), labels=N_datas[::2])
ax_ram.set_title(r'RAM usage [$KB$]', fontsize=FONTSIZE+4)
ax_time.set_xlabel('#qbits', fontsize=FONTSIZE+2)
ax_time.set_ylabel('#patterns', fontsize=FONTSIZE+2)
ax_time.tick_params(axis='both', which='major', labelsize=FONTSIZE)
ax_time.set_xticks(range(0,len(N_feats),2), labels=N_feats[::2])
ax_time.set_yticks(range(0,len(N_datas),2), labels=N_datas[::2])
ax_time.set_title(r'Execution time [$ms$]', fontsize=FONTSIZE+4)
fig.colorbar(ram_cmap, ax=ax_ram)
fig.colorbar(time_cmap, ax=ax_time)
fig.suptitle('Exact diagonalization performances', fontsize=FONTSIZE+6, x=0.5845)
fig.savefig('ed_performances.svg')
plt.imshow(times[:,:,-1].T, cmap='plasma', origin='lower')
plt.plot(((ncp.array(range(P))+1)/P).get(), ncp.asarray(loss).get())
plt.yscale('log')
plt.ylabel(r'$\epsilon(s)$')
plt.xlabel(r'$s$')
plt.grid(alpha=0.2)
# RAM and time vs #qubits, averaged over #patterns, on twin log axes
FONTSIZE=12
fig, ax = plt.subplots(figsize=(5, 4), tight_layout=True)
handle1 = ax.plot(N_feats, used_ram_tot[:,:,2].mean(axis=1)/1e3, label='RAM', lw=2)
ax.set_yscale('log')
ax.set_xlabel('#qbits', fontsize=FONTSIZE+2)
ax.set_xticks(N_feats[1::2])
ax.tick_params(axis='both', which='major', labelsize=FONTSIZE)
ax.set_ylabel(r'RAM usage [$KB$]', fontsize=FONTSIZE+2)
ax2 = ax.twinx()
handle2 = ax2.plot(N_feats, times[:,:-1].mean(axis=1)/1e3, c='tab:orange', label='Time', lw=2)
ax2.set_yscale('log')
ax2.tick_params(axis='y', which='major', labelsize=FONTSIZE)
ax2.set_ylabel(r'Execution time [$ms$]', fontsize=FONTSIZE+2)
ax.legend(handles=handle1+handle2, fontsize=FONTSIZE+2)
ax.set_title('Exact diagonalization performances', fontsize=FONTSIZE+4)
ax.grid(alpha=0.3)
ax2.grid(alpha=0.3)
plt.plot(N_feats, times[:,-1])
# NOTE(review): the block below duplicates the figure above verbatim.
FONTSIZE=12
fig, ax = plt.subplots(figsize=(5, 4), tight_layout=True)
handle1 = ax.plot(N_feats, used_ram_tot[:,:,2].mean(axis=1)/1e3, label='RAM', lw=2)
ax.set_yscale('log')
ax.set_xlabel('#qbits', fontsize=FONTSIZE+2)
ax.set_xticks(N_feats[1::2])
ax.tick_params(axis='both', which='major', labelsize=FONTSIZE)
ax.set_ylabel(r'RAM usage [$KB$]', fontsize=FONTSIZE+2)
ax2 = ax.twinx()
handle2 = ax2.plot(N_feats, times[:,:-1].mean(axis=1)/1e3, c='tab:orange', label='Time', lw=2)
ax2.set_yscale('log')
ax2.tick_params(axis='y', which='major', labelsize=FONTSIZE)
ax2.set_ylabel(r'Execution time [$ms$]', fontsize=FONTSIZE+2)
ax.legend(handles=handle1+handle2, fontsize=FONTSIZE+2)
ax.set_title('Exact diagonalization performances', fontsize=FONTSIZE+4)
ax.grid(alpha=0.3)
ax2.grid(alpha=0.3)
plt.plot(N_feats, times[:,-1])
def H_perc2(state, data, labels):
    """State-dependent perceptron Hamiltonian.

    For each datum, build -label * sum_j x_j sigma_z^(j) and accumulate it
    only when its expectation value on `state` is non-negative (a ReLU-like
    gate evaluated on the current state).

    data: (n_data, n) +-1 matrix; labels: (n_data,) +-1 vector;
    state: length-2**n complex vector.  Returns a (2**n, 2**n) complex128 tensor.
    """
    n_data, n = data.shape
    h_perc = tf.zeros((2**n, 2**n), dtype='complex128')
    ident = tfl.LinearOperatorIdentity(2)
    sigma_z_op = tfl.LinearOperatorFullMatrix(tf.constant([[1., 0.], [0., -1.]]))
    for i in range(n_data):
        datum_ops = [tfl.LinearOperatorDiag(tf.repeat(data[i,:], [2], axis=0)[j:j+2]) for j in range(0, n*2, 2)]
        # BUGFIX: the site index inside the comprehension must be the loop
        # variable j, not the outer datum index i -- the original built n
        # copies of the same single-site operator (and indexed past the end
        # for i >= n).
        ops = [tfl.LinearOperatorKronecker([ident]*j+[tfl.LinearOperatorComposition([sigma_z_op, datum_ops[j]])]+[ident]*(n-j-1)).to_dense() for j in range(n)]
        result_op = tf.cast(-tf.reshape(labels[i], (1, 1))*tf.reduce_sum(tf.stack(ops), axis=0), 'complex128')
        # skip terms with negative expectation value on the current state
        if tf.cast(tf.tensordot(tf.math.conj(state), tfl.matvec(result_op, state), axes=1), 'float32') < 0.:
            continue
        h_perc = h_perc + result_op
    return h_perc
|
https://github.com/baronefr/perceptron-dqa
|
baronefr
|
# ====================================================
# Quantum Information and Computing exam project
#
# UNIPD Project | AY 2022/23 | QIC
# group : Barone, Coppi, Zinesi
# ----------------------------------------------------
# > description |
#
# class setup of dQA execution
# ----------------------------------------------------
# coder : Zinesi Paolo
# dated : 27 March 2023
# ver : 1.0.0
# ====================================================
from qiskit import QuantumCircuit, QuantumRegister, AncillaRegister
from qiskit.circuit.library import QFT, IntegerComparator
import numpy as np
class HammingEvolution:
"""
Class to generate all the modules of Heaviside evolution circuit consistently.
"""
    def __init__(self, num_data_qubits : int) -> None:
        """Build the base circuit: data qubits plus counting ancillas, and
        (when num_data_qubits+1 is not a power of two) comparison ancillas."""
        # infer number ancillas used to count, number of ancillas used to compare Hamming distance
        self._num_data_qubits = num_data_qubits
        self._num_count_ancillas = int(np.ceil(np.log2(self._num_data_qubits+1)))
        # in this situation the comparison is really simple (presumably the
        # top counter bit alone decides the threshold; no extra comparator)
        self._simple_compare = (self._num_data_qubits + 1 == 2**self._num_count_ancillas)
        # circuit initializer
        self._data_qubits = QuantumRegister(self._num_data_qubits)
        self._count_ancillas = AncillaRegister(self._num_count_ancillas)
        self._qc = QuantumCircuit(self._data_qubits, self._count_ancillas)
        # initialize comparison ancillas if necessary
        if not self._simple_compare:
            self._num_comp_ancillas = self._num_count_ancillas
            self._comp_ancillas = AncillaRegister(self._num_count_ancillas)
            self._qc.add_register(self._comp_ancillas)
        # ancilla in which the Heaviside control will be stored
        if self._simple_compare:
            self._control_ancilla = self._count_ancillas[-1]
        else:
            self._control_ancilla = self._comp_ancillas[0]
    # ---- read-only accessors ----
    @property
    def num_data_qubits(self):
        return self._num_data_qubits
    @property
    def num_count_ancillas(self):
        return self._num_count_ancillas
    @property
    def simple_compare(self):
        return self._simple_compare
    @property
    def data_qubits(self):
        return self._data_qubits
    @property
    def count_ancillas(self):
        return self._count_ancillas
    @property
    def qc(self):
        # a copy, so callers can extend it without mutating the template
        return self._qc.copy()
    @property
    def num_comp_ancillas(self):
        # zero when the threshold comparison needs no dedicated register
        if self._simple_compare:
            return 0
        else:
            return self._num_comp_ancillas
    @property
    def comp_ancillas(self):
        if self._simple_compare:
            return []
        else:
            return self._comp_ancillas
    @property
    def num_ancillas(self):
        return self.num_count_ancillas + self.num_comp_ancillas
    @property
    def ancillas(self):
        return list(self.count_ancillas) + list(self.comp_ancillas)
    @property
    def qubits(self):
        return list(self.data_qubits) + list(self.count_ancillas) + list(self.comp_ancillas)
    @property
    def control_ancilla(self):
        # the qubit that ends up holding the Heaviside control flag
        return self._control_ancilla
def init_state_plus(self):
"""
Generate a circuit where all the qubits are initialized at |+> = H|0> intead of simply |0>.
"""
# return a new copy of the circuit, but with the same number of qubits for consistency
circ = self.qc.copy()
for iq in range(self.num_data_qubits):
circ.h(self.data_qubits[iq])
return circ
    def Hamming_count(self, train_data):
        """
        Generate circuit of `self.num_data_qubits` qubits that counts the Hamming distance from the training data.
        The count is stored in the `self.count_ancillas` qubits.
        - train_data: vector of training data.
        Conventions:
        - (1,-1) <--> (|0>,|1>)
        - little endians: least significant bit is the last one of the string
        """
        assert len(train_data) == self.num_data_qubits, "Wrong dimension of training data"
        # return a new copy of the circuit, but with the same number of qubits for consistency
        circ = self.qc.copy()
        # flip only when the training data is -1: in this way the circuit can simply count the number
        # of states that are |1>
        # little endians convention is applied !!! train_data[::-1] !!!
        for iq, train_data_i in enumerate(train_data[::-1]):
            if train_data_i == -1:
                circ.x(self.data_qubits[iq])
        # initial Hadamards to create superposition in the counter register
        for ia in range(self.num_count_ancillas):
            circ.h(self.count_ancillas[ia])
        # Phase estimation: kick back a phase proportional to the number of
        # data qubits in |1> onto the counter register
        for ia in range(self.num_count_ancillas):
            # the order is from the lowest index of the ancilla to the highest
            n_reps = 2**ia
            # repeat n_reps times the application of the unitary gate controlled on the ancillary qubit
            for rep_idx in range(n_reps):
                for iq in range(self.num_data_qubits):
                    circ.cp(2*np.pi/2**self.num_count_ancillas, self.count_ancillas[ia], self.data_qubits[iq])
        # invert flip applied previously to count the number of |1>
        # little endians convention is applied !!! train_data[::-1] !!!
        for iq, train_data_i in enumerate(train_data[::-1]):
            if train_data_i == -1:
                circ.x(self.data_qubits[iq])
        circ.barrier()
        # inverse QFT turns the accumulated phase into a binary count
        qft_circ = QFT(self.num_count_ancillas, inverse=True).decompose(reps=1)
        circ = circ.compose(qft_circ, self.count_ancillas)
        # add an additional comparison circuit if needed
        if not self.simple_compare:
            circ = circ.compose(IntegerComparator(self.num_count_ancillas, int(np.ceil(self.num_data_qubits/2.0)), geq=True).decompose(reps=1),
                                qubits=self.ancillas)
        return circ
def U_z(self, train_data, gamma):
"""
Generate circuit for Uz evolution according to the training data and the value of gamma.
- train_data: vector of training data.
- gamma: multiplicative float in the time evolution definition.
Conventions:
- (1,-1) <--> (|0>,|1>)
- little endians: least significant bit is the last one of the string
"""
assert len(train_data) == self.num_data_qubits, "Wrong dimension of training data"
# return a new copy of the circuit, but with the same number of qubits for consistency
circ = self.qc.copy()
circ.barrier()
#ย define controlled operation on the 'ancilla_index'
# little endians convention is applied !!! iq and idata goes on opposite directions !!!
for iq, idata in zip(range(self.num_data_qubits),range(len(train_data)-1,-1,-1)):
circ.crz(-2*gamma*train_data[idata]/np.sqrt(self.num_data_qubits), self.control_ancilla, self.data_qubits[iq])
circ.barrier()
return circ
def U_x(self, beta):
"""
Generate circuit for Ux evolution according to the value of beta.
- beta: multiplicative float in the time evolution definition.
"""
# return a new copy of the circuit, but with the same number of qubits for consistency
circ = self.qc.copy()
circ.barrier()
for iq in range(self.num_data_qubits):
circ.rx(-2*beta, self.data_qubits[iq])
return circ
    def single_step_composer(self, qc, dataset, beta_p : float, gamma_p : float, tracking_function = None):
        """Define how a circuit is composed for each step in dQA.

        - qc: circuit to extend in place (a fresh copy of the base circuit when None)
        - dataset: (n_patterns, n_features) array of training patterns
        - beta_p, gamma_p: time-evolution parameters of this annealing step
        - tracking_function: optional callable (e.g. a LossTracker.track) receiving
          the list of sub-circuits applied in this step
        """
        if qc is None: qc = self.qc.copy()
        for mu in range( dataset.shape[0] ):
            # create Hamming error counter circuit based on the given pattern
            qc_counter = self.Hamming_count(train_data = dataset[mu,:])
            qc_counter_inverse = qc_counter.inverse()
            # create Uz evolution circuit
            qc_Uz = self.U_z(train_data = dataset[mu,:], gamma=gamma_p)
            # compose all circuits to evolve according to Uz;
            # the counter is uncomputed right after the phase rotation
            qc.compose(qc_counter, inplace=True)
            qc.compose(qc_Uz, inplace=True)
            qc.compose(qc_counter_inverse, inplace=True)
        # create and apply Ux evolution circuit (once per step, after all patterns)
        qc_Ux = self.U_x(beta_p)
        qc.compose(qc_Ux, inplace=True)
        if tracking_function is not None:
            # NOTE(review): qc_counter/qc_Uz/qc_counter_inverse here are the ones of the
            # LAST pattern only — confirm this is the intended tracking granularity
            tracking_function( [qc_counter, qc_Uz, qc_counter_inverse, qc_Ux], compose=True)
        return qc
|
https://github.com/baronefr/perceptron-dqa
|
baronefr
|
import numpy as np
from tqdm import tqdm
import qiskit.quantum_info as qi
from qiskit import QuantumCircuit, QuantumRegister, AncillaRegister
GPU = True  # request the GPU array backend (cupy) when available

from importlib.util import find_spec

# pick the array module once; the rest of the file uses `ncp` for
# backend-agnostic (numpy/cupy) array code
if GPU:
    if find_spec('cupy') is not None:
        import cupy as ncp
    else:
        print('Selected device is GPU but cupy is not installed, falling back to numpy')
        import numpy as ncp
else:
    import numpy as ncp
####################################
#### GET PERCEPTRON HAMILTONIAN ####
####################################
def kronecker_prod(operators):
    """Left-to-right Kronecker product of a sequence of arrays."""
    product, *rest = operators
    for factor in rest:
        product = np.kron(product, factor)
    return product
def ReLU(x):
    """Elementwise rectified linear unit: x where x > 0, else 0."""
    positive_mask = x > 0
    return x * positive_mask
def H_perc_nobatch(data, labels):
    """
    Build the dense perceptron Hamiltonian as a 2^n x 2^n complex matrix.
    - data: (n_data, n) array of patterns (entries multiply sigma_z per site)
    - labels: (n_data,) label vector (the demo below uses all ones)
    NOTE(review): memory grows as 4^n — only viable for small n; see
    H_perc_diag for the diagonal-only variant.
    """
    n_data, n = data.shape
    sigma_z = np.diag([1., -1.])
    identity = np.diag([1., 1.])
    h_perc = np.zeros((2**n, 2**n), dtype='float32')
    for i in tqdm(range(n_data), desc='Constructing H_perc'):
        # op = sum_j data[i,j] * sigma_z acting on site j (identity elsewhere)
        op = np.zeros((2**n, 2**n), dtype='float32')
        for j in range(n):
            op += kronecker_prod([identity]*j+[data[i, j] * sigma_z]+[identity]*(n-j-1))
        # ReLU keeps only the misclassified contribution of pattern i
        h_perc += ReLU(-labels[i]*op)
        del op
    return (h_perc / np.sqrt(n)).astype('complex')
def H_perc_diag(data, labels):
    """
    Build the perceptron Hamiltonian as its length-2^n diagonal only
    (O(2^n) memory instead of O(4^n)); returned on the `ncp` backend.
    """
    ## ASSUMING H_PERC IS DIAGONAL, WHICH IS IN THE COMPUTATIONAL BASIS ##
    n_data, n = data.shape
    # 1-D stand-ins for identity and sigma_z: only diagonals are tracked
    identity = np.ones((2,), 'float32')
    sigma_z = np.array([1., -1.])
    h_perc = np.zeros((2**n,), dtype='float32')
    for i in tqdm(range(n_data), desc='Constructing H_perc'):
        op = np.zeros((2**n,), dtype='float32')
        for j in range(n):
            op += kronecker_prod([identity]*j+[data[i, j] * sigma_z]+[identity]*(n-j-1))
        # ReLU keeps only the misclassified contribution of pattern i
        h_perc += ReLU(-labels[i]*op)
        del op
    return ncp.array((h_perc / np.sqrt(n)).astype('complex128'))
########################
##### LOSS TRACKER #####
########################
class LossTracker:
    """
    Track the statevector of a circuit across dQA steps and evaluate the
    perceptron loss <psi|H_perc|psi> on each tracked state.

    The tracker keeps a list of qiskit Statevectors; each call to `track`
    evolves the last stored state through the given circuit(s).
    """

    def __init__(self, num_qubits, num_ancillae, init_state):
        """
        - num_qubits: number of data qubits (loss is evaluated on these)
        - num_ancillae: number of ancillary qubits (traced out, or padded
          with identities in the *_opt variants)
        - init_state: initial state, a qi.Statevector or a QuantumCircuit
        """
        self.n_qubits = num_qubits
        self.n_ancillae = num_ancillae
        if type(init_state) is qi.Statevector:
            self._statevecs = [init_state]
        elif type(init_state) is QuantumCircuit:
            self._statevecs = [qi.Statevector.from_instruction(init_state)]
        else:
            # NOTE(review): invalid input leaves _statevecs unset; a later
            # call to track() will then raise AttributeError
            print('type', type(init_state), 'of init_state is not valid')
        self._h_perc = None
        self._little_endian = True
        self._statevecs_arr = None

    def track(self, qc, compose=True):
        """Evolve the last tracked state through `qc` (a circuit or a list of circuits)."""
        if compose:
            # create a copy of the current circuit internally
            self.current_qc = QuantumCircuit(QuantumRegister(self.n_qubits), AncillaRegister(self.n_ancillae))
            # compose the circuit
            if type(qc) is list:
                for circuit in qc:
                    self.current_qc.compose(circuit, inplace=True)
            elif type(qc) is QuantumCircuit:
                self.current_qc.compose(qc, inplace=True)
            else:
                print('Error: type of qc is', type(qc))
                return
        else:
            self.current_qc = qc
        # track the state
        self._statevecs.append(self._statevecs[-1].evolve(self.current_qc))
        del self.current_qc

    @property
    def statevecs(self):
        """List of tracked Statevectors; the first entry is the initial state."""
        return self._statevecs

    @statevecs.setter
    def statevecs(self, value):
        # BUGFIX: the setter function must carry the same name as the property.
        # It was previously `def set_statevecs`, which left `statevecs` read-only
        # (assigning `tracker.statevecs = v` raised AttributeError).
        self._statevecs = [value]

    @property
    def h_perc(self):
        """Cached Hamiltonian diagonal (None until a get_losses* call builds it)."""
        return self._h_perc

    def reset(self, num_qubits=None, num_ancillae=None):
        """Drop tracked states (and finalized arrays); optionally resize the registers."""
        # NOTE(review): this empties _statevecs without restoring an initial
        # state, so track() will fail until statevecs is set again
        self._statevecs.clear()
        self._statevecs_arr = None
        if num_qubits:
            self.n_qubits = num_qubits
        if num_ancillae:
            self.n_ancillae = num_ancillae

    def finalize(self):
        """Convert tracked statevectors to backend arrays, keeping only the data qubits."""
        ## convert statevectors to arrays, keep only qubits of interest
        arr_list = []
        for state in tqdm(self._statevecs, desc='finalizing LossTracker'):
            # trace out the ancillae, then take the dominant eigenvector of the
            # reduced density matrix as the (assumed pure) data-qubit state
            out_red = qi.partial_trace(state, range(self.n_qubits, self.n_qubits + self.n_ancillae))
            prob, st_all = ncp.linalg.eigh(ncp.array(out_red.data))
            idx = ncp.argmax(prob)
            arr_list.append(st_all[:, idx].copy())
            del out_red, prob, st_all, idx
        self._statevecs_arr = ncp.stack(arr_list)
        del arr_list

    def finalize_opt(self):
        """Fast finalization: keep full statevectors (ancillae handled by padding H with identities)."""
        ## instead of tracing out qubits we can simply modify the hamiltonian by putting
        ## identities on the ancillary qubits, remembering that qiskit uses little endians.
        self._statevecs_arr = ncp.array(np.stack([state.data for state in self._statevecs]))

    def __loss(self, statevec, h_perc):
        # <psi|H|psi> for a diagonal H stored as a vector
        return ncp.vdot(statevec, h_perc * statevec)

    def get_losses(self, data, little_endian=True, labels=None):
        """Loss <psi|H_perc|psi> for every tracked state (ancillae traced out)."""
        if len(self._statevecs) == 0:
            print('Error: no statevectors has been tracked down, please call track() before')
            return
        if labels is None:
            labels = np.ones((data.shape[0],))
        if self._statevecs_arr is None:
            print('LossTracker was not finalized, finalizing...')
            self.finalize()
            print('Done!')
        # (re)build the Hamiltonian diagonal if missing or endianness changed
        if self._h_perc is None or self._little_endian != little_endian:
            if little_endian:
                self._h_perc = H_perc_diag(data, labels)
            else:
                # invert data components if the circuit was constructed in big endian mode
                # NOT SURE IF THIS WORKS
                self._h_perc = H_perc_diag(data[:,::-1], labels)
            self._little_endian = little_endian
        result = ncp.real_if_close(ncp.apply_along_axis(self.__loss, axis=1, arr=self._statevecs_arr, h_perc=self._h_perc))
        # cupy arrays are moved back to host memory before returning
        if type(result) is np.ndarray: return result
        else: return result.get()

    def get_losses_opt(self, data, little_endian=True, labels=None):
        """Like get_losses, but pads H_perc with identities on the ancillae instead of tracing."""
        if len(self._statevecs) == 0:
            print('Error: no statevectors has been tracked down, please call track() before')
            return
        if labels is None:
            labels = np.ones((data.shape[0],))
        if self._statevecs_arr is None:
            print('LossTracker was not finalized, finalizing...')
            self.finalize_opt()
            print('Done!')
        if self._h_perc is None or self._little_endian != little_endian:
            if little_endian:
                self._h_perc = kronecker_prod([ncp.ones((2,))]*self.n_ancillae + [H_perc_diag(data, labels)])
            else:
                # invert data components if the circuit was constructed in big endian mode
                # NOT SURE IF THIS WORKS
                self._h_perc = kronecker_prod([ncp.ones((2,))]*self.n_ancillae + [H_perc_diag(data[:,::-1], labels)])
            self._little_endian = little_endian
        result = ncp.real_if_close(ncp.apply_along_axis(self.__loss, axis=1, arr=self._statevecs_arr, h_perc=self._h_perc))
        if type(result) is np.ndarray: return result
        else: return result.get()

    def get_edensity(self, data, little_endian=True, opt=True, labels=None):
        """Energy density (loss - ground energy) / n_features for every tracked state."""
        if opt:
            losses = self.get_losses_opt(data, little_endian=little_endian, labels=labels)
        else:
            losses = self.get_losses(data, little_endian=little_endian, labels=labels)
        # ground-state energy = smallest diagonal entry of H_perc
        e0 = np.real_if_close(np.sort(self._h_perc if type(self._h_perc) is np.ndarray else self._h_perc.get())[0])
        self.e0 = e0
        return (losses-e0) / data.shape[1]
if __name__ == '__main__':
    # example code: demonstrates tracking losses across a toy 4-step evolution
    def your_evolution(p, num_qubits, num_ancillae):
        # implement your evolution here (dummy gates, one variant per step p)
        qc = QuantumCircuit(num_qubits+num_ancillae)
        if p == 1:
            qc.h(range(num_qubits))
        if p == 2:
            qc.h(range(num_qubits))
            qc.x(range(num_qubits, num_qubits+num_ancillae))
        if p == 3:
            qc.x( 3)
        if p == 4:
            qc.x(2)
        return qc

    # toy dataset: 3 patterns, 4 features each, trivial labels
    N_data = 3
    N_feat = 4
    data = np.array([[1., 1., 1., 1.],
                     [1., -1., 1., -1.],
                     [1., -1., 1., 1.]])
    labels = np.ones((N_data, ))

    P = 4
    num_qubits = 4
    num_ancillae = 2

    qc_tot = QuantumCircuit(num_qubits + num_ancillae)
    loss_tracker = LossTracker(num_qubits, num_ancillae, init_state=qc_tot)

    # apply evolution step by step, tracking the state after each step
    for p in range(P):
        qc = your_evolution(p+1, num_qubits, num_ancillae)
        loss_tracker.track(qc, compose=True)
        qc_tot = qc_tot.compose(qc)

    qc_tot.draw()
    print(loss_tracker.get_losses(data, little_endian=True))
|
https://github.com/baronefr/perceptron-dqa
|
baronefr
|
import quimb as qu
import quimb.tensor as qtn
import numpy as np
import numpy.fft as fft
import matplotlib.pyplot as plt
from tqdm import tqdm
# to use alternative backends:
#qtn.contraction.set_contract_backend('torch')
# ---- hyperparameters of the dQA run ----
N = 12 # number of spins/sites/parameters/qubits
P = 100 # total number of QA steps // should be 100/1000
dt = 0.5 # time interval
# Note: tau (annealing time) will be fixed as P*dt
max_bond = 10 # MPS max bond dimension
N_xi = 9 # dataset size (number of patterns)
def apply_compress(mpo, mps, max_bond = 8, method = 'svd'):
    """Apply mpo to mps and compress to max bond dimension."""
    # note: prev default arg method = 'svds'
    # NOTE(review): relies on quimb's private MPO._apply_mps — re-check on quimb upgrades
    return mpo._apply_mps(mps, compress=True, method=method, max_bond=max_bond)
def create_dataset(N : int, features : int):
    """Create dataset as described by ref. paper, i.e. random +-1 values."""
    samples = np.random.randint(2, size=(N, features))
    # map the {0,1} draws onto the {-1,+1} spin encoding
    return np.where(samples == 0, -1, samples)
def make_Ux(N, beta_p, dtype = np.complex128):
    """Return as MPO the U_x evolution operator at time-parameter beta_p."""
    # single-site propagator: cos(beta) on the diagonal, i*sin(beta) off-diagonal
    tb = np.array( [[np.cos(beta_p), 1j*np.sin(beta_p)],[1j*np.sin(beta_p), np.cos(beta_p)]], dtype=dtype)
    # bond dimension 1: boundary tensors carry one dummy bond axis, bulk tensors two
    arrays = [ np.expand_dims(tb, axis=0) ] + \
             [ np.expand_dims(tb, axis=(0,1)) for _ in range(N-2) ] + \
             [ np.expand_dims(tb, axis=0) ]
    return qtn.tensor_1d.MatrixProductOperator( arrays )
def Wz(N, Uk : np.array, xi : int, marginal = False, dtype = np.complex128):
    """The tensors of Eq. 17 of reference paper.

    - Uk: Fourier coefficients at a fixed step p; its length sets the bond dimension
    - xi: single +-1 entry of the current pattern
    - marginal: True for boundary sites (one bond index instead of two)
    """
    bond_dim = len(Uk)
    shape = (bond_dim,2,2) if marginal else (bond_dim,bond_dim,2,2)
    tensor = np.zeros( shape, dtype = dtype )
    # N-th root spreads each k-coefficient evenly across the N sites
    coeff = np.power( Uk/np.sqrt(N+1), 1/N)
    exx = 1j * np.arange(bond_dim) * np.pi / (N + 1) # check: N+1
    for kk in range(bond_dim):
        # diagonal single-site operator; the phase depends on spin vs pattern entry
        spin_matrix = np.diag(
            [ coeff[kk]*np.exp(exx[kk]*(1-xi)),
              coeff[kk]*np.exp(exx[kk]*(1+xi)) ]
        )
        if marginal: tensor[kk,:,:] = spin_matrix
        else: tensor[kk,kk,:,:] = spin_matrix
    return tensor
def make_Uz(N : int, Uk : np.array, xi : np.array, bond_dim=None, dtype = np.complex128):
    """Return as MPO the U_z evolution operator at time s_p (defined indirectly by Uk)."""
    # Uk must be a vector for all k values, while p is fixed
    # xi must be a single sample from dataset
    # NOTE(review): the bond_dim parameter is accepted but never used
    assert len(xi) == N, 'not matching dims'
    # boundary sites use the marginal (single-bond) Wz tensors
    arrays = [ Wz(N, Uk, xi[0], marginal = True, dtype = dtype) ] + \
             [ Wz(N, Uk, xi[i+1], dtype = dtype) for i in range(N-2) ] + \
             [ Wz(N, Uk, xi[N-1], marginal = True, dtype = dtype) ]
    return qtn.tensor_1d.MatrixProductOperator( arrays )
def Ux_p(N, d, beta_p):
    """ Build factorized Ux(beta_p) (bond dimension = 1) on N sites"""
    # cos(beta) on the diagonal, i*sin(beta) on the off-diagonals
    Ux_i = np.identity(d)*np.cos(beta_p) + 1.0j*(np.ones(d)-np.identity(d))*np.sin(beta_p) # single site operator
    Ux = qtn.MPO_product_operator([Ux_i]*N, upper_ind_id='u{}', lower_ind_id='s{}')
    return Ux
def Uz_p_mu(N, d, p, mu, Uz_FT_, patterns):
    """ Build Uz^mu(gamma_p) (bond dimension = N+1) on N sites
    - p in range(1,P+1)
    - d: local physical dimension (2 for spins)
    - mu: pattern index into `patterns`; Uz_FT_: (N+1, P) table of Fourier coefficients
    """
    Uz_i = []
    # leftermost tensor (i = 1): single bond index
    i = 1
    tens = np.zeros((N+1,d,d), dtype=np.complex128)
    for s_i in range(d):
        tens[:,s_i,s_i] = np.power(Uz_FT_[:,p-1]/np.sqrt(N+1), 1/N) * np.exp(1.0j * (np.pi/(N+1)) * np.arange(N+1) * (1-patterns[mu,i-1]*(-1)**s_i))
    Uz_i.append(tens.copy())
    # bulk tensors (2 <= i <= N-1): diagonal in the two bond indices
    for i in range(2,N):
        tens = np.zeros((N+1,N+1,d,d), dtype=np.complex128)
        for s_i in range(d):
            np.fill_diagonal(tens[:,:,s_i,s_i],
                             np.power(Uz_FT_[:,p-1]/np.sqrt(N+1), 1/N) * np.exp(1.0j * (np.pi/(N+1)) * np.arange(N+1) * (1-patterns[mu,i-1]*(-1)**s_i)))
        Uz_i.append(tens.copy())
    # rightermost tensor (i = N): single bond index again
    i = N
    tens = np.zeros((N+1,d,d), dtype=np.complex128)
    for s_i in range(d):
        tens[:,s_i,s_i] = np.power(Uz_FT_[:,p-1]/np.sqrt(N+1), 1/N) * np.exp(1.0j * (np.pi/(N+1)) * np.arange(N+1) * (1-patterns[mu,i-1]*(-1)**s_i))
    Uz_i.append(tens.copy())
    Uz = qtn.tensor_1d.MatrixProductOperator(Uz_i, upper_ind_id='u{}', lower_ind_id='s{}') # lower is contracted with psi
    return Uz
def h_perceptron(m, N):
    """ Cost function to be minimized in the perceptron model, depending on the overlap m.
    The total H_z Hamiltonian is obtained as a sum of these cost functions evaluated at each pattern csi_mu.
    h(m) = 0 if m>=0 else -m/sqrt(N)
    """
    m = np.asarray(m)
    # max(-m, 0)/sqrt(N) is exactly 0 for m >= 0 and -m/sqrt(N) otherwise
    return (np.maximum(-m, 0.0) / np.sqrt(N)).squeeze()

def f_perceptron(x, N):
    """ Cost function to be minimized in the perceptron model, depending on the Hamming distance x.
    The total H_z Hamiltonian is obtained as a sum of these cost functions evaluated at each pattern csi_mu.
    f(x) = h(N - 2x) = h(m(x)) with m(x) = N - 2x
    """
    return h_perceptron(N - 2*np.asarray(x), N)
# ortho-normalized Fourier transform of the perceptron cost over Hamming distances 0..N
fx_FT = fft.fft(f_perceptron(range(N+1), N), norm="ortho")
def Hz_mu_singleK(N, mu, K, f_FT_, patterns, dtype=np.complex128):
    """ Build factorized Hz^{mu,k} (bond dimension = 1) on N sites"""
    # NOTE(review): this definition is shadowed by the re-definition of
    # Hz_mu_singleK a few lines below and is therefore dead code.
    Hz_i = []
    for i in range(N):
        tens = np.zeros((2,2), dtype=dtype)
        for s_i in range(2):
            tens[s_i,s_i] = np.power(f_FT_[K]/np.sqrt(N+1), 1/N) * \
                            np.exp( 1.0j * (np.pi/(N+1)) * K *\
                                    (1-patterns[mu,i]*((-1)**s_i)) )
        Hz_i.append(tens) # removed copy
    Hz = qtn.MPO_product_operator(Hz_i, upper_ind_id='u{}', lower_ind_id='s{}')
    return Hz
# this is really the same!
def Hz_mu_singleK(N, mu, K, f_FT_, patterns):
    """ Build factorized Hz^{mu,k} (bond dimension = 1) on N sites"""
    d = 2
    Hz_i = []
    # one diagonal single-site tensor per site, phase set by pattern entry and momentum K
    for i in range(1,N+1):
        tens = np.zeros((d,d), dtype=np.complex128)
        for s_i in range(d):
            tens[s_i,s_i] = np.power(f_FT_[K]/np.sqrt(N+1), 1/N) * np.exp(1.0j * (np.pi/(N+1)) * K * (1-patterns[mu,i-1]*(-1)**s_i))
        Hz_i.append(tens.copy())
    Hz = qtn.MPO_product_operator(Hz_i)#, upper_ind_id='u{}', lower_ind_id='s{}')
    return Hz
def compute_loss(psi, N, fxft, xi):
    """Evaluate the loss <psi|H|psi>/N summed over all patterns mu and momenta k."""
    N_tens = psi.num_tensors
    eps = 0.0
    # NOTE(review): iterates over the module-level N_xi, not xi's own row count —
    # assumes xi rows are patterns and that there are exactly N_xi of them; confirm
    for mu in range(N_xi):
        for kk in range(N+1):
            mpo = Hz_mu_singleK(N, mu, kk, fxft, xi) # store!
            pp = psi.copy()
            pH = pp.H
            pp.align_(mpo, pH)
            #pH = psi.reindex({f"s{i}":f"u{i}" for i in range(N_tens)}).H
            tnet = pH & mpo & pp
            eps += (tnet.contract()/N_tens)
    return eps
# load the benchmark patterns if available, otherwise draw a fresh random dataset
try:
    xi = np.load('../data/patterns_9-12.npy')
except:  # NOTE(review): bare except also swallows non-file errors; consider OSError
    xi = create_dataset(N, N_xi)

# this is the initial state, an MPS of bond_dim = 1
psi = qu.tensor.tensor_builder.MPS_product_state(
    [ np.array([[2**-0.5, 2**-0.5]], dtype=np.complex128) ] * N,
    tags=['psi'],
)

tau = dt * P  # total annealing time

# keep track of loss function
loss = []
# etc (bond-dimension history, one entry per pattern application)
cc = []

# fourier transform of U_z -> U_k, one column per annealing step p
Uk_FT = np.zeros((N+1,P), dtype=np.complex128)
for p in range(0,P):
    Uk_FT[:,p] = fft.fft( np.exp(-1.0j*((p+1)/P)*dt*f_perceptron(range(N+1), N)), norm="ortho")

compute_loss(psi, N, fx_FT, xi)  # warm-up evaluation (result discarded)

crop_p = None  # optionally stop the sweep early at this iteration
loss.append( (0, compute_loss(psi, N, fx_FT, xi)) )

print('dQA---')
print(' tau = {}, P = {}, dt = {}'.format(tau, P, dt) )
if crop_p is not None:
    print(' [!] simulation will be stopped at iter', crop_p)

# finally... RUN!
with tqdm(total=P, desc='QAnnealing') as pbar:
    for pp in range(P):
        s_p = (pp+1)/P
        beta_p = (1-s_p)*dt
        #gamma_p = s_p*dt # not needed

        # loop over patterns: apply Uz^mu with MPS compression after each
        for mu in range(N_xi):
            Uz = make_Uz(N, Uk_FT[:,pp], xi[mu])
            #Uz = Uz_p_mu(N, 2, pp+1, mu, Uk_FT, patterns=xi) # from Paolo
            curr_bdim = psi.tensors[int(N/2)].shape[0]
            cc.append( curr_bdim )
            #psi = Uz._apply_mps( psi, compress = False)
            psi = apply_compress(Uz, psi, max_bond=max_bond, method='svd')

        # transverse-field step, once per p (bond dim 1, no compression needed)
        Ux = make_Ux(N, beta_p = beta_p)
        #Ux = Ux_p(N, 2, beta_p=beta_p) # from Paolo
        psi = Ux.apply( psi, compress = False)

        # evaluate <psi | H | psi>
        expv = compute_loss(psi, N, fx_FT, xi)
        loss.append( (s_p, expv) )
        # etc
        pbar.update(1)
        pbar.set_postfix_str("loss = {}, bd = {}".format( np.round(expv, 5), curr_bdim ) )
        if crop_p is not None:
            if pp == crop_p: break

# loss curve vs annealing parameter s_p, log scale
plt.plot( *zip(*loss) )
plt.yscale('log')
plt.title('dQA')
plt.show()

loss[-1]
# bond-dimension history at the chain center
plt.plot(cc)
# [INFO] to eventually save the curve...
#np.save('../data/quimb-demo-dqa.npy', np.array([el[1] for el in loss]))
|
https://github.com/Qubico-Hack/tutorials
|
Qubico-Hack
|
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import clear_output
!pip install qiskit[all]
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
!pip install qiskit_algorithms
from qiskit_algorithms.optimizers import COBYLA,L_BFGS_B
from qiskit_algorithms.utils import algorithm_globals
!pip install qiskit_machine_learning
from qiskit_machine_learning.algorithms.classifiers import NeuralNetworkClassifier, VQC
from qiskit_machine_learning.algorithms.regressors import NeuralNetworkRegressor, VQR
from qiskit_machine_learning.neural_networks import SamplerQNN, EstimatorQNN
from qiskit_machine_learning.circuit.library import QNNCircuit
algorithm_globals.random_seed = 42
num_inputs = 2
num_samples = 20
X = 2 * algorithm_globals.random.random([num_samples, num_inputs]) - 1
y01 = 1 * (np.sum(X, axis=1) >= 0) # in { 0, 1}
y = 2 * y01 - 1 # in {-1, +1}
y_one_hot = np.zeros((num_samples, 2))
for i in range(num_samples):
y_one_hot[i, y01[i]] = 1
for x, y_target in zip(X, y):
if y_target == 1:
plt.plot(x[0], x[1], "bo")
else:
plt.plot(x[0], x[1], "go")
plt.plot([-1, 1], [1, -1], "--", color="black")
plt.show()
# construct QNN with the QNNCircuit's default ZZFeatureMap feature map and RealAmplitudes ansatz.
qc = QNNCircuit(num_qubits=2)
qc.draw(output="mpl")
estimator_qnn = EstimatorQNN(circuit=qc)
# QNN maps inputs to [-1, +1]
estimator_qnn.forward(X[0, :], algorithm_globals.random.random(estimator_qnn.num_weights))
# callback function that draws a live plot when the .fit() method is called
def callback_graph(weights, obj_func_eval):
    """Live-plot optimizer callback: append the objective value and redraw the curve.

    Relies on the module-level list `objective_func_vals`; `weights` is unused.
    """
    clear_output(wait=True)
    objective_func_vals.append(obj_func_eval)
    plt.title("Objective function value against iteration")
    plt.xlabel("Iteration")
    plt.ylabel("Objective function value")
    plt.plot(range(len(objective_func_vals)), objective_func_vals)
    plt.show()
# construct neural network classifier
estimator_classifier = NeuralNetworkClassifier(
estimator_qnn, optimizer=COBYLA(maxiter=60), callback=callback_graph
)
# create empty array for callback to store evaluations of the objective function
objective_func_vals = []
plt.rcParams["figure.figsize"] = (12, 6)
# fit classifier to data
estimator_classifier.fit(X, y)
# return to default figsize
plt.rcParams["figure.figsize"] = (6, 4)
# score classifier
estimator_classifier.score(X, y)
# evaluate data points
y_predict = estimator_classifier.predict(X)
# plot results
# red == wrongly classified
for x, y_target, y_p in zip(X, y, y_predict):
if y_target == 1:
plt.plot(x[0], x[1], "bo")
else:
plt.plot(x[0], x[1], "go")
if y_target != y_p:
plt.scatter(x[0], x[1], s=200, facecolors="none", edgecolors="r", linewidths=2)
plt.plot([-1, 1], [1, -1], "--", color="black")
plt.show()
estimator_classifier.weights
# construct a quantum circuit from the default ZZFeatureMap feature map and a customized RealAmplitudes ansatz
qc = QNNCircuit(ansatz=RealAmplitudes(num_inputs, reps=1))
qc.draw(output="mpl")
# parity maps bitstrings to 0 or 1
def parity(x):
    """Map an integer measurement outcome to the parity (0/1) of its set bits."""
    # the '0b'/'-0b' prefix from bin() contains no '1' characters
    return bin(x).count("1") % 2
output_shape = 2 # corresponds to the number of classes, possible outcomes of the (parity) mapping.
# construct QNN
sampler_qnn = SamplerQNN(
circuit=qc,
interpret=parity,
output_shape=output_shape,
)
# construct classifier
sampler_classifier = NeuralNetworkClassifier(
neural_network=sampler_qnn, optimizer=COBYLA(maxiter=30), callback=callback_graph
)
# create empty array for callback to store evaluations of the objective function
objective_func_vals = []
plt.rcParams["figure.figsize"] = (12, 6)
# fit classifier to data
sampler_classifier.fit(X, y01)
# return to default figsize
plt.rcParams["figure.figsize"] = (6, 4)
# score classifier
sampler_classifier.score(X, y01)
# evaluate data points
y_predict = sampler_classifier.predict(X)
# plot results
# red == wrongly classified
for x, y_target, y_p in zip(X, y01, y_predict):
if y_target == 1:
plt.plot(x[0], x[1], "bo")
else:
plt.plot(x[0], x[1], "go")
if y_target != y_p:
plt.scatter(x[0], x[1], s=200, facecolors="none", edgecolors="r", linewidths=2)
plt.plot([-1, 1], [1, -1], "--", color="black")
plt.show()
sampler_classifier.weights
# construct feature map, ansatz, and optimizer
feature_map = ZZFeatureMap(num_inputs)
ansatz = RealAmplitudes(num_inputs, reps=1)
# construct variational quantum classifier
vqc = VQC(
feature_map=feature_map,
ansatz=ansatz,
loss="cross_entropy",
optimizer=COBYLA(maxiter=30),
callback=callback_graph,
)
# create empty array for callback to store evaluations of the objective function
objective_func_vals = []
plt.rcParams["figure.figsize"] = (12, 6)
# fit classifier to data
vqc.fit(X, y_one_hot)
# return to default figsize
plt.rcParams["figure.figsize"] = (6, 4)
# score classifier
vqc.score(X, y_one_hot)
# evaluate data points
y_predict = vqc.predict(X)
# plot results
# red == wrongly classified
for x, y_target, y_p in zip(X, y_one_hot, y_predict):
if y_target[0] == 1:
plt.plot(x[0], x[1], "bo")
else:
plt.plot(x[0], x[1], "go")
if not np.all(y_target == y_p):
plt.scatter(x[0], x[1], s=200, facecolors="none", edgecolors="r", linewidths=2)
plt.plot([-1, 1], [1, -1], "--", color="black")
plt.show()
from sklearn.datasets import make_classification
from sklearn.preprocessing import MinMaxScaler
X, y = make_classification(
n_samples=10,
n_features=2,
n_classes=3,
n_redundant=0,
n_clusters_per_class=1,
class_sep=2.0,
random_state=algorithm_globals.random_seed,
)
X = MinMaxScaler().fit_transform(X)
plt.scatter(X[:, 0], X[:, 1], c=y)
y_cat = np.empty(y.shape, dtype=str)
y_cat[y == 0] = "A"
y_cat[y == 1] = "B"
y_cat[y == 2] = "C"
print(y_cat)
vqc = VQC(
num_qubits=2,
optimizer=COBYLA(maxiter=30),
callback=callback_graph,
)
# create empty array for callback to store evaluations of the objective function
objective_func_vals = []
plt.rcParams["figure.figsize"] = (12, 6)
# fit classifier to data
vqc.fit(X, y_cat)
# return to default figsize
plt.rcParams["figure.figsize"] = (6, 4)
# score classifier
vqc.score(X, y_cat)
predict = vqc.predict(X)
print(f"Predicted labels: {predict}")
print(f"Ground truth: {y_cat}")
num_samples = 20
eps = 0.2
lb, ub = -np.pi, np.pi
X_ = np.linspace(lb, ub, num=50).reshape(50, 1)
f = lambda x: np.sin(x)
X = (ub - lb) * algorithm_globals.random.random([num_samples, 1]) + lb
y = f(X[:, 0]) + eps * (2 * algorithm_globals.random.random(num_samples) - 1)
plt.plot(X_, f(X_), "r--")
plt.plot(X, y, "bo")
plt.show()
# construct simple feature map
param_x = Parameter("x")
feature_map = QuantumCircuit(1, name="fm")
feature_map.ry(param_x, 0)
# construct simple ansatz
param_y = Parameter("y")
ansatz = QuantumCircuit(1, name="vf")
ansatz.ry(param_y, 0)
# construct a circuit
qc = QNNCircuit(feature_map=feature_map, ansatz=ansatz)
# construct QNN
regression_estimator_qnn = EstimatorQNN(circuit=qc)
# construct the regressor from the neural network
regressor = NeuralNetworkRegressor(
neural_network=regression_estimator_qnn,
loss="squared_error",
optimizer=L_BFGS_B(maxiter=5),
callback=callback_graph,
)
# create empty array for callback to store evaluations of the objective function
objective_func_vals = []
plt.rcParams["figure.figsize"] = (12, 6)
# fit to data
regressor.fit(X, y)
# return to default figsize
plt.rcParams["figure.figsize"] = (6, 4)
# score the result
regressor.score(X, y)
# plot target function
plt.plot(X_, f(X_), "r--")
# plot data
plt.plot(X, y, "bo")
# plot fitted line
y_ = regressor.predict(X_)
plt.plot(X_, y_, "g-")
plt.show()
regressor.weights
vqr = VQR(
feature_map=feature_map,
ansatz=ansatz,
optimizer=L_BFGS_B(maxiter=5),
callback=callback_graph,
)
# create empty array for callback to store evaluations of the objective function
objective_func_vals = []
plt.rcParams["figure.figsize"] = (12, 6)
# fit regressor
vqr.fit(X, y)
# return to default figsize
plt.rcParams["figure.figsize"] = (6, 4)
# score result
vqr.score(X, y)
# plot target function
plt.plot(X_, f(X_), "r--")
# plot data
plt.plot(X, y, "bo")
# plot fitted line
y_ = vqr.predict(X_)
plt.plot(X_, y_, "g-")
plt.show()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Qubico-Hack/tutorials
|
Qubico-Hack
|
from sklearn.datasets import load_iris
iris_data = load_iris()
print(iris_data.DESCR)
features = iris_data.data
labels = iris_data.target
from sklearn.preprocessing import MinMaxScaler
features = MinMaxScaler().fit_transform(features)
import pandas as pd
import seaborn as sns
df = pd.DataFrame(iris_data.data, columns=iris_data.feature_names)
df["class"] = pd.Series(iris_data.target)
sns.pairplot(df, hue="class", palette="tab10")
# Entrenamiento de un modelo clรกsico de aprendizaje automรกtico
from sklearn.model_selection import train_test_split
from qiskit.utils import algorithm_globals
algorithm_globals.random_seed = 123;
train_features, test_features, train_labels, test_labels = train_test_split(
features, labels, train_size=0.8, random_state=algorithm_globals.random_seed
)
from sklearn.svm import SVC
svc = SVC()
_ = svc.fit(train_features, train_labels)
train_score_c4 = svc.score(train_features, train_labels)
test_score_c4 = svc.score(test_features, test_labels)
print(f"Classical SVC on the training dataset: {train_score_c4:.2f}")
print(f"Classical SVC on the test dataset: {test_score_c4:.2f}")
# Entrenamiento de un modelo de aprendizaje automรกtico
!pip install qiskit[all]
from qiskit.circuit.library import ZZFeatureMap
num_features = features.shape[1]
feature_map = ZZFeatureMap(feature_dimension=num_features, reps=1)
feature_map.decompose().draw(output="mpl", fold=20)
from qiskit.circuit.library import RealAmplitudes
ansatz = RealAmplitudes(num_qubits=num_features, reps=3)
ansatz.decompose().draw(output="mpl", fold=20)
from qiskit_algorithms.optimizers import COBYLA
optimizer = COBYLA(maxiter=100)
from qiskit.primitives import Sampler
sampler = Sampler()
from matplotlib import pyplot as plt
from IPython.display import clear_output
objective_func_vals = []
plt.rcParams["figure.figsize"] = (12, 6)
def callback_graph(weights, obj_func_eval):
    """Live-plot optimizer callback: append the objective value and redraw the curve.

    Relies on the module-level list `objective_func_vals`; `weights` is unused.
    """
    clear_output(wait=True)
    objective_func_vals.append(obj_func_eval)
    plt.title("Objective function value against iteration")
    plt.xlabel("Iteration")
    plt.ylabel("Objective function value")
    plt.plot(range(len(objective_func_vals)), objective_func_vals)
    plt.show()
import time
from qiskit_machine_learning.algorithms.classifiers import VQC
vqc = VQC(
sampler=sampler,
feature_map=feature_map,
ansatz=ansatz,
optimizer=optimizer,
callback=callback_graph,
)
# clear objective value history
objective_func_vals = []
start = time.time()
vqc.fit(train_features, train_labels)
elapsed = time.time() - start
print(f"Training time: {round(elapsed)} seconds")
train_score_q4 = vqc.score(train_features, train_labels)
test_score_q4 = vqc.score(test_features, test_labels)
print(f"Quantum VQC on the training dataset: {train_score_q4:.2f}")
print(f"Quantum VQC on the test dataset: {test_score_q4:.2f}")
# Reducir el nรบmero de funciones
from sklearn.decomposition import PCA
features = PCA(n_components=2).fit_transform(features)
plt.rcParams["figure.figsize"] = (6, 6)
sns.scatterplot(x=features[:, 0], y=features[:, 1], hue=labels, palette="tab10")
train_features, test_features, train_labels, test_labels = train_test_split(
features, labels, train_size=0.8, random_state=algorithm_globals.random_seed
)
svc.fit(train_features, train_labels)
train_score_c2 = svc.score(train_features, train_labels)
test_score_c2 = svc.score(test_features, test_labels)
print(f"Classical SVC on the training dataset: {train_score_c2:.2f}")
print(f"Classical SVC on the test dataset: {test_score_c2:.2f}")
num_features = features.shape[1]
feature_map = ZZFeatureMap(feature_dimension=num_features, reps=1)
ansatz = RealAmplitudes(num_qubits=num_features, reps=3)
optimizer = COBYLA(maxiter=40)
vqc = VQC(
sampler=sampler,
feature_map=feature_map,
ansatz=ansatz,
optimizer=optimizer,
callback=callback_graph,
)
# clear objective value history
objective_func_vals = []
# make the objective function plot look nicer.
plt.rcParams["figure.figsize"] = (12, 6)
start = time.time()
vqc.fit(train_features, train_labels)
elapsed = time.time() - start
print(f"Training time: {round(elapsed)} seconds")
train_score_q2_ra = vqc.score(train_features, train_labels)
test_score_q2_ra = vqc.score(test_features, test_labels)
print(f"Quantum VQC on the training dataset using RealAmplitudes: {train_score_q2_ra:.2f}")
print(f"Quantum VQC on the test dataset using RealAmplitudes: {test_score_q2_ra:.2f}")
from qiskit.circuit.library import EfficientSU2
ansatz = EfficientSU2(num_qubits=num_features, reps=3)
optimizer = COBYLA(maxiter=40)
vqc = VQC(
sampler=sampler,
feature_map=feature_map,
ansatz=ansatz,
optimizer=optimizer,
callback=callback_graph,
)
# clear objective value history
objective_func_vals = []
start = time.time()
vqc.fit(train_features, train_labels)
elapsed = time.time() - start
print(f"Training time: {round(elapsed)} seconds")
train_score_q2_eff = vqc.score(train_features, train_labels)
test_score_q2_eff = vqc.score(test_features, test_labels)
print(f"Quantum VQC on the training dataset using EfficientSU2: {train_score_q2_eff:.2f}")
print(f"Quantum VQC on the test dataset using EfficientSU2: {test_score_q2_eff:.2f}")
#Conclusiรณn
print(f"Model | Test Score | Train Score")
print(f"SVC, 4 features | {train_score_c4:10.2f} | {test_score_c4:10.2f}")
print(f"VQC, 4 features, RealAmplitudes | {train_score_q4:10.2f} | {test_score_q4:10.2f}")
print(f"----------------------------------------------------------")
print(f"SVC, 2 features | {train_score_c2:10.2f} | {test_score_c2:10.2f}")
print(f"VQC, 2 features, RealAmplitudes | {train_score_q2_ra:10.2f} | {test_score_q2_ra:10.2f}")
print(f"VQC, 2 features, EfficientSU2 | {train_score_q2_eff:10.2f} | {test_score_q2_eff:10.2f}")
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Qubico-Hack/tutorials
|
Qubico-Hack
|
!pip install qiskit-machine-learning
!pip install --upgrade matplotlib
!pip install pylatexenc
!pip install pillow
# Necessary imports
import numpy as np
import matplotlib.pyplot as plt
from torch import Tensor
from torch.nn import Linear, CrossEntropyLoss, MSELoss
from torch.optim import LBFGS
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit_algorithms.utils import algorithm_globals
from qiskit_machine_learning.neural_networks import SamplerQNN, EstimatorQNN
from qiskit_machine_learning.connectors import TorchConnector
# Set seed for random generators
algorithm_globals.random_seed = 42
#Generate random dataset
# Select dataset dimension (num_inputs) and size (num_samples)
num_inputs = 2
num_samples = 20
# Generate random input coordinates (X) and binary labels (y)
X = 2 * algorithm_globals.random.random([num_samples, num_inputs]) - 1
y01 = 1 * (np.sum(X, axis=1) >= 0) # in { 0, 1}, y01 will be used for SamplerQNN example
y = 2 * y01 - 1 # in {-1, +1}, y will be used for EstimatorQNN example
# Convert to torch Tensors
X_ = Tensor(X)
y01_ = Tensor(y01).reshape(len(y)).long()
y_ = Tensor(y).reshape(len(y), 1)
# Plot dataset
for x, y_target in zip(X, y):
if y_target == 1:
plt.plot(x[0], x[1], "bo")
else:
plt.plot(x[0], x[1], "go")
plt.plot([-1, 1], [1, -1], "--", color="black")
plt.show()
# Set up a circuit
feature_map = ZZFeatureMap(num_inputs)
ansatz = RealAmplitudes(num_inputs)
qc = QuantumCircuit(num_inputs)
qc.compose(feature_map, inplace=True)
qc.compose(ansatz, inplace=True)
qc.draw("mpl")
# Setup QNN
qnn1 = EstimatorQNN(
circuit=qc, input_params=feature_map.parameters, weight_params=ansatz.parameters
)
# Set up PyTorch module
# Note: If we don't explicitly declare the initial weights
# they are chosen uniformly at random from [-1, 1].
initial_weights = 0.1 * (2 * algorithm_globals.random.random(qnn1.num_weights) - 1)
model1 = TorchConnector(qnn1, initial_weights=initial_weights)
print("Initial weights: ", initial_weights)
#Test with a single input
model1(X_[0, :])
# Define optimizer and loss
optimizer = LBFGS(model1.parameters())
f_loss = MSELoss(reduction="sum")
# Start training
model1.train() # set model to training mode
# Note from (https://pytorch.org/docs/stable/optim.html):
# Some optimization algorithms such as LBFGS need to
# reevaluate the function multiple times, so you have to
# pass in a closure that allows them to recompute your model.
# The closure should clear the gradients, compute the loss,
# and return it.
def closure():
    """LBFGS closure: clear gradients, evaluate the MSE loss on the full batch,
    backpropagate, echo the loss value, and return it to the optimizer."""
    optimizer.zero_grad()
    batch_loss = f_loss(model1(X_), y_)
    batch_loss.backward()
    print(batch_loss.item())
    return batch_loss
# Run optimizer step
optimizer.step(closure)
# Evaluate model and compute accuracy
model1.eval()
y_predict = []
for x, y_target in zip(X, y):
output = model1(Tensor(x))
y_predict += [np.sign(output.detach().numpy())[0]]
print("Accuracy:", sum(y_predict == y) / len(y))
# Plot results
# red == wrongly classified
for x, y_target, y_p in zip(X, y, y_predict):
if y_target == 1:
plt.plot(x[0], x[1], "bo")
else:
plt.plot(x[0], x[1], "go")
if y_target != y_p:
plt.scatter(x[0], x[1], s=200, facecolors="none", edgecolors="r", linewidths=2)
plt.plot([-1, 1], [1, -1], "--", color="black")
plt.show()
#Define feature map and ansatz
feature_map = ZZFeatureMap(num_inputs)
ansatz = RealAmplitudes(num_inputs, entanglement="linear", reps=1)
# Define quantum circuit of num_qubits = input dim
# Append feature map and ansatz
qc = QuantumCircuit(num_inputs)
qc.compose(feature_map, inplace=True)
qc.compose(ansatz, inplace=True)
# Define SamplerQNN and initial setup
parity = lambda x: "{:b}".format(x).count("1") % 2 # optional interpret function
output_shape = 2 # parity = 0, 1
qnn2 = SamplerQNN(
circuit=qc,
input_params=feature_map.parameters,
weight_params=ansatz.parameters,
interpret=parity,
output_shape=output_shape,
)
# Set up PyTorch module
# Reminder: If we don't explicitly declare the initial weights
# they are chosen uniformly at random from [-1, 1].
initial_weights = 0.1 * (2 * algorithm_globals.random.random(qnn2.num_weights) - 1)
print("Initial weights: ", initial_weights)
model2 = TorchConnector(qnn2, initial_weights)
# Define model, optimizer, and loss
optimizer = LBFGS(model2.parameters())
f_loss = CrossEntropyLoss() # Our output will be in the [0,1] range
# Start training
model2.train()
# Define LBFGS closure method (explained in previous section)
def closure():
    """LBFGS closure for the classifier: zero grads (freeing them via
    set_to_none), compute cross-entropy loss, backprop, print, return."""
    optimizer.zero_grad(set_to_none=True)
    ce_loss = f_loss(model2(X_), y01_)
    ce_loss.backward()
    print(ce_loss.item())
    return ce_loss
# Run optimizer (LBFGS requires closure)
optimizer.step(closure);
# Evaluate model and compute accuracy
model2.eval()
y_predict = []
for x in X:
output = model2(Tensor(x))
y_predict += [np.argmax(output.detach().numpy())]
print("Accuracy:", sum(y_predict == y01) / len(y01))
# plot results
# red == wrongly classified
for x, y_target, y_ in zip(X, y01, y_predict):
if y_target == 1:
plt.plot(x[0], x[1], "bo")
else:
plt.plot(x[0], x[1], "go")
if y_target != y_:
plt.scatter(x[0], x[1], s=200, facecolors="none", edgecolors="r", linewidths=2)
plt.plot([-1, 1], [1, -1], "--", color="black")
plt.show()
# Generate random dataset
num_samples = 20
eps = 0.2
lb, ub = -np.pi, np.pi
f = lambda x: np.sin(x)
X = (ub - lb) * algorithm_globals.random.random([num_samples, 1]) + lb
y = f(X) + eps * (2 * algorithm_globals.random.random([num_samples, 1]) - 1)
plt.plot(np.linspace(lb, ub), f(np.linspace(lb, ub)), "r--")
plt.plot(X, y, "bo")
plt.show()
# Construct simple feature map
param_x = Parameter("x")
feature_map = QuantumCircuit(1, name="fm")
feature_map.ry(param_x, 0)
# Construct simple parameterized ansatz
param_y = Parameter("y")
ansatz = QuantumCircuit(1, name="vf")
ansatz.ry(param_y, 0)
qc = QuantumCircuit(1)
qc.compose(feature_map, inplace=True)
qc.compose(ansatz, inplace=True)
# Construct QNN
qnn3 = EstimatorQNN(circuit=qc, input_params=[param_x], weight_params=[param_y])
# Set up PyTorch module
# Reminder: If we don't explicitly declare the initial weights
# they are chosen uniformly at random from [-1, 1].
initial_weights = 0.1 * (2 * algorithm_globals.random.random(qnn3.num_weights) - 1)
model3 = TorchConnector(qnn3, initial_weights)
# Define optimizer and loss function
optimizer = LBFGS(model3.parameters())
f_loss = MSELoss(reduction="sum")
# Start training
model3.train() # set model to training mode
# Define objective function
def closure():
    """LBFGS closure for the regression model: zero grads, compute the
    summed MSE over the whole dataset, backprop, and return the loss."""
    optimizer.zero_grad(set_to_none=True)
    fit_loss = f_loss(model3(Tensor(X)), Tensor(y))
    fit_loss.backward()
    print(fit_loss.item())
    return fit_loss
# Run optimizer
optimizer.step(closure)
# Plot target function
plt.plot(np.linspace(lb, ub), f(np.linspace(lb, ub)), "r--")
# Plot data
plt.plot(X, y, "bo")
# Plot fitted line
model3.eval()
y_ = []
for x in np.linspace(lb, ub):
output = model3(Tensor([x]))
y_ += [output.detach().numpy()[0]]
plt.plot(np.linspace(lb, ub), y_, "g-")
plt.show()
# Additional torch-related imports
import torch
from torch import cat, no_grad, manual_seed
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.optim as optim
from torch.nn import (
Module,
Conv2d,
Linear,
Dropout2d,
NLLLoss,
MaxPool2d,
Flatten,
Sequential,
ReLU,
)
import torch.nn.functional as F
# Train Dataset
# -------------
# Set train shuffle seed (for reproducibility)
manual_seed(42)
batch_size = 1
n_samples = 100 # We will concentrate on the first 100 samples
# Use pre-defined torchvision function to load MNIST train data
X_train = datasets.MNIST(
root="./data", train=True, download=True, transform=transforms.Compose([transforms.ToTensor()])
)
# Filter out labels (originally 0-9), leaving only labels 0 and 1
idx = np.append(
np.where(X_train.targets == 0)[0][:n_samples], np.where(X_train.targets == 1)[0][:n_samples]
)
X_train.data = X_train.data[idx]
X_train.targets = X_train.targets[idx]
# Define torch dataloader with filtered data
train_loader = DataLoader(X_train, batch_size=batch_size, shuffle=True)
# Quick visualization of images of handwritten 0s and 1s
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
while n_samples_show > 0:
images, targets = data_iter.__next__()
axes[n_samples_show - 1].imshow(images[0, 0].numpy().squeeze(), cmap="gray")
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets[0].item()))
n_samples_show -= 1
# Test Dataset
# -------------
# Set test shuffle seed (for reproducibility)
# manual_seed(5)
n_samples = 50
# Use pre-defined torchvision function to load MNIST test data
X_test = datasets.MNIST(
root="./data", train=False, download=True, transform=transforms.Compose([transforms.ToTensor()])
)
# Filter out labels (originally 0-9), leaving only labels 0 and 1
idx = np.append(
np.where(X_test.targets == 0)[0][:n_samples], np.where(X_test.targets == 1)[0][:n_samples]
)
X_test.data = X_test.data[idx]
X_test.targets = X_test.targets[idx]
# Define torch dataloader with filtered data
test_loader = DataLoader(X_test, batch_size=batch_size, shuffle=True)
# Define and create QNN
def create_qnn():
    """Build a 2-qubit EstimatorQNN: ZZ feature map followed by a
    single-repetition RealAmplitudes ansatz on the same circuit."""
    fm = ZZFeatureMap(2)
    var_form = RealAmplitudes(2, reps=1)
    circuit = QuantumCircuit(2)
    circuit.compose(fm, inplace=True)
    circuit.compose(var_form, inplace=True)
    # input_gradients=True is required so TorchConnector can backpropagate
    # through the QNN inputs (hybrid gradient backprop).
    return EstimatorQNN(
        circuit=circuit,
        input_params=fm.parameters,
        weight_params=var_form.parameters,
        input_gradients=True,
    )
qnn4 = create_qnn()
# Define torch NN module
class Net(Module):
    """CNN front-end that compresses a 1-channel image to 2 features, feeds
    them through a quantum neural network, and maps the single QNN output to
    a 2-class prediction via ``cat((x, 1 - x), -1)``."""

    def __init__(self, qnn):
        super().__init__()
        self.conv1 = Conv2d(1, 2, kernel_size=5)
        self.conv2 = Conv2d(2, 16, kernel_size=5)
        self.dropout = Dropout2d()
        self.fc1 = Linear(256, 64)
        self.fc2 = Linear(64, 2)        # 2-dimensional input expected by the QNN
        self.qnn = TorchConnector(qnn)  # weights drawn uniformly at random from [-1, 1]
        self.fc3 = Linear(1, 1)         # the QNN yields a single expectation value

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)
        out = self.dropout(out)
        out = out.view(out.shape[0], -1)
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        out = self.qnn(out)  # apply the quantum layer
        out = self.fc3(out)
        return cat((out, 1 - out), -1)
model4 = Net(qnn4)
# Define model, optimizer, and loss function
optimizer = optim.Adam(model4.parameters(), lr=0.001)
loss_func = NLLLoss()
# Start training
epochs = 10 # Set number of epochs
loss_list = [] # Store loss history
model4.train() # Set model to training mode
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad(set_to_none=True) # Initialize gradient
output = model4(data) # Forward pass
loss = loss_func(output, target) # Calculate loss
loss.backward() # Backward pass
optimizer.step() # Optimize weights
total_loss.append(loss.item()) # Store loss
loss_list.append(sum(total_loss) / len(total_loss))
print("Training [{:.0f}%]\tLoss: {:.4f}".format(100.0 * (epoch + 1) / epochs, loss_list[-1]))
# Plot loss convergence
plt.plot(loss_list)
plt.title("Hybrid NN Training Convergence")
plt.xlabel("Training Iterations")
plt.ylabel("Neg. Log Likelihood Loss")
plt.show()
#Save the trained model
torch.save(model4.state_dict(), "model4.pt")
#Recreating the model and loading the state from the previously saved file.
qnn5 = create_qnn()
model5 = Net(qnn5)
model5.load_state_dict(torch.load("model4.pt"))
model5.eval() # set model to evaluation mode
with no_grad():
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
output = model5(data)
if len(output.shape) == 1:
output = output.reshape(1, *output.shape)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print(
"Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%".format(
sum(total_loss) / len(total_loss), correct / len(test_loader) / batch_size * 100
)
)
# Plot predicted labels
n_samples_show = 6
count = 0
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
model5.eval()
with no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
if count == n_samples_show:
break
output = model5(data[0:1])
if len(output.shape) == 1:
output = output.reshape(1, *output.shape)
pred = output.argmax(dim=1, keepdim=True)
axes[count].imshow(data[0].numpy().squeeze(), cmap="gray")
axes[count].set_xticks([])
axes[count].set_yticks([])
axes[count].set_title("Predicted {}".format(pred.item()))
count += 1
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Qubico-Hack/tutorials
|
Qubico-Hack
|
from sklearn.datasets import make_blobs
# example dataset
features, labels = make_blobs(n_samples=20, n_features=2, centers=2, random_state=3, shuffle=True)
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
features = MinMaxScaler(feature_range=(0, np.pi)).fit_transform(features)
train_features, test_features, train_labels, test_labels = train_test_split(
features, labels, train_size=15, shuffle=False
)
# number of qubits is equal to the number of features
num_qubits = 2
# number of steps performed during the training procedure
tau = 100
# regularization parameter
C = 1000
!pip install qiskit
!pip install qiskit_algorithms
!pip install qiskit_machine_learning
from qiskit import BasicAer
from qiskit.circuit.library import ZFeatureMap
from qiskit_algorithms.utils import algorithm_globals
from qiskit_machine_learning.kernels import FidelityQuantumKernel
algorithm_globals.random_seed = 12345
feature_map = ZFeatureMap(feature_dimension=num_qubits, reps=1)
qkernel = FidelityQuantumKernel(feature_map=feature_map)
from qiskit_machine_learning.algorithms import PegasosQSVC
pegasos_qsvc = PegasosQSVC(quantum_kernel=qkernel, C=C, num_steps=tau)
# training
pegasos_qsvc.fit(train_features, train_labels)
# testing
pegasos_score = pegasos_qsvc.score(test_features, test_labels)
print(f"PegasosQSVC classification test score: {pegasos_score}")
grid_step = 0.2
margin = 0.2
grid_x, grid_y = np.meshgrid(
np.arange(-margin, np.pi + margin, grid_step), np.arange(-margin, np.pi + margin, grid_step)
)
meshgrid_features = np.column_stack((grid_x.ravel(), grid_y.ravel()))
meshgrid_colors = pegasos_qsvc.predict(meshgrid_features)
import matplotlib.pyplot as plt
plt.figure(figsize=(5, 5))
meshgrid_colors = meshgrid_colors.reshape(grid_x.shape)
plt.pcolormesh(grid_x, grid_y, meshgrid_colors, cmap="RdBu", shading="auto")
plt.scatter(
train_features[:, 0][train_labels == 0],
train_features[:, 1][train_labels == 0],
marker="s",
facecolors="w",
edgecolors="r",
label="A train",
)
plt.scatter(
train_features[:, 0][train_labels == 1],
train_features[:, 1][train_labels == 1],
marker="o",
facecolors="w",
edgecolors="b",
label="B train",
)
plt.scatter(
test_features[:, 0][test_labels == 0],
test_features[:, 1][test_labels == 0],
marker="s",
facecolors="r",
edgecolors="r",
label="A test",
)
plt.scatter(
test_features[:, 0][test_labels == 1],
test_features[:, 1][test_labels == 1],
marker="o",
facecolors="b",
edgecolors="b",
label="B test",
)
plt.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0)
plt.title("Pegasos Classification")
plt.show()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Qubico-Hack/tutorials
|
Qubico-Hack
|
import matplotlib.pyplot as plt
import numpy as np
!pip install qiskit
from qiskit.circuit.library import RealAmplitudes
from qiskit.primitives import Sampler
!pip install qiskit_algorithms
from qiskit_algorithms.optimizers import COBYLA
from qiskit_algorithms.utils import algorithm_globals
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
!pip install qiskit_machine_learning
from qiskit_machine_learning.algorithms.classifiers import VQC
from IPython.display import clear_output
algorithm_globals.random_seed = 42
sampler1 = Sampler()
sampler2 = Sampler()
num_samples = 40
num_features = 2
features = 2 * algorithm_globals.random.random([num_samples, num_features]) - 1
labels = 1 * (np.sum(features, axis=1) >= 0) # in { 0, 1}
features = MinMaxScaler().fit_transform(features)
features.shape
features[0:5, :]
labels = OneHotEncoder(sparse_output=False).fit_transform(labels.reshape(-1, 1))
labels.shape
labels[0:5, :]
train_features, test_features, train_labels, test_labels = train_test_split(
features, labels, train_size=30, random_state=algorithm_globals.random_seed
)
train_features.shape
def plot_dataset():
    """Scatter-plot the train/test splits, colored by label (blue = label 0,
    green = label 1; filled = train, hollow = test), plus the true boundary."""
    split_specs = [
        (train_features, train_labels, 0,
         dict(marker="o", color="b", label="Label 0 train")),
        (train_features, train_labels, 1,
         dict(marker="o", color="g", label="Label 1 train")),
        (test_features, test_labels, 0,
         dict(marker="o", facecolors="w", edgecolors="b", label="Label 0 test")),
        (test_features, test_labels, 1,
         dict(marker="o", facecolors="w", edgecolors="g", label="Label 1 test")),
    ]
    for feats, labs, cls, style in split_specs:
        sel = np.where(labs[:, 0] == cls)
        plt.scatter(feats[sel, 0], feats[sel, 1], **style)
    plt.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0)
    # the ground-truth decision boundary of the synthetic dataset
    plt.plot([1, 0], [0, 1], "--", color="black")
plot_dataset()
plt.show()
maxiter = 20
objective_values = []
# callback function that draws a live plot when the .fit() method is called
def callback_graph(_, objective_value):
    """Live-plot the objective history during VQC.fit(): the first `maxiter`
    evaluations in orange, any warm-start continuation beyond that in purple."""
    clear_output(wait=True)
    objective_values.append(objective_value)
    plt.title("Objective function value against iteration")
    plt.xlabel("Iteration")
    plt.ylabel("Objective function value")
    # First training stage: at most `maxiter` points.
    n_first = np.min((len(objective_values), maxiter))
    first_x = np.linspace(1, n_first, n_first)
    first_y = objective_values[:n_first]
    # Second (warm-start) stage: whatever extends past `maxiter`.
    n_second = np.max((0, len(objective_values) - maxiter))
    second_x = np.linspace(maxiter, maxiter + n_second - 1, n_second)
    second_y = objective_values[maxiter : maxiter + n_second]
    plt.plot(first_x, first_y, color="orange")
    plt.plot(second_x, second_y, color="purple")
    plt.show()
plt.rcParams["figure.figsize"] = (12, 6)
original_optimizer = COBYLA(maxiter=maxiter)
ansatz = RealAmplitudes(num_features)
initial_point = np.asarray([0.5] * ansatz.num_parameters)
original_classifier = VQC(
ansatz=ansatz, optimizer=original_optimizer, callback=callback_graph, sampler=sampler1
)
original_classifier.fit(train_features, train_labels)
print("Train score", original_classifier.score(train_features, train_labels))
print("Test score ", original_classifier.score(test_features, test_labels))
original_classifier.save("vqc_classifier.model")
loaded_classifier = VQC.load("vqc_classifier.model")
loaded_classifier.warm_start = True
loaded_classifier.neural_network.sampler = sampler2
loaded_classifier.optimizer = COBYLA(maxiter=80)
loaded_classifier.fit(train_features, train_labels)
print("Train score", loaded_classifier.score(train_features, train_labels))
print("Test score", loaded_classifier.score(test_features, test_labels))
train_predicts = loaded_classifier.predict(train_features)
test_predicts = loaded_classifier.predict(test_features)
# return plot to default figsize
plt.rcParams["figure.figsize"] = (6, 4)
plot_dataset()
# plot misclassified data points
plt.scatter(
train_features[np.all(train_labels != train_predicts, axis=1), 0],
train_features[np.all(train_labels != train_predicts, axis=1), 1],
s=200,
facecolors="none",
edgecolors="r",
linewidths=2,
)
plt.scatter(
test_features[np.all(test_labels != test_predicts, axis=1), 0],
test_features[np.all(test_labels != test_predicts, axis=1), 1],
s=200,
facecolors="none",
edgecolors="r",
linewidths=2,
)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Qubico-Hack/tutorials
|
Qubico-Hack
|
!pip install qiskit
!pip install qiskit-aer
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
def to_numbers(tensor_list):
    """Convert a list of single-element tensors to a list of Python scalars."""
    return [t.item() for t in tensor_list]
import numpy as np
import torch
from torch.autograd import Function
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, execute
from qiskit.circuit import Parameter
from qiskit import Aer
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
class QuantumCircuit:
    """
    Thin wrapper around a single parametrized qiskit circuit
    (H on all qubits, then RY(theta) on all qubits, then measurement).

    NOTE(review): this class shadows the `qiskit.QuantumCircuit` name imported
    earlier in the file; later cells must use `qiskit.QuantumCircuit` explicitly
    for the library class.
    """
    def __init__(self, n_qubits, backend, shots):
        # --- Circuit definition ---
        self._circuit = qiskit.QuantumCircuit(n_qubits)
        all_qubits = [i for i in range(n_qubits)]
        self.theta = qiskit.circuit.Parameter('theta')
        self._circuit.h(all_qubits)
        self._circuit.barrier()
        self._circuit.ry(self.theta, all_qubits)
        self._circuit.measure_all()
        # ---------------------------
        self.backend = backend
        self.shots = shots
    def run(self, thetas):
        """Execute the circuit once per value in `thetas` and return the
        expectation of the measured bitstrings interpreted as numbers."""
        # NOTE(review): `assemble` + backend.run(qobj) is the legacy execution
        # path, removed in newer qiskit releases — confirm pinned version.
        t_qc = transpile(self._circuit,
                         self.backend)
        qobj = assemble(t_qc,
                        shots=self.shots,
                        parameter_binds = [{self.theta: theta} for theta in thetas])
        job = self.backend.run(qobj)
        result = job.result().get_counts()
        counts = np.array(list(result.values()))
        # bitstring keys like '0'/'1' are reinterpreted as floats
        states = np.array(list(result.keys())).astype(float)
        # Compute probabilities for each state
        probabilities = counts / self.shots
        # Get state expectation
        expectation = np.sum(states * probabilities)
        return np.array([expectation])
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
class HybridFunction(Function):
    """Hybrid quantum-classical autograd function.

    Forward evaluates the wrapped circuit; backward estimates input gradients
    with the parameter-shift rule (circuit evaluated at theta +/- shift).
    """
    @staticmethod
    def forward(ctx, input, quantum_circuit, shift):
        """ Forward pass computation """
        ctx.shift = shift
        ctx.quantum_circuit = quantum_circuit
        # Only the first row of `input` is evaluated — assumes batch size 1,
        # matching the DataLoaders in this file (batch_size=1). TODO confirm.
        expectation_z = ctx.quantum_circuit.run(input[0].tolist())
        result = torch.tensor([expectation_z])
        ctx.save_for_backward(input, result)
        return result
    @staticmethod
    def backward(ctx, grad_output):
        """ Backward pass computation """
        input, expectation_z = ctx.saved_tensors
        input_list = np.array(input.tolist())
        # Parameter-shift rule: shift every input by +/- ctx.shift.
        shift_right = input_list + np.ones(input_list.shape) * ctx.shift
        shift_left = input_list - np.ones(input_list.shape) * ctx.shift
        gradients = []
        for i in range(len(input_list)):
            expectation_right = ctx.quantum_circuit.run(shift_right[i])
            expectation_left = ctx.quantum_circuit.run(shift_left[i])
            # finite-difference style estimate from the two shifted evaluations
            gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
            gradients.append(gradient)
        gradients = np.array([gradients]).T
        # Chain rule: scale the estimated gradient by the upstream gradient.
        # Returns None for the non-tensor inputs (quantum_circuit, shift).
        return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
    """Torch layer wrapping a 1-qubit circuit whose gradient is computed by
    HybridFunction via the parameter-shift rule."""

    def __init__(self, backend, shots, shift):
        super(Hybrid, self).__init__()
        # one qubit, executed on `backend` with the given number of shots
        self.quantum_circuit = QuantumCircuit(1, backend, shots)
        self.shift = shift  # parameter-shift magnitude used in backward

    def forward(self, input):
        return HybridFunction.apply(input, self.quantum_circuit, self.shift)
import torchvision
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) # transform images to tensors/vectors
cifar_trainset = datasets.CIFAR10(root='./data1', train=True, download=True, transform=transform)
labels = cifar_trainset.targets # get the labels for the data
labels = np.array(labels)
idx1 = np.where(labels == 0) # filter on aeroplanes
idx2 = np.where(labels == 1) # filter on automobiles
# Specify number of datapoints per class (i.e. there will be n pictures of automobiles and n pictures of aeroplanes in the training set)
n=100
# concatenate the data indices
idx = np.concatenate((idx1[0][0:n],idx2[0][0:n]))
# create the filtered dataset for our training set
cifar_trainset.targets = labels[idx]
cifar_trainset.data = cifar_trainset.data[idx]
train_loader = torch.utils.data.DataLoader(cifar_trainset, batch_size=1, shuffle=True)
import numpy as np
import matplotlib.pyplot as plt
n_samples_show = 8
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 2))
while n_samples_show > 0:
images, targets = data_iter.__next__()
images=images.squeeze()
#axes[n_samples_show - 1].imshow( tf.shape( tf.squeeze(images[0]) ),cmap='gray' )
#plt.imshow((tf.squeeze(images[0])))
#plt.imshow( tf.shape( tf.squeeze(x_train) ) )
#axes[n_samples_show - 1].imshow(images[0].numpy().squeeze(), cmap='gray')
axes[n_samples_show - 1].imshow(images[0].numpy(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
# Testing data: build a 2-class (aeroplane vs automobile) CIFAR-10 test split.
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])  # transform images to tensors/vectors
cifar_testset = datasets.CIFAR10(root='./data1', train=False, download=True, transform=transform)
labels1 = cifar_testset.targets  # get the labels for the data
labels1 = np.array(labels1)
idx1_ae = np.where(labels1 == 0)  # filter on aeroplanes
idx2_au = np.where(labels1 == 1)  # filter on automobiles
# Specify number of datapoints per class (n aeroplanes and n automobiles)
n = 50
# concatenate the data indices
idxa = np.concatenate((idx1_ae[0][0:n], idx2_au[0][0:n]))
# BUG FIX: index the *test* label array (labels1), not the training `labels`
# array — idxa are positions within the test set, so labels[idxa] assigned
# the wrong (training) labels to the filtered test samples.
cifar_testset.targets = labels1[idxa]
cifar_testset.data = cifar_testset.data[idxa]
test_loader = torch.utils.data.DataLoader(cifar_testset, batch_size=1, shuffle=True)
class Net(nn.Module):
    """CNN for 3-channel CIFAR images with a 1-qubit hybrid quantum head."""
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(500, 500)
        self.fc2 = nn.Linear(500, 1)
        # 1-qubit simulated circuit, 100 shots, parameter-shift of pi/2
        self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout(x)
        x = x.view(1, -1)  # hard-coded batch size of 1
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        x = self.hybrid(x)  # quantum expectation value (instance of Hybrid)
        x = (x + 1) / 2  # rescale from [-1, 1] to [0, 1]
        # NOTE(review): outputs look like probabilities, but the training loop
        # uses nn.NLLLoss, which expects log-probabilities — confirm intended.
        x = torch.cat((x, 1 - x), -1)
        return x
# qc = TorchCircuit.apply
"""
Ignore this cell
"""
class Net(nn.Module):
    """CNN variant from the cell the author marked "Ignore this cell".

    NOTE(review): `qc` used in forward() is never defined in the executed
    cells (only mentioned in a commented-out `qc = TorchCircuit.apply`),
    so calling forward() raises NameError.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.h1 = nn.Linear(500, 500)
        self.h2 = nn.Linear(500, 1)
    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 500)
        x = F.relu(self.h1(x))
        x = F.dropout(x, training=self.training)
        x = self.h2(x)
        x = qc(x)  # NOTE(review): `qc` is undefined at runtime — see class docstring
        x = (x + 1) / 2  # Normalise the inputs to 1 or 0
        x = torch.cat((x, 1 - x), -1)
        return x
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 5
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(100. * (epoch + 1) / epochs, loss_list[-1]))
#Now plotting the training graph
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list1 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list1.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list1[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list1)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list2 = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list2.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list2[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list2)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 30
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
#Alongside, let's also plot the data
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
# Evaluate the trained hybrid model on the test set.
model.eval()
with torch.no_grad():
    correct = 0
    # BUG FIX: start from a fresh list — previously this appended to the
    # `total_loss` left over from the last training epoch, so the printed
    # "test loss" averaged training and test losses together.
    total_loss = []
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
        )
import torch
import torchvision
import torchvision.transforms as transforms
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 10
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
# Testing the quantum hybrid in order to compare it with the classical one.
model.eval()
with torch.no_grad():
    correct = 0
    # BUGFIX: reset the accumulator. Previously this cell kept appending to
    # the `total_loss` list left over from the last training epoch, so the
    # reported "test" loss mixed in training losses.
    total_loss = []
    for batch_idx, (data, target) in enumerate(test_loader):
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss = loss_func(output, target)
        total_loss.append(loss.item())
    # len(test_loader) is the number of batches; with batch_size=1 it equals
    # the number of test samples.
    print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
        sum(total_loss) / len(total_loss),
        correct / len(test_loader) * 100)
    )
class Net(nn.Module):
    """Hybrid CNN: two conv blocks -> fc1/fc2 -> quantum Hybrid layer.

    forward() returns cat((x, 1 - x)), so the two outputs sum to 1 and can
    be read as two-class scores.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(500, 500)
        self.fc2 = nn.Linear(500,49)
        # Hybrid is defined elsewhere in this project; presumably a quantum
        # layer taking (backend, shots, shift) — TODO confirm its signature.
        self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout(x)
        # NOTE(review): view(1, -1) hard-codes batch size 1, and fc1 expects
        # exactly 500 flattened features, which pins the input spatial size.
        x = x.view(1, -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        x = self.hybrid(x)
        return torch.cat((x, 1 - x), -1)
class Net(nn.Module):
    """Small classical CNN: two conv+pool blocks, two fully-connected layers.

    Sized for 1-channel 28x28 inputs (20 * 4 * 4 = 320 flattened features)
    and 99 output classes. Returns class probabilities via softmax.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 99)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # FIX: pass dim explicitly — implicit-dim softmax is deprecated.
        # For a 2-D (batch, classes) tensor the implicit choice was dim=1,
        # so this is behavior-preserving.
        # NOTE(review): softmax output paired with nn.NLLLoss (used elsewhere
        # in this file) would need log_softmax instead — confirm the caller.
        return F.softmax(x, dim=1)
|
https://github.com/Qubico-Hack/tutorials
|
Qubico-Hack
|
!pip install qiskit torch torchvision matplotlib
!pip install qiskit-machine-learning
!pip install torchviz
!pip install qiskit[all]
!pip install qiskit==0.45.2
!pip install qiskit_algorithms==0.7.1
!pip install qiskit-ibm-runtime==0.17.0
!pip install qiskit-aer==0.13.2
#Quantum net drawing
!pip install pylatexenc
# PyTorch
import torch
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms
import torch.optim as optim
from torch.nn import Module, Conv2d, Linear, Dropout2d, CrossEntropyLoss
import torch.nn.functional as F
from torchviz import make_dot
from torch import Tensor
from torch import cat
# Qiskit
from qiskit import Aer
from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.neural_networks.estimator_qnn import EstimatorQNN
from qiskit_machine_learning.circuit.library import QNNCircuit
# Visualization
import matplotlib.pyplot as plt
import numpy as np
# Dataset folder locations (Google Drive, mounted under /content/drive).
train_data = datasets.ImageFolder('/content/drive/MyDrive/QCNN/Data-set/Train', transform=transforms.Compose([transforms.ToTensor()]))
test_data = datasets.ImageFolder('/content/drive/MyDrive/QCNN/Data-set/Test', transform=transforms.Compose([transforms.ToTensor()]))
# The train/test tensor size (shape of one sample image).
print(f"Data tensor Dimension:",train_data[0][0].shape)
# Wrap in DataLoaders; batch_size=1 — the model below assumes this.
train_loader = DataLoader(train_data, shuffle=True, batch_size=1)
test_loader = DataLoader(test_data, shuffle=True, batch_size=1)
# Show the class-name -> label mapping.
print((train_loader.dataset.class_to_idx))
n_samples_show = 5
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 10))
# Preview a few samples (counts n_samples_show down to 0).
while n_samples_show > 0:
    images, targets = data_iter.__next__()
    axes[n_samples_show - 1].imshow(images[0, 0].numpy().squeeze(), cmap=plt.cm.rainbow)
    axes[n_samples_show - 1].set_xticks([])
    axes[n_samples_show - 1].set_yticks([])
    axes[n_samples_show - 1].set_title(f"Labeled: {targets[0].item()}")
    n_samples_show -= 1
# Batch size constant (NOTE: the loaders above were built with batch_size=1).
batch_size = 10
# Quantum Neural Network model factory.
def create_qnn():
    """Build an EstimatorQNN around a default 2-qubit QNNCircuit."""
    return EstimatorQNN(circuit=QNNCircuit(2))

qnn = create_qnn()
print(qnn.circuit)
# Dynamically compute the flattened feature size that feeds fc1.
def get_conv_output_size(model, shape):
    """Return the flattened size of model._forward_features output.

    Runs one random dummy input of the given per-sample `shape` (C, H, W)
    through the model's feature extractor and counts the features.
    """
    batch_size = 1
    # torch.autograd.Variable is deprecated — plain tensors carry autograd.
    # Also avoid shadowing the builtin `input`.
    x = torch.rand(batch_size, *shape)
    output_feat = model._forward_features(x)
    # `.data` is unnecessary here; view+size reads the shape directly.
    return output_feat.view(batch_size, -1).size(1)
# Define the neural network in PyTorch (CNN front-end + 2-qubit QNN layer).
class Net(Module):
    """Hybrid CNN: two conv/pool blocks -> fc1/fc2 -> QNN -> fc3.

    forward() returns cat((x, 1 - x)) so the two outputs sum to 1 and can
    be treated as two-class scores.
    """
    def __init__(self, qnn):
        super(Net, self).__init__()
        self.conv1 = Conv2d(3, 24, kernel_size=5)
        self.conv2 = Conv2d(24, 48, kernel_size=5)
        self.dropout = Dropout2d()
        # Dynamically computing the fc1 input size was abandoned:
        #self.conv_output_size = self._get_conv_output_size((3, 432, 432))
        self.fc1 = Linear(529200, 512) # hard-coded; 529200 = 48*105*105, i.e. sized for 432x432 RGB inputs
        self.fc2 = Linear(512, 2) # two features feed the 2-qubit QNN
        self.qnn = TorchConnector(qnn)
        self.fc3 = Linear(1, 1) # the QNN yields a single expectation value
    def _get_conv_output_size(self, shape):
        # NOTE(review): this calls self._forward_features, which is not
        # defined on this class — invoking it would raise AttributeError.
        batch_size = 1
        input = torch.autograd.Variable(torch.rand(batch_size, *shape))
        output_feat = self._forward_features(input)
        n_size = output_feat.data.view(batch_size, -1).size(1)
        print("Tamaรฑo calculado:", n_size)
        return n_size
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout(x)
        x = x.view(x.shape[0], -1)  # flatten, keeping the batch dimension
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        x = self.qnn(x) # apply the quantum network inside the forward pass
        x = self.fc3(x)
        return cat((x, 1 - x), -1)
# Create an instance of the model (wraps the quantum layer via TorchConnector).
model = Net(qnn)
# Print the model summary.
print(model)
print(f"Device: {next(model.parameters()).device}")
# CUDA variant kept for reference:
#dummy_tensor = next(iter(train_loader))[0].to('cuda')
dummy_tensor = next(iter(train_loader))[0]
output = model(dummy_tensor)
params = dict(list(model.named_parameters()))
# Concatenate with torch.cat (Net.forward already concatenates; this extra
# step only exists to feed torchviz a single tensor).
concatenated_output = torch.cat((output, 1 - output), -1)
make_dot(concatenated_output, params=params).render("rnn_torchviz", format="png")
# Define optimizer and loss function.
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = CrossEntropyLoss()
# Start training.
epochs = 3 # number of epochs
loss_list = []
model.train() # training mode (enables dropout)
for epoch in range(epochs):
    correct = 0
    total_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad(set_to_none=True) # clear gradients
        output = model(data)
        loss = loss_func(output, target)
        loss.backward() # backward pass
        optimizer.step() # optimize the weights
        total_loss.append(loss.item()) # accumulate the loss
        train_pred = output.argmax(dim=1, keepdim=True)
        correct += train_pred.eq(target.view_as(train_pred)).sum().item()
    loss_list.append(sum(total_loss) / len(total_loss))
    accuracy = 100 * correct / len(train_loader) # batch_size=1, so batches == samples
    print(f"Training [{100.0 * (epoch + 1) / epochs:.0f}%]\tLoss: {loss_list[-1]:.4f}\tAccuracy: {accuracy:.2f}%")
# Evaluate the model on the test set.
model.eval()
correct = 0
with torch.no_grad():
    for data, target in test_loader:
        output = model(data)
        test_pred = output.argmax(dim=1, keepdim=True)
        correct += test_pred.eq(target.view_as(test_pred)).sum().item()
accuracy = 100 * correct / len(test_loader.dataset)
print(f"Accuracy on test set: {accuracy:.2f}%")
# Plot the per-epoch training loss.
plt.plot(loss_list)
plt.xlabel('รpoca')
plt.ylabel('Pรฉrdida')
plt.title('Pรฉrdida durante el entrenamiento')
plt.show()
# Save the trained weights to Drive.
torch.save(model.state_dict(), '/content/drive/MyDrive/QCNN/Weigth/Epoca_3_modelo_pesos.pth')
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, roc_curve, auc
# Evaluate the model on the test set, collecting per-sample predictions.
model.eval()
all_preds = []
all_targets = []
with torch.no_grad():
    for data, target in test_loader:
        output = model(data)
        test_pred = output.argmax(dim=1, keepdim=True)
        # .item() works because batch_size=1.
        all_preds.append(test_pred.item())
        all_targets.append(target.item())
# Compute and display additional (binary) classification metrics.
accuracy = accuracy_score(all_targets, all_preds)
precision = precision_score(all_targets, all_preds)
recall = recall_score(all_targets, all_preds)
f1 = f1_score(all_targets, all_preds)
print(f"Accuracy: {accuracy:.2f}")
print(f"Precision: {precision:.2f}")
print(f"Recall: {recall:.2f}")
print(f"F1 Score: {f1:.2f}")
# Show the full classification report.
print("\nClassification Report:")
print(classification_report(all_targets, all_preds))
# Evaluate the model again, collecting class-1 scores for the ROC curve.
# NOTE(review): `all_targets` is reused from the previous cell; this loop
# iterates test_loader independently, which relies on sample order not
# mattering for the ROC computation (it doesn't — pairs stay aligned only
# because all_targets is not rebuilt here; verify shuffle=True is acceptable).
model.eval()
all_probs = []
with torch.no_grad():
    for data, target in test_loader:
        output = model(data)
        # Column 1 of the cat((x, 1-x)) output, used as the class-1 score.
        probs = output[:, 1].numpy() # Probabilidades de pertenecer a la clase 1
        all_probs.append(probs)
# Concatenate the per-sample score arrays.
all_probs = np.concatenate(all_probs)
# Compute the ROC curve and AUC.
fpr, tpr, thresholds = roc_curve(all_targets, all_probs)
roc_auc = auc(fpr, tpr)
# Plot the ROC curve and the AUC.
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'AUC = {roc_auc:.2f}')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Tasa de Falsos Positivos (FPR)')
plt.ylabel('Tasa de Verdaderos Positivos (TPR)')
plt.title('Curva ROC')
plt.legend(loc="lower right")
plt.show()
|
https://github.com/Qubico-Hack/tutorials
|
Qubico-Hack
|
!pip install qiskit torch torchvision matplotlib
!pip install qiskit-machine-learning
!pip install torchviz
!pip install qiskit[all]
!pip install qiskit==0.45.2
!pip install qiskit_algorithms==0.7.1
!pip install qiskit-ibm-runtime==0.17.0
!pip install qiskit-aer==0.13.2
#Quantum net drawing
!pip install pylatexenc
# PyTorch
import torch
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms
import torch.optim as optim
from torch.nn import Module, Conv2d, Linear, Dropout2d, CrossEntropyLoss
import torch.nn.functional as F
from torchviz import make_dot
from torch import Tensor
from torch import cat
# Qiskit
from qiskit import Aer
from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.neural_networks.estimator_qnn import EstimatorQNN
from qiskit_machine_learning.circuit.library import QNNCircuit
from google.colab import drive
drive.mount('/content/drive')
# Visualization
import matplotlib.pyplot as plt
import numpy as np
# Dataset folder locations (Google Drive, mounted above).
train_data = datasets.ImageFolder('/content/drive/MyDrive/QCNN/Data-set/Train', transform=transforms.Compose([transforms.ToTensor()]))
test_data = datasets.ImageFolder('/content/drive/MyDrive/QCNN/Data-set/Test', transform=transforms.Compose([transforms.ToTensor()]))
# The train/test tensor size (shape of one sample image).
print(f"Data tensor Dimension:",train_data[0][0].shape)
# Wrap in DataLoaders; batch_size=1 — the model below assumes this.
train_loader = DataLoader(train_data, shuffle=True, batch_size=1)
test_loader = DataLoader(test_data, shuffle=True, batch_size=1)
# Show the class-name -> label mapping.
print((train_loader.dataset.class_to_idx))
n_samples_show = 5
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 10))
# Preview a few samples (counts n_samples_show down to 0).
while n_samples_show > 0:
    images, targets = data_iter.__next__()
    axes[n_samples_show - 1].imshow(images[0, 0].numpy().squeeze(), cmap=plt.cm.rainbow)
    axes[n_samples_show - 1].set_xticks([])
    axes[n_samples_show - 1].set_yticks([])
    axes[n_samples_show - 1].set_title(f"Labeled: {targets[0].item()}")
    n_samples_show -= 1
# Batch size constant (NOTE: the loaders above were built with batch_size=1).
batch_size = 10
# Quantum Neural Network model factory.
def create_qnn():
    """Build an EstimatorQNN around a default 2-qubit QNNCircuit."""
    return EstimatorQNN(circuit=QNNCircuit(2))

qnn = create_qnn()
print(qnn.circuit)
# Dynamically compute the flattened feature size that feeds fc1.
def get_conv_output_size(model, shape):
    """Return the flattened size of model._forward_features output.

    Runs one random dummy input of the given per-sample `shape` (C, H, W)
    through the model's feature extractor and counts the features.
    """
    batch_size = 1
    # torch.autograd.Variable is deprecated — plain tensors carry autograd.
    # Also avoid shadowing the builtin `input`.
    x = torch.rand(batch_size, *shape)
    output_feat = model._forward_features(x)
    # `.data` is unnecessary here; view+size reads the shape directly.
    return output_feat.view(batch_size, -1).size(1)
# Define the neural network in PyTorch (CNN front-end + 2-qubit QNN layer).
class Net(Module):
    """Hybrid CNN: two conv/pool blocks -> fc1/fc2 -> QNN -> fc3.

    forward() returns cat((x, 1 - x)) so the two outputs sum to 1 and can
    be treated as two-class scores.
    """
    def __init__(self, qnn):
        super(Net, self).__init__()
        self.conv1 = Conv2d(3, 24, kernel_size=5)
        self.conv2 = Conv2d(24, 48, kernel_size=5)
        self.dropout = Dropout2d()
        # Dynamically computing the fc1 input size was abandoned:
        #self.conv_output_size = self._get_conv_output_size((3, 432, 432))
        self.fc1 = Linear(529200, 512) # hard-coded; 529200 = 48*105*105, i.e. sized for 432x432 RGB inputs
        self.fc2 = Linear(512, 2) # two features feed the 2-qubit QNN
        self.qnn = TorchConnector(qnn)
        self.fc3 = Linear(1, 1) # the QNN yields a single expectation value
    def _get_conv_output_size(self, shape):
        # NOTE(review): this calls self._forward_features, which is not
        # defined on this class — invoking it would raise AttributeError.
        batch_size = 1
        input = torch.autograd.Variable(torch.rand(batch_size, *shape))
        output_feat = self._forward_features(input)
        n_size = output_feat.data.view(batch_size, -1).size(1)
        print("Tamaรฑo calculado:", n_size)
        return n_size
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = self.dropout(x)
        x = x.view(x.shape[0], -1)  # flatten, keeping the batch dimension
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        x = self.qnn(x) # apply the quantum network inside the forward pass
        x = self.fc3(x)
        return cat((x, 1 - x), -1)
# Create an instance of the model (wraps the quantum layer via TorchConnector).
model = Net(qnn)
# Print the model summary.
print(model)
print(f"Device: {next(model.parameters()).device}")
# CUDA variant kept for reference:
#dummy_tensor = next(iter(train_loader))[0].to('cuda')
dummy_tensor = next(iter(train_loader))[0]
output = model(dummy_tensor)
params = dict(list(model.named_parameters()))
# Concatenate with torch.cat (Net.forward already concatenates; this extra
# step only exists to feed torchviz a single tensor).
concatenated_output = torch.cat((output, 1 - output), -1)
make_dot(concatenated_output, params=params).render("rnn_torchviz", format="png")
# Define optimizer and loss function.
optimizer = optim.Adam(model.parameters(), lr=0.0001)
loss_func = CrossEntropyLoss()
# Start training.
epochs = 3 # number of epochs
loss_list = []
model.train() # training mode (enables dropout)
for epoch in range(epochs):
    correct = 0
    total_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad(set_to_none=True) # clear gradients
        output = model(data)
        loss = loss_func(output, target)
        loss.backward() # backward pass
        optimizer.step() # optimize the weights
        total_loss.append(loss.item()) # accumulate the loss
        train_pred = output.argmax(dim=1, keepdim=True)
        correct += train_pred.eq(target.view_as(train_pred)).sum().item()
    loss_list.append(sum(total_loss) / len(total_loss))
    accuracy = 100 * correct / len(train_loader) # batch_size=1, so batches == samples
    print(f"Training [{100.0 * (epoch + 1) / epochs:.0f}%]\tLoss: {loss_list[-1]:.4f}\tAccuracy: {accuracy:.2f}%")
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
import numpy as np
# Parameters (natural units: J and k_B set to 1).
J = 1
k_B = 1
T = 2.269 # Critical temperature of the 2D Ising model (~2/ln(1+sqrt 2))
K = J / (k_B * T)  # dimensionless coupling
# Transfer matrix for a 2x2 Ising model (simplified example)
def transfer_matrix(K):
    """Return the 4x4 transfer matrix for a 2x2 Ising plaquette.

    Rows/columns enumerate the four spin-pair configurations
    (-1,-1), (-1,1), (1,-1), (1,1); each entry is exp(K * E) where E sums
    the four bonds around the plaquette.

    Args:
        K: dimensionless coupling J / (k_B T).
    """
    # Renamed from `T` to avoid shadowing the module-level temperature T.
    mat = np.zeros((4, 4))
    configs = [(-1, -1), (-1, 1), (1, -1), (1, 1)]
    for i, (s1, s2) in enumerate(configs):
        for j, (s3, s4) in enumerate(configs):
            E = K * (s1*s2 + s2*s3 + s3*s4 + s4*s1)
            mat[i, j] = np.exp(E)
    return mat
# Diagonalise the transfer matrix; it is real symmetric, so the eigenvalues
# are real.
T_matrix = transfer_matrix(K)
eigenvalues, _ = np.linalg.eig(T_matrix)
lambda_max = np.max(eigenvalues)
# Free energy from the largest eigenvalue.
# NOTE(review): the per-spin normalisation (dividing ln(lambda_max) by the
# number of spins) is not applied here — confirm the intended convention.
f = -k_B * T * np.log(lambda_max)
print(f"Free energy per spin: {f:.4f}")
# Output the transfer matrix and its largest eigenvalue
print("Transfer matrix:\n", T_matrix)
print("Largest eigenvalue:", lambda_max)
import numpy as np
import random
import math
def MC_step(config, beta):
    '''One Metropolis sweep: attempt L*L random single-spin flips in place
    on the periodic square lattice `config` at inverse temperature `beta`,
    and return the (mutated) lattice.'''
    size = len(config)
    for _ in range(size * size):
        row = np.random.randint(0, size)
        col = np.random.randint(0, size)
        spin = config[row, col]
        # Sum of the four nearest neighbours with periodic wrap-around.
        nn_sum = (config[(row + 1) % size, col]
                  + config[row, (col + 1) % size]
                  + config[(row - 1) % size, col]
                  + config[row, (col - 1) % size])
        delta = 2 * spin * nn_sum
        # Accept downhill moves outright; otherwise draw against the
        # Boltzmann factor (short-circuit keeps the RNG call sequence).
        if delta < 0 or random.uniform(0, 1) < np.exp(-delta * beta):
            config[row, col] = -spin
    return config
def E_dimensionless(config, L):
    '''Total dimensionless energy of the spin configuration.

    Periodic boundaries; each site contributes -S * (sum of its four
    nearest neighbours), and the final /4 compensates for the resulting
    overcounting of bonds, matching the original per-site loop exactly.
    `L` is kept for interface compatibility (the array shape suffices).
    '''
    # Vectorised neighbour sum via np.roll — replaces the O(L^2) Python
    # double loop with a handful of array operations, same result.
    neighbors = (np.roll(config, 1, axis=0) + np.roll(config, -1, axis=0)
                 + np.roll(config, 1, axis=1) + np.roll(config, -1, axis=1))
    energy = -np.sum(config * neighbors)
    return energy / 4  # To compensate for overcounting
def magnetization(config):
    '''Net magnetization of the lattice: the sum of all spins.'''
    total = np.sum(config)
    return total
def calcul_energy_mag_C_X(config, L, eqSteps, err_runs):
    """Sweep temperature and measure Ising observables by Monte Carlo.

    For each of 100 temperatures in [1, 7]: equilibrate `config` with
    eqSteps Metropolis sweeps, then run err_runs independent measurement
    runs of mcSteps sweeps each, averaging energy, magnetization, specific
    heat and susceptibility per spin. Returns the temperature grid, the
    means and standard deviations across error runs, plus Onsager's
    theoretical magnetization/specific-heat curves below T_c.

    NOTE(review): `config` is mutated in place and carried across
    temperatures (annealing-style, never re-initialised per T).
    """
    print('finished')
    nt = 100 # number of temperature points
    mcSteps = 1000
    T_c = 2 / math.log(1 + math.sqrt(2))
    T = np.linspace(1., 7., nt)
    # NOTE: E, M, C, X arrays are immediately shadowed by scalar
    # accumulators of the same names inside the err_runs loop below.
    E, M, C, X = np.zeros(nt), np.zeros(nt), np.zeros(nt), np.zeros(nt)
    C_theoric, M_theoric = np.zeros(nt), np.zeros(nt)
    delta_E, delta_M, delta_C, delta_X = np.zeros(nt), np.zeros(nt), np.zeros(nt), np.zeros(nt)
    # n1/n2 normalisation factors are computed but never used below.
    n1 = 1.0 / (mcSteps * L * L)
    n2 = 1.0 / (mcSteps * mcSteps * L * L)
    Energies, Magnetizations, SpecificHeats, Susceptibilities = [], [], [], []
    delEnergies, delMagnetizations, delSpecificHeats, delSusceptibilities = [], [], [], []
    for t in range(nt):
        beta = 1. / T[t]
        # Equilibrate the system
        for _ in range(eqSteps):
            MC_step(config, beta)
        Ez, Cz, Mz, Xz = [], [], [], []
        for _ in range(err_runs):
            E, E_squared, M, M_squared = 0, 0, 0, 0
            for _ in range(mcSteps):
                MC_step(config, beta)
                energy = E_dimensionless(config, L)
                mag = abs(magnetization(config))
                E += energy
                E_squared += energy ** 2
                M += mag
                M_squared += mag ** 2
            # Per-sweep means for this measurement run.
            E_mean = E / mcSteps
            E_squared_mean = E_squared / mcSteps
            M_mean = M / mcSteps
            M_squared_mean = M_squared / mcSteps
            # Per-spin observables; C and chi via fluctuation formulas.
            Energy = E_mean / (L ** 2)
            SpecificHeat = beta ** 2 * (E_squared_mean - E_mean ** 2) / (L ** 2)
            Magnetization = M_mean / (L ** 2)
            Susceptibility = beta * (M_squared_mean - M_mean ** 2) / (L ** 2)
            Ez.append(Energy)
            Cz.append(SpecificHeat)
            Mz.append(Magnetization)
            Xz.append(Susceptibility)
        # Mean and spread across the err_runs independent runs.
        Energies.append(np.mean(Ez))
        delEnergies.append(np.std(Ez))
        Magnetizations.append(np.mean(Mz))
        delMagnetizations.append(np.std(Mz))
        SpecificHeats.append(np.mean(Cz))
        delSpecificHeats.append(np.std(Cz))
        Susceptibilities.append(np.mean(Xz))
        delSusceptibilities.append(np.std(Xz))
        # Analytic (Onsager) curves below the critical temperature.
        if T[t] < T_c:
            M_theoric[t] = pow(1 - pow(np.sinh(2 * beta), -4), 1 / 8)
            C_theoric[t] = (2.0 / np.pi) * (math.log(1 + math.sqrt(2)) ** 2) * (-math.log(1 - T[t] / T_c) + math.log(1.0 / math.log(1 + math.sqrt(2))) - (1 + np.pi / 4))
        else:
            C_theoric[t] = 0
    return (T, Energies, Magnetizations, SpecificHeats, Susceptibilities,
            delEnergies, delMagnetizations, M_theoric, C_theoric, delSpecificHeats, delSusceptibilities)
# Parameters
L = 10 # Size of the lattice
eqSteps = 1000 # Number of steps to reach equilibrium
err_runs = 10 # Number of error runs
# Initial configuration (random +/-1 spins)
config = 2 * np.random.randint(2, size=(L, L)) - 1
# Perform calculations (slow: 100 temperatures x err_runs x mcSteps sweeps)
results = calcul_energy_mag_C_X(config, L, eqSteps, err_runs)
# Unpack results
(T, Energies, Magnetizations, SpecificHeats, Susceptibilities,
 delEnergies, delMagnetizations, M_theoric, C_theoric,
 delSpecificHeats, delSusceptibilities) = results
# Plot results with error bars from the spread across error runs.
import matplotlib.pyplot as plt
plt.figure()
plt.errorbar(T, Energies, yerr=delEnergies, label='Energy')
plt.xlabel('Temperature')
plt.ylabel('Energy')
plt.legend()
plt.show()
plt.figure()
plt.errorbar(T, Magnetizations, yerr=delMagnetizations, label='Magnetization')
plt.xlabel('Temperature')
plt.ylabel('Magnetization')
plt.legend()
plt.show()
plt.figure()
plt.errorbar(T, SpecificHeats, yerr=delSpecificHeats, label='Specific Heat')
plt.plot(T, C_theoric, label='Theoretical Specific Heat')
plt.xlabel('Temperature')
plt.ylabel('Specific Heat')
plt.legend()
plt.show()
plt.figure()
plt.errorbar(T, Susceptibilities, yerr=delSusceptibilities, label='Susceptibility')
plt.xlabel('Temperature')
plt.ylabel('Susceptibility')
plt.legend()
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
import matplotlib.pyplot as plt
import numpy as np
# Define N values (dimension of a state space)
N = np.arange(1, 1001)
# Calculate logarithm of N base 2 — the qubit count needed to index N states
log2_N = np.log2(N)
# Generate plot
plt.figure(figsize=(8, 6))
plt.plot(N, log2_N, label="log2(N)")
plt.xlabel("N")
plt.ylabel("n = log2(N)") # number of qubits
plt.title("Logarithm of N (base 2) for N = 1 to 1000")
plt.grid(True)
plt.legend()
plt.show()
from qiskit import QuantumCircuit, Aer, transpile, assemble
from qiskit.visualization import plot_histogram, plot_bloch_multivector
import numpy as np
import matplotlib.pyplot as plt
def state_preparation_circuit(x):
    """Encode the classical data point *x* as single-qubit Ry rotations,
    one qubit per feature."""
    circuit = QuantumCircuit(len(x))
    for qubit, feature in enumerate(x):
        # arctan maps an unbounded feature onto a bounded rotation angle.
        circuit.ry(2 * np.arctan(feature), qubit)
    return circuit
# Example classical data point
x = [0.1, 0.5, 0.3]
# Create the state preparation circuit
qc = state_preparation_circuit(x)
# Visualize the circuit
qc.draw('mpl')
# Use the Aer simulator to simulate the quantum state
simulator = Aer.get_backend('statevector_simulator')
# Transpile and assemble the quantum circuit
# NOTE(review): `assemble`/qobj is deprecated in recent Qiskit; simulators
# accept the transpiled circuit directly — confirm the pinned version.
transpiled_qc = transpile(qc, simulator)
qobj = assemble(transpiled_qc)
# Execute the circuit on the statevector simulator
result = simulator.run(qobj).result()
# Get the statevector representing the quantum state
statevector = result.get_statevector()
# Plot the Bloch sphere representation of the quantum state
plot_bloch_multivector(statevector)
# Print the statevector to see the quantum state
print("Statevector representing the quantum state:")
print(statevector)
# Define the dense angle encoding circuit for multiple features
def dense_angle_encoding_circuit(x):
    """
    Create a quantum circuit that encodes feature vector x using dense
    angle encoding: each qubit carries two features — one in the Ry
    amplitude angle, one in the Rz phase angle.

    Raises:
        ValueError: if x has an odd number of features (previously this
        failed with an opaque IndexError inside the loop).
    """
    if len(x) % 2:
        raise ValueError("dense angle encoding requires an even number of features")
    num_qubits = len(x) // 2
    qc = QuantumCircuit(num_qubits)
    for i in range(0, len(x), 2):
        amplitude_angle = np.pi * x[i]      # feature i -> amplitude
        phase_angle = 2 * np.pi * x[i+1]    # feature i+1 -> phase
        qc.ry(2 * amplitude_angle, i//2)
        qc.rz(phase_angle, i//2)
    return qc
# Example feature vector with 8 features (-> 4 qubits, 2 features each)
x = [0.3, 0.7, 0.5, 0.9, 0.2, 0.4, 0.8, 0.6]
# Create the dense angle encoding circuit
qc = dense_angle_encoding_circuit(x)
# Visualize the circuit
qc.draw('mpl')
# Use the Aer simulator to simulate the quantum state
simulator = Aer.get_backend('statevector_simulator')
# Transpile and assemble the quantum circuit
# NOTE(review): `assemble`/qobj is deprecated in recent Qiskit versions.
transpiled_qc = transpile(qc, simulator)
qobj = assemble(transpiled_qc)
# Execute the circuit on the statevector simulator
result = simulator.run(qobj).result()
# Get the statevector representing the quantum state
statevector = result.get_statevector()
# Plot the Bloch sphere representation of the quantum state
plot_bloch_multivector(statevector)
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
from qiskit import QuantumCircuit, execute, Aer
from qiskit.quantum_info import DensityMatrix, entropy
from qiskit.visualization import plot_histogram
import numpy as np
# Helper: von Neumann entropy of a raw density-matrix array.
def von_neumann_entropy(rho):
    """Return S(rho) by wrapping the array in a DensityMatrix."""
    state = DensityMatrix(rho)
    return entropy(state)
# Step 1: Create the entangled state (Bell state)
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
# Simulate to get the density matrix of the initial entangled state
backend = Aer.get_backend('statevector_simulator')
result = execute(qc, backend).result()
statevector = result.get_statevector(qc)
rho_AB = DensityMatrix(statevector).data
# Calculate the initial entropy (zero for a pure Bell state)
S_rho_AB = von_neumann_entropy(rho_AB)
print(f"Initial entropy S(ฯ_AB): {S_rho_AB}")
# Step 2: Apply local Pauli operations on qubit A (identity on B) and
# average the four resulting states into a single mixed state.
pauli_operators = [np.array([[1, 0], [0, 1]]), # I
                   np.array([[0, 1], [1, 0]]), # X
                   np.array([[0, -1j], [1j, 0]]), # Y
                   np.array([[1, 0], [0, -1]])] # Z
rho_bar_AB = np.zeros((4, 4), dtype=complex)
for sigma in pauli_operators:
    U = np.kron(sigma, np.eye(2))
    rho_bar_AB += U @ rho_AB @ U.conj().T
rho_bar_AB /= 4
# Calculate the entropy of the averaged state
S_rho_bar_AB = von_neumann_entropy(rho_bar_AB)
print(f"Entropy of averaged state S(ฯฬ_AB): {S_rho_bar_AB}")
# Step 3: Compute the Holevo quantity (entropy of the average minus the
# average entropy; the latter equals S_rho_AB since all four states are
# unitarily equivalent).
holevo_quantity = S_rho_bar_AB - S_rho_AB
print(f"Holevo quantity ฯ(ฯ_AB): {holevo_quantity}")
# Optionally visualize the density matrices
import matplotlib.pyplot as plt
from qiskit.visualization import plot_state_city
# Plot the density matrix of the original state
plot_state_city(DensityMatrix(rho_AB), title="Original State ฯ_AB")
plt.show()
# Plot the density matrix of the averaged state
plot_state_city(DensityMatrix(rho_bar_AB), title="Averaged State ฯฬ_AB")
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
#DOI : https://doi.org/10.1143/JPSJ.12.570
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import expm
# Define Pauli matrices
sigma_x = np.array([[0, 1], [1, 0]])
sigma_y = np.array([[0, -1j], [1j, 0]])
sigma_z = np.array([[1, 0], [0, -1]])
# System parameters
omega_0 = 1.0 # Transition frequency
Omega = 0.1 # Driving strength
omega = 1.0 # Driving frequency (resonant with omega_0 here)
hbar = 1.0
# Time array
t = np.linspace(0, 50, 10000)
dt = t[1] - t[0]
# Initial density matrix (ground state)
rho = np.array([[1, 0], [0, 0]], dtype=complex)
# Hamiltonian: static sigma_z part plus a sinusoidal sigma_x drive
H0 = (hbar * omega_0 / 2) * sigma_z
H_prime = lambda t: hbar * Omega * np.cos(omega * t) * sigma_x
# Time evolution: piecewise-constant propagator U = exp(-i H dt) per step,
# evaluating the drive at the start of each interval.
rho_t = np.zeros((len(t), 2, 2), dtype=complex)
rho_t[0] = rho
for i in range(1, len(t)):
    H = H0 + H_prime(t[i-1])
    U = expm(-1j * H * dt / hbar)
    rho = U @ rho @ U.conj().T
    rho_t[i] = rho
# Calculate expectation values <sigma_z> = Tr(rho sigma_z) at each time
expectation_z = [np.trace(rho @ sigma_z).real for rho in rho_t]
# Plot the results
plt.figure(figsize=(10, 6))
plt.plot(t, expectation_z, label=r'$\langle \sigma_z \rangle$')
plt.xlabel('Time $t$')
plt.ylabel(r'$\langle \sigma_z \rangle$')
plt.title('Response of a Two-Level System to a Sinusoidal Driving Field')
plt.legend()
plt.grid()
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import expm
# Define Pauli matrices
sigma_x = np.array([[0, 1], [1, 0]])
sigma_y = np.array([[0, -1j], [1j, 0]])
sigma_z = np.array([[1, 0], [0, -1]])
# System parameters
omega_0 = 1.0 # Transition frequency
Omega = 0.1 # Driving strength
hbar = 1.0
# Time array
t = np.linspace(0, 50, 10000)
dt = t[1] - t[0]
# Initial density matrix (ground state)
rho = np.array([[1, 0], [0, 0]], dtype=complex)
# Hamiltonian: static sigma_z part plus a constant (step-function) drive
# switched on at t = 0.
H0 = (hbar * omega_0 / 2) * sigma_z
H_prime = lambda t: hbar * Omega * (t >= 0) * sigma_x # Step function perturbation
# Time evolution: piecewise-constant propagator per step, as above.
rho_t = np.zeros((len(t), 2, 2), dtype=complex)
rho_t[0] = rho
for i in range(1, len(t)):
    H = H0 + H_prime(t[i-1])
    U = expm(-1j * H * dt / hbar)
    rho = U @ rho @ U.conj().T
    rho_t[i] = rho
# Calculate expectation values <sigma_z> = Tr(rho sigma_z) at each time
expectation_z = [np.trace(rho @ sigma_z).real for rho in rho_t]
# Plot the results
plt.figure(figsize=(10, 6))
plt.plot(t, expectation_z, label=r'$\langle \sigma_z \rangle$')
plt.xlabel('Time $t$')
plt.ylabel(r'$\langle \sigma_z \rangle$')
plt.title('Response of a Two-Level System to a Step Function Driving Field')
plt.legend()
plt.grid()
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
import numpy as np
import matplotlib.pyplot as plt
# Build a random n x n Hermitian matrix.
def create_random_hermitian_matrix(n):
    """Return M + M^dagger for a random complex matrix M — Hermitian by
    construction."""
    real_part = np.random.rand(n, n)
    imag_part = np.random.rand(n, n)
    raw = real_part + 1j * imag_part
    return raw + raw.conj().T
# Define the size of the matrix
n = 10
# Create the unperturbed "density matrix"
# NOTE(review): this is Hermitian but not trace-normalised or positive
# semidefinite, so it is not a physical density matrix — confirm intent.
rho_0_large = create_random_hermitian_matrix(n)
rho_0_large = (rho_0_large + rho_0_large.conj().T) / 2 # Ensure it's Hermitian (redundant: already Hermitian)
# Create a small perturbation matrix
perturbation_large = create_random_hermitian_matrix(n) * 0.05 # Scale it to be a small perturbation
# Linear response: rho(t) = rho_0 + perturbation
rho_linear_large = rho_0_large + perturbation_large
# Multiplicative response: rho(t) = rho_0 * perturbation (element-wise multiplication)
rho_multiplicative_large = rho_0_large * perturbation_large
# Compute the eigenvalues of all three matrices
eigenvalues_rho_0_large = np.linalg.eigvals(rho_0_large)
eigenvalues_linear_large = np.linalg.eigvals(rho_linear_large)
eigenvalues_multiplicative_large = np.linalg.eigvals(rho_multiplicative_large)
# Plot the (real parts of the) eigenvalues
fig, ax = plt.subplots(figsize=(12, 6))
x = np.arange(n)
ax.plot(x, eigenvalues_rho_0_large.real, 'o-', label='rho_0')
ax.plot(x, eigenvalues_linear_large.real, 's-', label='Linear (rho_0 + perturbation)')
ax.plot(x, eigenvalues_multiplicative_large.real, 'd-', label='Multiplicative (rho_0 * perturbation)')
ax.set_xticks(x)
ax.set_xticklabels([f'Eigenvalue {i+1}' for i in x])
ax.set_ylabel('Eigenvalues (Real Part)')
ax.set_title('Eigenvalues of the Density Matrix for a Large System')
ax.legend()
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
import numpy as np
import matplotlib.pyplot as plt
# Define parameters
frequency = 100 # Hz (cycles per second)
sampling_rate = 10000 # Samples per second
duration = 1 # Seconds
time = np.linspace(0, duration, sampling_rate)
# Generate a fast-oscillating sine signal
signal = np.sin(2 * np.pi * frequency * time)
# Calculate its average (near zero over whole periods)
average = np.mean(signal)
# Print average
print("Average of the signal:", average)
# Plot signal
plt.plot(time, signal)
plt.xlabel("Time (s)")
plt.ylabel("Signal")
plt.title("Fast-Oscillating Signal (Average: {:.4f})".format(average))
plt.grid(True)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
w_0 = 1e-1
w = np.linspace(0, 1e3, 1000)
# Complex exponential exp(i(w_0 - w)).
# NOTE(review): plt.plot below receives complex values and will cast them
# to their real part (with a ComplexWarning) — confirm whether the real
# part or |func| was intended.
func = np.exp(1j*(w_0 - 1*w))
plt.figure(figsize=(10,4))
plt.plot(w, func)
plt.grid()
from qiskit import QuantumCircuit, Aer, execute
from qiskit.visualization import plot_histogram
# Create a Quantum Circuit with 3 qubits and 3 classical bits.
# Qubit 0 holds the state to teleport; qubits 1-2 form the Bell pair.
qc = QuantumCircuit(3, 3)
# Create the Bell state
qc.h(1)
qc.cx(1, 2)
# Prepare the state to be teleported
qc.x(0) # Example: teleporting |1>
# Entangle the qubit to be teleported with the first qubit of the Bell pair
qc.cx(0, 1)
qc.h(0)
# Measure the qubits
qc.measure([0, 1], [0, 1])
# Apply conditional operations based on the measurement results.
# NOTE(review): these are quantum-controlled gates applied after the
# measurement rather than classically-conditioned corrections; this relies
# on the simulator's handling of post-measurement states (deferred
# measurement) — confirm this matches the intended protocol.
qc.cx(1, 2)
qc.cz(0, 2)
# Measure the teleported qubit
qc.measure(2, 2)
# Simulate the circuit
simulator = Aer.get_backend('qasm_simulator')
result = execute(qc, backend=simulator, shots=1024).result()
# Get the counts of the measurement results
counts = result.get_counts()
# Plot the results
plot_histogram(counts, title="Standard Quantum Teleportation")
qc.draw(output='mpl')
from qiskit import QuantumCircuit, Aer, execute
from qiskit.visualization import plot_histogram
from qiskit.providers.aer import noise
# Create a Quantum Circuit with 3 qubits (teleportation with a noisy
# entanglement resource).
qc_mixed = QuantumCircuit(3, 3)
# Create the Bell state
qc_mixed.h(1)
qc_mixed.cx(1, 2)
# Prepare the state to be teleported
qc_mixed.x(0) # Example: teleporting |1>
# Entangle the qubit to be teleported with the first qubit of the noisy Bell pair
qc_mixed.cx(0, 1)
qc_mixed.h(0)
# Measure the qubits
qc_mixed.measure([0, 1], [0, 1])
# Apply conditional operations based on the measurement results
# (quantum-controlled, as in the noiseless version above).
qc_mixed.cx(1, 2)
qc_mixed.cz(0, 2)
# Measure the teleported qubit
qc_mixed.measure(2, 2)
# Define a noise model with bit-flip noise (X with prob 0.2, else identity)
bit_flip_prob = 0.2
bit_flip_noise = noise.pauli_error([('X', bit_flip_prob), ('I', 1 - bit_flip_prob)])
# Create a noise model that attaches the error to every x and h gate
noise_model = noise.NoiseModel()
noise_model.add_all_qubit_quantum_error(bit_flip_noise, ['x', 'h'])
# Add single-qubit noise instructions to simulate the mixed-state resource.
# NOTE(review): these are appended after the final measurement in program
# order — confirm the intended insertion point.
qc_noisy = qc_mixed.copy()
qc_noisy.append(bit_flip_noise.to_instruction(), [1])
qc_noisy.append(bit_flip_noise.to_instruction(), [2])
# Simulate the circuit with the noise model
simulator = Aer.get_backend('qasm_simulator')
result_mixed = execute(qc_noisy, backend=simulator, shots=1024, noise_model=noise_model).result()
# Get the counts of the measurement results
counts_mixed = result_mixed.get_counts()
# Plot the results
plot_histogram(counts_mixed, title="Teleportation with Mixed State Resource")
import numpy as np
import matplotlib.pyplot as plt
# Parameters for the two Lorentzian spectral-density contributions.
omega_0 = 1.0 # Resonant frequency of the two-level atom
gamma = 0.1 # Width of the Lorentzian peak for resonant interaction
gamma_off = 0.3 # Width of the off-resonant interaction
omega_off = 1.5 # Center frequency for off-resonant interaction
# Frequency range to evaluate/plot over
omega = np.linspace(0, 2, 1000)
# Lorentzian spectral density function for resonant interaction
def J_resonant(omega, omega_0, gamma):
    """Lorentzian spectral density centred on the resonant frequency omega_0."""
    detuning = omega - omega_0
    return gamma ** 2 / (detuning ** 2 + gamma ** 2)

def J_off_resonant(omega, omega_off, gamma_off):
    """Lorentzian spectral density centred on the off-resonant frequency omega_off."""
    detuning = omega - omega_off
    return gamma_off ** 2 / (detuning ** 2 + gamma_off ** 2)

def J_total(omega, omega_0, gamma, omega_off, gamma_off):
    """Sum of the resonant and off-resonant Lorentzian contributions."""
    resonant_part = J_resonant(omega, omega_0, gamma)
    off_resonant_part = J_off_resonant(omega, omega_off, gamma_off)
    return resonant_part + off_resonant_part
# Compute spectral densities
J_omega_resonant = J_resonant(omega, omega_0, gamma)
J_omega_off_resonant = J_off_resonant(omega, omega_off, gamma_off)
J_omega_total = J_total(omega, omega_0, gamma, omega_off, gamma_off)
# Plot the spectral densities
plt.figure(figsize=(10, 6))
plt.plot(omega, J_omega_resonant, label='Resonant Interaction $J_{\\text{res}}(\\omega)$')
plt.plot(omega, J_omega_off_resonant, label='Off-Resonant Interaction $J_{\\text{off-res}}(\\omega)$')
plt.plot(omega, J_omega_total, label='Total Spectral Density $J_{\\text{total}}(\\omega)$', linestyle='--')
plt.axvline(x=omega_0, color='r', linestyle='--', label='$\\omega_0$ (Resonant Frequency)')
plt.axvline(x=omega_off, color='g', linestyle='--', label='$\\omega_{\\text{off}}$ (Off-Resonant Frequency)')
plt.xlabel('$\\omega$ (Frequency)')
plt.ylabel('$J(\\omega)$ (Spectral Density)')
plt.title('Spectral Density for Two-Level Atom with Resonant and Off-Resonant Interactions')
plt.legend()
plt.grid(True)
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
from qiskit import QuantumCircuit, Aer, execute
import matplotlib.pyplot as plt
# Function to generate a random bit using a quantum circuit
def generate_random_bit():
    """Return one uniformly random bit (0 or 1) by measuring |+> once."""
    # One qubit plus one classical bit; H creates an equal superposition.
    circuit = QuantumCircuit(1, 1)
    circuit.h(0)
    circuit.measure(0, 0)
    # A single shot on the QASM simulator yields exactly one outcome key.
    backend = Aer.get_backend('qasm_simulator')
    counts = execute(circuit, backend, shots=1).result().get_counts(circuit)
    # The sole key is '0' or '1'; convert it to an int.
    return int(next(iter(counts)))
# Generate a random bit
random_bit = generate_random_bit()
print(f"Random bit: {random_bit}")
# Generate multiple random bits
num_bits = 10
random_bits = [generate_random_bit() for _ in range(num_bits)]
print(f"Random bits: {random_bits}")
# Plot the random bits
plt.bar(range(num_bits), random_bits, tick_label=range(num_bits))
plt.xlabel('Bit index')
plt.ylabel('Random bit value')
plt.title('Quantum Random Number Generation')
plt.show()
import numpy as np
from qiskit import QuantumCircuit, Aer, execute
from qiskit.visualization import plot_histogram
import random
# Function to create a Bell state measurement circuit for Eve
def bell_measurement(qc, qubit1, qubit2):
    """Rotate (qubit1, qubit2) from the Bell basis into the computational
    basis and measure them into classical bits 0 and 1."""
    # CNOT then H maps the four Bell states onto |00>, |01>, |10>, |11>.
    qc.cx(qubit1, qubit2)
    qc.h(qubit1)
    qc.measure(qubit1, 0)
    qc.measure(qubit2, 1)
# Create a quantum circuit with three qubits and two classical bits
qc = QuantumCircuit(3, 2)
# Step 1: Create Bell state (entanglement between Alice and Bob)
qc.h(0) # Apply H gate to qubit 0 (Alice's qubit)
qc.cx(0, 1) # Apply CNOT gate with control qubit 0 and target qubit 1 (Bob's qubit)
# Randomly choose one of the Bell states for Eve's measurement
bell_states = ['phi_plus', 'phi_minus', 'psi_plus', 'psi_minus']
# NOTE(review): loop indentation was lost in this copy; the nesting below is
# reconstructed — verify against the original notebook. Also note that every
# iteration appends gates and measurements to the SAME circuit object, so
# later trials run on an ever-growing circuit; presumably each trial was
# meant to start from a fresh circuit.
for i in range(100):
    chosen_state = random.choice(bell_states)
    # Step 2: Eve prepares her qubit and performs Bell state measurement
    # Eve's qubit is qubit 2
    if chosen_state == 'phi_plus':
        pass # No additional gates needed, default Bell state |\Phi^+\rangle
    elif chosen_state == 'phi_minus':
        qc.z(2) # Apply Z gate to Eve's qubit for |\Phi^-\rangle
    elif chosen_state == 'psi_plus':
        qc.x(2) # Apply X gate to Eve's qubit for |\Psi^+\rangle
    elif chosen_state == 'psi_minus':
        qc.x(2)
        qc.z(2) # Apply X and Z gates to Eve's qubit for |\Psi^-\rangle
    # Eve performs Bell state measurement on Bob's qubit and her qubit
    bell_measurement(qc, 1, 2)
# Use Aer's qasm_simulator
simulator = Aer.get_backend('qasm_simulator')
# Execute the circuit on the qasm simulator
result = execute(qc, simulator, shots=1024).result()
# Get the counts (measurement results)
counts = result.get_counts(qc)
# Plot the results
# Only the final iteration's chosen_state is still bound here.
print(f"Counts (Eve's measurement with Bell state {chosen_state}): {counts}")
#plot_histogram(counts)
#plt.title(f"Eve's Measurement Results with Bell State {chosen_state}")
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
# NOTE(review): '!pip install' is notebook-only syntax; remove if running as a plain script.
!pip install qutip
import numpy as np
from qutip import *
# Define the system parameters
omega = 1.0  # Frequency of the external potential
interaction_strength = 0.5  # Strength of the interaction
time_points = np.linspace(0, 10, 1000)  # Time points for simulation
# Define the Hamiltonian for the system
H0 = omega * tensor(sigmax(), identity(2))  # External potential
Hint = interaction_strength * (tensor(sigmax(), sigmax()) + tensor(sigmay(), sigmay()))  # Interaction term
H = H0 + Hint  # Total Hamiltonian
# Define the initial state of the system
psi0 = tensor(basis(2, 0), basis(2, 1))  # Example initial state: |0> tensor |1>
# Simulate the time evolution of the system (no collapse operators: unitary evolution)
result = mesolve(H, psi0, time_points, [], [])
# Extract the desired state from the result list
states = result.states
# Calculate the concurrence at each time point
# NOTE(review): this rebinds the name `concurrence`, shadowing qutip's
# concurrence() function after this line; rename the list if the function
# is needed again later in the session.
concurrence = [concurrence(state) for state in states]
# Plot the concurrence as a function of time
import matplotlib.pyplot as plt
plt.plot(time_points, concurrence)
plt.xlabel('Time')
plt.ylabel('Concurrence')
plt.title('Effect of External Potential on Concurrence')
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
import random
import matplotlib.pyplot as plt
def generate_n_bit_inputs(n):
    """Generate all possible n-bit inputs as zero-padded binary strings."""
    return [format(value, 'b').zfill(n) for value in range(2 ** n)]

def constant_function(value):
    """Returns a constant function that always returns the given value."""
    def constant(_x):
        return value
    return constant

def balanced_function(n):
    """Returns a balanced function for n-bit inputs (half map to 0, half to 1)."""
    bitstrings = generate_n_bit_inputs(n)
    midpoint = len(bitstrings) // 2
    random.shuffle(bitstrings)
    # The first half of the shuffled order maps to 0, the rest to 1.
    lookup = {}
    for position, bitstring in enumerate(bitstrings):
        lookup[bitstring] = 0 if position < midpoint else 1
    return lambda x: lookup[x]

def determine_function_type(f, n):
    """Determine if the function f is constant or balanced over n-bit inputs."""
    outputs = [f(bitstring) for bitstring in generate_n_bit_inputs(n)]
    distinct = set(outputs)
    if len(distinct) == 1:
        return "Constant"
    if outputs.count(0) == outputs.count(1):
        return "Balanced"
    return "Unknown"
def plot_function(f, n, title):
    """Plot the function outputs for all n-bit inputs.

    Args:
        f: Callable mapping an n-bit binary string (e.g. '010') to 0 or 1.
        n: Number of input bits; all 2**n inputs are evaluated.
        title: Title for the matplotlib figure.
    """
    inputs = generate_n_bit_inputs(n)
    outputs = [f(x) for x in inputs]
    # Convert binary inputs to integers for plotting
    x = [int(i, 2) for i in inputs]
    y = outputs
    plt.figure(figsize=(10, 5))
    plt.scatter(x, y, c='blue')
    plt.title(title)
    plt.xlabel('Input (as integer)')
    plt.ylabel('Output')
    # One tick per possible input; readable only for small n.
    plt.xticks(range(2**n))
    plt.yticks([0, 1])
    plt.grid(True)
    plt.show()
# Define n
n = 3
# Create a constant function that always returns 1
const_func = constant_function(1)
print("Constant Function Test:")
print(f"The function is: {determine_function_type(const_func, n)}")
plot_function(const_func, n, "Constant Function (Always 1)")
# Create a balanced function for n-bit inputs
bal_func = balanced_function(n)
print("\nBalanced Function Test:")
print(f"The function is: {determine_function_type(bal_func, n)}")
plot_function(bal_func, n, "Balanced Function")
# useful additional packages
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# importing Qiskit
from qiskit import BasicAer, IBMQ
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
from qiskit.compiler import transpile
from qiskit.tools.monitor import job_monitor
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
n = 13 # the length of the first register for querying the oracle
# Choose a type of oracle at random. With probability half it is constant,
# and with the same probability it is balanced
oracleType, oracleValue = np.random.randint(2), np.random.randint(2)
if oracleType == 0:
print("The oracle returns a constant value ", oracleValue)
else:
print("The oracle returns a balanced function")
a = np.random.randint(1,2**n) # this is a hidden parameter for balanced oracle.
# Creating registers
# n qubits for querying the oracle and one qubit for storing the answer
qr = QuantumRegister(n+1) #all qubits are initialized to zero
# for recording the measurement on the first register
cr = ClassicalRegister(n)
circuitName = "DeutschJozsa"
djCircuit = QuantumCircuit(qr, cr)
# Create the superposition of all input queries in the first register by applying the Hadamard gate to each qubit.
for i in range(n):
djCircuit.h(qr[i])
# Flip the second register and apply the Hadamard gate.
djCircuit.x(qr[n])
djCircuit.h(qr[n])
# Apply barrier to mark the beginning of the oracle
djCircuit.barrier()
if oracleType == 0:#If the oracleType is "0", the oracle returns oracleValue for all input.
if oracleValue == 1:
djCircuit.x(qr[n])
else:
djCircuit.id(qr[n])
else: # Otherwise, it returns the inner product of the input with a (non-zero bitstring)
for i in range(n):
if (a & (1 << i)):
djCircuit.cx(qr[i], qr[n])
# Apply barrier to mark the end of the oracle
djCircuit.barrier()
# Apply Hadamard gates after querying the oracle
for i in range(n):
djCircuit.h(qr[i])
# Measurement
djCircuit.barrier()
for i in range(n):
djCircuit.measure(qr[i], cr[i])
#draw the circuit
djCircuit.draw(output='mpl',scale=0.5)
backend = BasicAer.get_backend('qasm_simulator')
shots = 1000
job = execute(djCircuit, backend=backend, shots=shots)
results = job.result()
answer = results.get_counts()
plot_histogram(answer)
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
import numpy as np
# Parameters
J = 1
k_B = 1
T = 2.269 # Critical temperature
K = J / (k_B * T)
# Transfer matrix for a 2x2 Ising model (simplified example)
def transfer_matrix(K):
    """Build the 4x4 transfer matrix exp(K * plaquette bond sum) over
    the four two-spin column states of a 2x2 Ising example."""
    spin_pairs = [(-1, -1), (-1, 1), (1, -1), (1, 1)]
    matrix = np.empty((4, 4))
    for row, (s1, s2) in enumerate(spin_pairs):
        for col, (s3, s4) in enumerate(spin_pairs):
            # Four couplings around the plaquette formed by the two columns.
            bond_sum = s1 * s2 + s2 * s3 + s3 * s4 + s4 * s1
            matrix[row, col] = np.exp(K * bond_sum)
    return matrix
T_matrix = transfer_matrix(K)
eigenvalues, _ = np.linalg.eig(T_matrix)
lambda_max = np.max(eigenvalues)
# Free energy per spin
f = -k_B * T * np.log(lambda_max)
print(f"Free energy per spin: {f:.4f}")
# Output the transfer matrix and its largest eigenvalue
print("Transfer matrix:\n", T_matrix)
print("Largest eigenvalue:", lambda_max)
import numpy as np
import random
import math
def MC_step(config, beta):
    '''Monte Carlo move using Metropolis algorithm '''
    size = len(config)
    # One sweep: attempt size*size single-spin flips at random sites.
    for _ in range(size * size):
        row = np.random.randint(0, size)
        col = np.random.randint(0, size)
        spin = config[row, col]
        # Sum of the four nearest neighbours with periodic boundaries.
        neighbour_sum = (config[(row + 1) % size, col]
                         + config[row, (col + 1) % size]
                         + config[(row - 1) % size, col]
                         + config[row, (col - 1) % size])
        energy_change = 2 * spin * neighbour_sum
        # Metropolis acceptance: always take downhill moves, otherwise
        # accept with probability exp(-beta * dE).
        if energy_change < 0 or random.uniform(0, 1) < np.exp(-energy_change * beta):
            config[row, col] = -spin
    return config
def E_dimensionless(config, L):
    '''Calculate the energy of the configuration'''
    total = 0
    for row in range(L):
        for col in range(L):
            spin = config[row, col]
            # Four nearest neighbours with periodic boundary conditions.
            neighbour_sum = (config[(row + 1) % L, col]
                             + config[row, (col + 1) % L]
                             + config[(row - 1) % L, col]
                             + config[row, (col - 1) % L])
            total += -neighbour_sum * spin
    return total / 4  # To compensate for overcounting
def magnetization(config):
    '''Calculate the magnetization of the configuration'''
    # Net magnetisation is the plain sum over all spins.
    return config.sum()
def calcul_energy_mag_C_X(config, L, eqSteps, err_runs):
    """Sweep temperature and measure energy, magnetization, specific heat and
    susceptibility of an Ising configuration via Metropolis Monte Carlo.

    Args:
        config: L x L array of +/-1 spins; mutated in place by the MC updates.
        L: Linear lattice size.
        eqSteps: Equilibration sweeps performed at each temperature.
        err_runs: Independent measurement runs used for error bars.

    Returns:
        Tuple (T, Energies, Magnetizations, SpecificHeats, Susceptibilities,
        delEnergies, delMagnetizations, M_theoric, C_theoric,
        delSpecificHeats, delSusceptibilities).
    """
    print('finished')  # NOTE(review): odd progress message at function *start* — likely leftover debug output.
    nt = 100  # number of temperature points
    mcSteps = 1000  # measurement sweeps per run
    T_c = 2 / math.log(1 + math.sqrt(2))  # exact 2D Ising critical temperature
    T = np.linspace(1., 7., nt)
    # NOTE(review): these E/M/C/X arrays are shadowed by the scalar
    # accumulators below and never read; presumably dead code.
    E, M, C, X = np.zeros(nt), np.zeros(nt), np.zeros(nt), np.zeros(nt)
    C_theoric, M_theoric = np.zeros(nt), np.zeros(nt)
    delta_E, delta_M, delta_C, delta_X = np.zeros(nt), np.zeros(nt), np.zeros(nt), np.zeros(nt)
    # NOTE(review): n1/n2 are computed but unused in this function.
    n1 = 1.0 / (mcSteps * L * L)
    n2 = 1.0 / (mcSteps * mcSteps * L * L)
    Energies, Magnetizations, SpecificHeats, Susceptibilities = [], [], [], []
    delEnergies, delMagnetizations, delSpecificHeats, delSusceptibilities = [], [], [], []
    for t in range(nt):
        beta = 1. / T[t]
        # Equilibrate the system
        for _ in range(eqSteps):
            MC_step(config, beta)
        Ez, Cz, Mz, Xz = [], [], [], []
        for _ in range(err_runs):
            E, E_squared, M, M_squared = 0, 0, 0, 0
            for _ in range(mcSteps):
                MC_step(config, beta)
                energy = E_dimensionless(config, L)
                mag = abs(magnetization(config))
                E += energy
                E_squared += energy ** 2
                M += mag
                M_squared += mag ** 2
            E_mean = E / mcSteps
            E_squared_mean = E_squared / mcSteps
            M_mean = M / mcSteps
            M_squared_mean = M_squared / mcSteps
            # Per-spin observables; fluctuation formulas give C and chi.
            Energy = E_mean / (L ** 2)
            SpecificHeat = beta ** 2 * (E_squared_mean - E_mean ** 2) / (L ** 2)
            Magnetization = M_mean / (L ** 2)
            Susceptibility = beta * (M_squared_mean - M_mean ** 2) / (L ** 2)
            Ez.append(Energy)
            Cz.append(SpecificHeat)
            Mz.append(Magnetization)
            Xz.append(Susceptibility)
        # Mean and spread over the err_runs independent runs.
        Energies.append(np.mean(Ez))
        delEnergies.append(np.std(Ez))
        Magnetizations.append(np.mean(Mz))
        delMagnetizations.append(np.std(Mz))
        SpecificHeats.append(np.mean(Cz))
        delSpecificHeats.append(np.std(Cz))
        Susceptibilities.append(np.mean(Xz))
        delSusceptibilities.append(np.std(Xz))
        if T[t] < T_c:
            # Onsager's exact spontaneous magnetization below T_c.
            M_theoric[t] = pow(1 - pow(np.sinh(2 * beta), -4), 1 / 8)
            C_theoric[t] = (2.0 / np.pi) * (math.log(1 + math.sqrt(2)) ** 2) * (-math.log(1 - T[t] / T_c) + math.log(1.0 / math.log(1 + math.sqrt(2))) - (1 + np.pi / 4))
        else:
            C_theoric[t] = 0
    return (T, Energies, Magnetizations, SpecificHeats, Susceptibilities,
            delEnergies, delMagnetizations, M_theoric, C_theoric, delSpecificHeats, delSusceptibilities)
# Parameters
L = 10 # Size of the lattice
eqSteps = 1000 # Number of steps to reach equilibrium
err_runs = 10 # Number of error runs
# Initial configuration (random spins)
config = 2 * np.random.randint(2, size=(L, L)) - 1
# Perform calculations
results = calcul_energy_mag_C_X(config, L, eqSteps, err_runs)
# Unpack results
(T, Energies, Magnetizations, SpecificHeats, Susceptibilities,
delEnergies, delMagnetizations, M_theoric, C_theoric,
delSpecificHeats, delSusceptibilities) = results
# Plot results
import matplotlib.pyplot as plt
plt.figure()
plt.errorbar(T, Energies, yerr=delEnergies, label='Energy')
plt.xlabel('Temperature')
plt.ylabel('Energy')
plt.legend()
plt.show()
plt.figure()
plt.errorbar(T, Magnetizations, yerr=delMagnetizations, label='Magnetization')
plt.xlabel('Temperature')
plt.ylabel('Magnetization')
plt.legend()
plt.show()
plt.figure()
plt.errorbar(T, SpecificHeats, yerr=delSpecificHeats, label='Specific Heat')
plt.plot(T, C_theoric, label='Theoretical Specific Heat')
plt.xlabel('Temperature')
plt.ylabel('Specific Heat')
plt.legend()
plt.show()
plt.figure()
plt.errorbar(T, Susceptibilities, yerr=delSusceptibilities, label='Susceptibility')
plt.xlabel('Temperature')
plt.ylabel('Susceptibility')
plt.legend()
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
#DOI : https://doi.org/10.1143/JPSJ.12.570
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import expm
# Define Pauli matrices
sigma_x = np.array([[0, 1], [1, 0]])
sigma_y = np.array([[0, -1j], [1j, 0]])
sigma_z = np.array([[1, 0], [0, -1]])
# System parameters
omega_0 = 1.0 # Transition frequency
Omega = 0.1 # Driving strength
omega = 1.0 # Driving frequency
hbar = 1.0
# Time array
t = np.linspace(0, 50, 10000)
dt = t[1] - t[0]
# Initial density matrix (ground state)
rho = np.array([[1, 0], [0, 0]], dtype=complex)
# Hamiltonian matrices
H0 = (hbar * omega_0 / 2) * sigma_z
H_prime = lambda t: hbar * Omega * np.cos(omega * t) * sigma_x
# Time evolution
rho_t = np.zeros((len(t), 2, 2), dtype=complex)
rho_t[0] = rho
for i in range(1, len(t)):
H = H0 + H_prime(t[i-1])
U = expm(-1j * H * dt / hbar)
rho = U @ rho @ U.conj().T
rho_t[i] = rho
# Calculate expectation values of Pauli z-matrix
expectation_z = [np.trace(rho @ sigma_z).real for rho in rho_t]
# Plot the results
plt.figure(figsize=(10, 6))
plt.plot(t, expectation_z, label=r'$\langle \sigma_z \rangle$')
plt.xlabel('Time $t$')
plt.ylabel(r'$\langle \sigma_z \rangle$')
plt.title('Response of a Two-Level System to a Sinusoidal Driving Field')
plt.legend()
plt.grid()
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import expm
# Define Pauli matrices
sigma_x = np.array([[0, 1], [1, 0]])
sigma_y = np.array([[0, -1j], [1j, 0]])
sigma_z = np.array([[1, 0], [0, -1]])
# System parameters
omega_0 = 1.0 # Transition frequency
Omega = 0.1 # Driving strength
hbar = 1.0
# Time array
t = np.linspace(0, 50, 10000)
dt = t[1] - t[0]
# Initial density matrix (ground state)
rho = np.array([[1, 0], [0, 0]], dtype=complex)
# Hamiltonian matrices
H0 = (hbar * omega_0 / 2) * sigma_z
H_prime = lambda t: hbar * Omega * (t >= 0) * sigma_x # Step function perturbation
# Time evolution
rho_t = np.zeros((len(t), 2, 2), dtype=complex)
rho_t[0] = rho
for i in range(1, len(t)):
H = H0 + H_prime(t[i-1])
U = expm(-1j * H * dt / hbar)
rho = U @ rho @ U.conj().T
rho_t[i] = rho
# Calculate expectation values of Pauli z-matrix
expectation_z = [np.trace(rho @ sigma_z).real for rho in rho_t]
# Plot the results
plt.figure(figsize=(10, 6))
plt.plot(t, expectation_z, label=r'$\langle \sigma_z \rangle$')
plt.xlabel('Time $t$')
plt.ylabel(r'$\langle \sigma_z \rangle$')
plt.title('Response of a Two-Level System to a Step Function Driving Field')
plt.legend()
plt.grid()
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
import numpy as np
import matplotlib.pyplot as plt
# Define a function to create a random Hermitian matrix of size n
def create_random_hermitian_matrix(n):
    """Return a random n x n Hermitian matrix built as A + A^dagger."""
    base = np.random.rand(n, n) + 1j * np.random.rand(n, n)
    # Adding the conjugate transpose guarantees Hermiticity.
    return base + base.conj().T
# Define the size of the matrix
n = 10
# Create the unperturbed density matrix (ground state)
rho_0_large = create_random_hermitian_matrix(n)
rho_0_large = (rho_0_large + rho_0_large.conj().T) / 2 # Ensure it's Hermitian
# Create a small perturbation matrix
perturbation_large = create_random_hermitian_matrix(n) * 0.05 # Scale it to be a small perturbation
# Linear response: rho(t) = rho_0 + perturbation
rho_linear_large = rho_0_large + perturbation_large
# Multiplicative response: rho(t) = rho_0 * perturbation (element-wise multiplication)
rho_multiplicative_large = rho_0_large * perturbation_large
# Compute the eigenvalues
eigenvalues_rho_0_large = np.linalg.eigvals(rho_0_large)
eigenvalues_linear_large = np.linalg.eigvals(rho_linear_large)
eigenvalues_multiplicative_large = np.linalg.eigvals(rho_multiplicative_large)
# Plot the eigenvalues
fig, ax = plt.subplots(figsize=(12, 6))
x = np.arange(n)
ax.plot(x, eigenvalues_rho_0_large.real, 'o-', label='rho_0')
ax.plot(x, eigenvalues_linear_large.real, 's-', label='Linear (rho_0 + perturbation)')
ax.plot(x, eigenvalues_multiplicative_large.real, 'd-', label='Multiplicative (rho_0 * perturbation)')
ax.set_xticks(x)
ax.set_xticklabels([f'Eigenvalue {i+1}' for i in x])
ax.set_ylabel('Eigenvalues (Real Part)')
ax.set_title('Eigenvalues of the Density Matrix for a Large System')
ax.legend()
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
import numpy as np
import matplotlib.pyplot as plt
# Define parameters
frequency = 100  # Hz (cycles per second)
sampling_rate = 10000  # Samples per second
duration = 1  # Seconds
time = np.linspace(0, duration, sampling_rate)
# Generate signal
signal = np.sin(2 * np.pi * frequency * time)
# Calculate average: a fast oscillation averages to approximately zero.
average = np.mean(signal)
# Print average
print("Average of the signal:", average)
# Plot signal
plt.plot(time, signal)
plt.xlabel("Time (s)")
plt.ylabel("Signal")
plt.title("Fast-Oscillating Signal (Average: {:.4f})".format(average))
plt.grid(True)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
w_0 = 1e-1  # reference frequency
w = np.linspace(0, 1e3, 1000)  # frequency grid
# Complex phase factor exp(i*(w_0 - w)).
func = np.exp(1j*(w_0 - 1*w))
plt.figure(figsize=(10,4))
# NOTE(review): `func` is complex; matplotlib discards the imaginary part with
# a ComplexWarning. Plot func.real (and/or func.imag) explicitly if intended.
plt.plot(w, func)
plt.grid()
from qiskit import QuantumCircuit, Aer, execute
from qiskit.visualization import plot_histogram
# Create a Quantum Circuit with 3 qubits
qc = QuantumCircuit(3, 3)
# Create the Bell state
qc.h(1)
qc.cx(1, 2)
# Prepare the state to be teleported
qc.x(0) # Example: teleporting |1>
# Entangle the qubit to be teleported with the first qubit of the Bell pair
qc.cx(0, 1)
qc.h(0)
# Measure the qubits
qc.measure([0, 1], [0, 1])
# Apply conditional operations based on the measurement results
qc.cx(1, 2)
qc.cz(0, 2)
# Measure the teleported qubit
qc.measure(2, 2)
# Simulate the circuit
simulator = Aer.get_backend('qasm_simulator')
result = execute(qc, backend=simulator, shots=1024).result()
# Get the counts of the measurement results
counts = result.get_counts()
# Plot the results
plot_histogram(counts, title="Standard Quantum Teleportation")
qc.draw(output='mpl')
from qiskit import QuantumCircuit, Aer, execute
from qiskit.visualization import plot_histogram
from qiskit.providers.aer import noise
# Create a Quantum Circuit with 3 qubits
qc_mixed = QuantumCircuit(3, 3)
# Create the Bell state
qc_mixed.h(1)
qc_mixed.cx(1, 2)
# Prepare the state to be teleported
qc_mixed.x(0) # Example: teleporting |1>
# Entangle the qubit to be teleported with the first qubit of the noisy Bell pair
qc_mixed.cx(0, 1)
qc_mixed.h(0)
# Measure the qubits
qc_mixed.measure([0, 1], [0, 1])
# Apply conditional operations based on the measurement results
qc_mixed.cx(1, 2)
qc_mixed.cz(0, 2)
# Measure the teleported qubit
qc_mixed.measure(2, 2)
# Define a noise model with bit-flip noise
bit_flip_prob = 0.2
bit_flip_noise = noise.pauli_error([('X', bit_flip_prob), ('I', 1 - bit_flip_prob)])
# Create a noise model
noise_model = noise.NoiseModel()
noise_model.add_all_qubit_quantum_error(bit_flip_noise, ['x', 'h'])
# Add single-qubit noise to the circuit to simulate the mixed state
qc_noisy = qc_mixed.copy()
qc_noisy.append(bit_flip_noise.to_instruction(), [1])
qc_noisy.append(bit_flip_noise.to_instruction(), [2])
# Simulate the circuit with noise
simulator = Aer.get_backend('qasm_simulator')
result_mixed = execute(qc_noisy, backend=simulator, shots=1024, noise_model=noise_model).result()
# Get the counts of the measurement results
counts_mixed = result_mixed.get_counts()
# Plot the results
plot_histogram(counts_mixed, title="Teleportation with Mixed State Resource")
import numpy as np
import matplotlib.pyplot as plt
# Define parameters
omega_0 = 1.0 # Resonant frequency of the two-level atom
gamma = 0.1 # Width of the Lorentzian peak for resonant interaction
gamma_off = 0.3 # Width of the off-resonant interaction
omega_off = 1.5 # Center frequency for off-resonant interaction
# Frequency range
omega = np.linspace(0, 2, 1000)
# Lorentzian spectral density function for resonant interaction
def J_resonant(omega, omega_0, gamma):
    """Lorentzian spectral density centred on the resonant frequency omega_0."""
    detuning = omega - omega_0
    return gamma ** 2 / (detuning ** 2 + gamma ** 2)

def J_off_resonant(omega, omega_off, gamma_off):
    """Lorentzian spectral density centred on the off-resonant frequency omega_off."""
    detuning = omega - omega_off
    return gamma_off ** 2 / (detuning ** 2 + gamma_off ** 2)

def J_total(omega, omega_0, gamma, omega_off, gamma_off):
    """Sum of the resonant and off-resonant Lorentzian contributions."""
    resonant_part = J_resonant(omega, omega_0, gamma)
    off_resonant_part = J_off_resonant(omega, omega_off, gamma_off)
    return resonant_part + off_resonant_part
# Compute spectral densities
J_omega_resonant = J_resonant(omega, omega_0, gamma)
J_omega_off_resonant = J_off_resonant(omega, omega_off, gamma_off)
J_omega_total = J_total(omega, omega_0, gamma, omega_off, gamma_off)
# Plot the spectral densities
plt.figure(figsize=(10, 6))
plt.plot(omega, J_omega_resonant, label='Resonant Interaction $J_{\\text{res}}(\\omega)$')
plt.plot(omega, J_omega_off_resonant, label='Off-Resonant Interaction $J_{\\text{off-res}}(\\omega)$')
plt.plot(omega, J_omega_total, label='Total Spectral Density $J_{\\text{total}}(\\omega)$', linestyle='--')
plt.axvline(x=omega_0, color='r', linestyle='--', label='$\\omega_0$ (Resonant Frequency)')
plt.axvline(x=omega_off, color='g', linestyle='--', label='$\\omega_{\\text{off}}$ (Off-Resonant Frequency)')
plt.xlabel('$\\omega$ (Frequency)')
plt.ylabel('$J(\\omega)$ (Spectral Density)')
plt.title('Spectral Density for Two-Level Atom with Resonant and Off-Resonant Interactions')
plt.legend()
plt.grid(True)
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
from qiskit import QuantumCircuit, Aer, execute
import matplotlib.pyplot as plt
# Function to generate a random bit using a quantum circuit
def generate_random_bit():
    """Return one uniformly random bit (0 or 1) by measuring |+> once."""
    # One qubit plus one classical bit; H creates an equal superposition.
    circuit = QuantumCircuit(1, 1)
    circuit.h(0)
    circuit.measure(0, 0)
    # A single shot on the QASM simulator yields exactly one outcome key.
    backend = Aer.get_backend('qasm_simulator')
    counts = execute(circuit, backend, shots=1).result().get_counts(circuit)
    # The sole key is '0' or '1'; convert it to an int.
    return int(next(iter(counts)))
# Generate a random bit
random_bit = generate_random_bit()
print(f"Random bit: {random_bit}")
# Generate multiple random bits
num_bits = 10
random_bits = [generate_random_bit() for _ in range(num_bits)]
print(f"Random bits: {random_bits}")
# Plot the random bits
plt.bar(range(num_bits), random_bits, tick_label=range(num_bits))
plt.xlabel('Bit index')
plt.ylabel('Random bit value')
plt.title('Quantum Random Number Generation')
plt.show()
|
https://github.com/AmirhoseynpowAsghari/Qiskit-Tutorials
|
AmirhoseynpowAsghari
|
import random
import matplotlib.pyplot as plt
def generate_n_bit_inputs(n):
    """Generate all possible n-bit inputs as zero-padded binary strings."""
    return [format(value, 'b').zfill(n) for value in range(2 ** n)]

def constant_function(value):
    """Returns a constant function that always returns the given value."""
    def constant(_x):
        return value
    return constant

def balanced_function(n):
    """Returns a balanced function for n-bit inputs (half map to 0, half to 1)."""
    bitstrings = generate_n_bit_inputs(n)
    midpoint = len(bitstrings) // 2
    random.shuffle(bitstrings)
    # The first half of the shuffled order maps to 0, the rest to 1.
    lookup = {}
    for position, bitstring in enumerate(bitstrings):
        lookup[bitstring] = 0 if position < midpoint else 1
    return lambda x: lookup[x]

def determine_function_type(f, n):
    """Determine if the function f is constant or balanced over n-bit inputs."""
    outputs = [f(bitstring) for bitstring in generate_n_bit_inputs(n)]
    distinct = set(outputs)
    if len(distinct) == 1:
        return "Constant"
    if outputs.count(0) == outputs.count(1):
        return "Balanced"
    return "Unknown"
def plot_function(f, n, title):
    """Plot the function outputs for all n-bit inputs.

    Args:
        f: Callable mapping an n-bit binary string (e.g. '010') to 0 or 1.
        n: Number of input bits; all 2**n inputs are evaluated.
        title: Title for the matplotlib figure.
    """
    inputs = generate_n_bit_inputs(n)
    outputs = [f(x) for x in inputs]
    # Convert binary inputs to integers for plotting
    x = [int(i, 2) for i in inputs]
    y = outputs
    plt.figure(figsize=(10, 5))
    plt.scatter(x, y, c='blue')
    plt.title(title)
    plt.xlabel('Input (as integer)')
    plt.ylabel('Output')
    # One tick per possible input; readable only for small n.
    plt.xticks(range(2**n))
    plt.yticks([0, 1])
    plt.grid(True)
    plt.show()
# Define n
n = 3
# Create a constant function that always returns 1
const_func = constant_function(1)
print("Constant Function Test:")
print(f"The function is: {determine_function_type(const_func, n)}")
plot_function(const_func, n, "Constant Function (Always 1)")
# Create a balanced function for n-bit inputs
bal_func = balanced_function(n)
print("\nBalanced Function Test:")
print(f"The function is: {determine_function_type(bal_func, n)}")
plot_function(bal_func, n, "Balanced Function")
# useful additional packages
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# importing Qiskit
from qiskit import BasicAer, IBMQ
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
from qiskit.compiler import transpile
from qiskit.tools.monitor import job_monitor
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
n = 13 # the length of the first register for querying the oracle
# Choose a type of oracle at random. With probability half it is constant,
# and with the same probability it is balanced
oracleType, oracleValue = np.random.randint(2), np.random.randint(2)
if oracleType == 0:
print("The oracle returns a constant value ", oracleValue)
else:
print("The oracle returns a balanced function")
a = np.random.randint(1,2**n) # this is a hidden parameter for balanced oracle.
# Creating registers
# n qubits for querying the oracle and one qubit for storing the answer
qr = QuantumRegister(n+1) #all qubits are initialized to zero
# for recording the measurement on the first register
cr = ClassicalRegister(n)
circuitName = "DeutschJozsa"
djCircuit = QuantumCircuit(qr, cr)
# Create the superposition of all input queries in the first register by applying the Hadamard gate to each qubit.
for i in range(n):
djCircuit.h(qr[i])
# Flip the second register and apply the Hadamard gate.
djCircuit.x(qr[n])
djCircuit.h(qr[n])
# Apply barrier to mark the beginning of the oracle
djCircuit.barrier()
if oracleType == 0:#If the oracleType is "0", the oracle returns oracleValue for all input.
if oracleValue == 1:
djCircuit.x(qr[n])
else:
djCircuit.id(qr[n])
else: # Otherwise, it returns the inner product of the input with a (non-zero bitstring)
for i in range(n):
if (a & (1 << i)):
djCircuit.cx(qr[i], qr[n])
# Apply barrier to mark the end of the oracle
djCircuit.barrier()
# Apply Hadamard gates after querying the oracle
for i in range(n):
djCircuit.h(qr[i])
# Measurement
djCircuit.barrier()
for i in range(n):
djCircuit.measure(qr[i], cr[i])
#draw the circuit
djCircuit.draw(output='mpl',scale=0.5)
backend = BasicAer.get_backend('qasm_simulator')
shots = 1000
job = execute(djCircuit, backend=backend, shots=shots)
results = job.result()
answer = results.get_counts()
plot_histogram(answer)
|
https://github.com/Jaybsoni/QuantumCompiler
|
Jaybsoni
|
from qcompile import comp_utils as utils
from qiskit import *
import numpy as np
from pprint import pprint
import random
import matplotlib.pyplot as plt
random.seed(1) # set random seed 1
# Intro to some helper functions (which can be found in comp_utils.py):
circ = qiskit.QuantumCircuit(3) # construct a simple 3 qbit circuit
circ.h(0)
circ.cx(0, 2)
circ.i(1)
circ.z(0)
print(circ) ## visualize the circuit
## read_circ .......................................................................................................
gate_lst, num_qbits = utils.read_circ(circ) # here we use the read_circ helper function to extract the meta data
print('gate_lst: {}\n'.format(gate_lst)) # the gate_lst is an ordered list of tuples containing info about
# the gate, the qbits being applied to, and the parameters of the gate
## general_replace .................................................................................................
utils.general_replace(gate_lst, 'Z', [('X', [], [])]) # the general_replace function allows us to manipulate the gate_lst
print('replaced Z: {}\n'.format(gate_lst)) # here I replaced each instance of Z gate with an X gate.
utils.general_replace(gate_lst, 'I', []) # we can remove gates by providing an empty list of replacement gates
print('removed I: {}\n'.format(gate_lst)) # here I just removes every instance of 'I'
## write_circ ......................................................................................................
new_circ = utils.write_circ(gate_lst, num_qbits) # this function takes the gate_lst and creates a qiskit circuit object
print(new_circ)
# Here we will construct the gates defined above and numerically check if they are equal :
def Rx(theta):
"""Produces the Rx matrix given float theta (in radians) """
Rx_mat = np.array([[np.cos(theta/2), complex(0, -np.sin(theta/2))],
[complex(0, -np.sin(theta/2)), np.cos(theta/2)]])
return Rx_mat
def Ry(theta):
"""Produces the Ry matrix given float theta (in radians) """
Ry_mat = np.array([[np.cos(theta/2), -np.sin(theta/2)],
[np.sin(theta/2), np.cos(theta/2)]])
return Ry_mat
def Rz(phi):
    """Return the 2x2 rotation-about-Z matrix for an angle phi (radians)."""
    phase = np.exp(1j * phi / 2)
    # diagonal phase rotation: exp(-i phi/2) on |0>, exp(+i phi/2) on |1>
    return np.array([[phase.conjugate(), 0],
                     [0, phase]])
# --- Reference matrices for the basic gates, then numeric checks that the
# --- Rz/Rx decompositions reproduce them (first up to, then including, a global phase).
I = np.eye(2)
H = (1 / np.sqrt(2)) * np.array([[1, 1],
                                 [1, -1]])
X = np.array([[0, 1],
              [1, 0]])
Y = np.array([[0, -complex(0, 1)],
              [complex(0, 1), 0]])
Z = np.array([[1, 0],
              [0, -1]])
# First pass: compare each gate against its Rz/Rx decomposition (equal up to a global phase).
print('I matrix: --------------------- ')
pprint(I)
pprint(np.round(Rz(0), decimals=3))
pprint(np.round(Rx(0), decimals=3))
print('\n')
print('H matrix: --------------------- ')
pprint(H)
pprint(np.round(Rz(np.pi/2) @ Rx(np.pi/2) @ Rz(np.pi/2), decimals=3))
pprint(np.round(Rx(np.pi/2) @ Rz(np.pi/2) @ Rx(np.pi/2), decimals=3))
print('\n')
print('X matrix: --------------------- ')
pprint(X)
pprint(np.round(Rx(np.pi), decimals=3))
print('\n')
print('Z matrix: --------------------- ')
pprint(Z)
pprint(np.round(Rz(np.pi), decimals=3))
print('\n')
print('Y matrix: --------------------- ')
pprint(Y)
pprint(np.round(Rz(-np.pi/2) @ Rx(np.pi) @ Rz(np.pi/2), decimals=3))
pprint(np.round(Rx(np.pi/2) @ Rz(np.pi) @ Rx(-np.pi/2), decimals=3))
# Second pass: multiply in the explicit global phase so the decompositions match exactly.
print('I matrix: --------------------- ')
global_phase = np.exp(0)
pprint(I)
pprint(np.round( global_phase * Rz(0) , decimals=3))
pprint(np.round( global_phase * Rx(0) , decimals=3))
print('\n')
print('H matrix: --------------------- ')
global_phase = np.exp(complex(0, np.pi/2))
pprint(H)
pprint(np.round( global_phase * (Rz(np.pi/2) @ Rx(np.pi/2) @ Rz(np.pi/2)) , decimals=3))
pprint(np.round( global_phase * (Rx(np.pi/2) @ Rz(np.pi/2) @ Rx(np.pi/2)) , decimals=3))
print('\n')
print('X matrix: --------------------- ')
global_phase = np.exp(complex(0, np.pi/2))
pprint(X)
pprint(np.round( global_phase * Rx(np.pi) , decimals=3))
print('\n')
print('Z matrix: --------------------- ')
global_phase = np.exp(complex(0, np.pi/2))
pprint(Z)
pprint(np.round( global_phase * Rz(np.pi) , decimals=3))
print('\n')
print('Y matrix: --------------------- ')
global_phase = np.exp(complex(0, 3 * np.pi/2))
pprint(Y)
pprint(np.round( global_phase * (Rz(-np.pi/2) @ Rx(np.pi) @ Rz(np.pi/2)) , decimals=3))
pprint(np.round( global_phase * (Rx(np.pi/2) @ Rz(np.pi) @ Rx(-np.pi/2)) , decimals=3))
def simple_compiler(circ):
    '''A quantum compiler that produces a new quantum circuit from the
    restricted subset of available gates (Rx, Rz, Cz).

    The circuit is read into a gate list, every non-native gate is
    substituted by its Rz/Rx/Cz decomposition, and a new qiskit circuit
    is written back out. Each result is equal to the original gate up to
    a global phase.
    '''
    gate_lst, num_qbits = utils.read_circ(circ)
    # Ordered substitution table: gate name -> replacement gate tuples.
    # Cx must be handled first since its replacement introduces H gates,
    # which the later H rule then decomposes in turn.
    substitutions = [
        ('Cx', [('H', utils.get_second, []), ('Cz', [], []), ('H', utils.get_second, [])]),
        ('I',  [('Rz', [], [0])]),
        ('H',  [('Rz', [], [np.pi/2]), ('Rx', [], [np.pi/2]), ('Rz', [], [np.pi/2])]),
        ('X',  [('Rx', [], [np.pi])]),
        ('Z',  [('Rz', [], [np.pi])]),
        ('Y',  [('Rz', [], [-np.pi/2]), ('Rx', [], [np.pi]), ('Rz', [], [np.pi/2])]),
        # Ry(theta) keeps its angle: utils.get_first pulls it from the old params
        ('Ry', [('Rz', [], [-np.pi/2]), ('Rx', [], utils.get_first), ('Rz', [], [np.pi/2])]),
    ]
    for gate_name, replacement in substitutions:
        utils.general_replace(gate_lst, gate_name, replacement)
    return utils.write_circ(gate_lst, num_qbits)
# Testing ground: compile one random circuit, then brute-force-verify equivalence,
# then measure the depth overhead introduced by the simple compiler.
circ = utils.random_circ_generator(num_qbits=3, num_gates=5) # randomly generate a circuit,
print(circ)
compiled_circ = simple_compiler(circ) # compile it
print(compiled_circ)
equal = utils.circ_equal(circ, compiled_circ) # this helper function compares the magnitudes of each state_vector (element wise)
print(equal) # to determine if they are identical (up to a global phase)
# Brute force test:
for i in range(1000):
    circ = utils.random_circ_generator()
    compiled_circ = simple_compiler(circ)
    equal = utils.circ_equal(circ, compiled_circ)
    if not equal.all():
        print('FAILED at circuit {}'.format(i))
        break
    else:
        print('passed circuit {}'.format(i))
print('Passed all tests!')
# Here we analyze the overhead
depth_array = []
compiled_depth_array = []
ratio = []  # per-circuit percentage increase in depth
for i in range(100):
    circ = utils.random_circ_generator(num_qbits=5, num_gates=15) # randomly generate a circuit with 5 qbits and 15 gates
    compiled_circ = simple_compiler(circ)
    equal = utils.circ_equal(circ, compiled_circ)
    if not equal.all(): # make sure we are compiling properly!
        print("FAIL @ circuit {}".format(i))
        break
    depth_circ = circ.depth()
    depth_comp = compiled_circ.depth()
    depth_array.append(depth_circ) # store depth
    compiled_depth_array.append(depth_comp) # store new circuit depth
    ratio.append(((depth_comp - depth_circ) / depth_circ) * 100)
print('average initial circuit depth = {} +/- {}'.format(np.mean(depth_array), np.std(depth_array)))
print('average compiled circuit depth = {} +/- {}'.format(np.mean(compiled_depth_array), np.std(compiled_depth_array)))
print('average increase in depth = {}%'.format(np.round(np.mean(ratio), decimals=2)))
plt.plot(depth_array, label='circuit')
plt.plot(compiled_depth_array, label='compiled')
plt.legend()
plt.show()
## Here we implement the optimized compiler:
def compiler(circ):
    '''A quantum compiler that produces a new quantum circuit from the
    restricted subset of available gates.

    Optimized variant of simple_compiler: it first strips identities and
    cancels doubled Cx/Cz gates, then performs the same Rz/Rx/Cz
    substitutions, and finally merges consecutive same-axis rotations on
    the same qbit into a single rotation with summed angles.
    :param circ: Qiskit QuantumCircuit object
    :return: Qiskit QuantumCircuit object, equal to circ up to a global phase
    '''
    gate_lst, num_qbits = utils.read_circ(circ)
    # Preprocessing (Step1):
    utils.general_replace(gate_lst, 'I', []) # remove Identity
    length = len(gate_lst)
    for index in range(length - 1): # iterate over the lst and remove redundant Cx, Cz gates
        if index >= (len(gate_lst) - 1): # by removing the repetitive Cz and Cx gates
            break # we reduce the size of the list, so we need to check this edge case
        curr_gate_str = gate_lst[index][0]
        curr_qbit_lst = gate_lst[index][1]
        if curr_gate_str in ['Cx', 'Cz']: # Check if this gate is a Cz or Cx gate
            nxt_gate_str = gate_lst[index+1][0]
            nxt_qbit_lst = gate_lst[index+1][1]
            if ((nxt_gate_str == curr_gate_str) and # check that we are applying a Cz or Cx gate twice
                    (nxt_qbit_lst == curr_qbit_lst)): # consecutively on the same control and target qbits
                del gate_lst[index + 1] # remove both gates (Cx.Cx = Cz.Cz = Identity)
                del gate_lst[index]
    # Compile (similar to the simple compiler):
    # replace CNOT:
    replacement_gates = [('H', utils.get_second, []), ('Cz', [], []), ('H', utils.get_second, [])]
    utils.general_replace(gate_lst, 'Cx', replacement_gates)
    # replace Hadamard:
    replacement_gates = [('Rz', [], [np.pi/2]), ('Rx', [], [np.pi/2]), ('Rz', [], [np.pi/2])]
    utils.general_replace(gate_lst, 'H', replacement_gates)
    # replace X:
    replacement_gates = [('Rx', [], [np.pi])]
    utils.general_replace(gate_lst, 'X', replacement_gates)
    # replace Z:
    replacement_gates = [('Rz', [], [np.pi])]
    utils.general_replace(gate_lst, 'Z', replacement_gates)
    # replace y:
    replacement_gates = [('Rz', [], [-np.pi/2]), ('Rx', [], [np.pi]), ('Rz', [], [np.pi/2])]
    utils.general_replace(gate_lst, 'Y', replacement_gates)
    # replace Ry(theta):
    replacement_gates = [('Rz', [], [-np.pi/2]), ('Rx', [], utils.get_first), ('Rz', [], [np.pi/2])]
    utils.general_replace(gate_lst, 'Ry', replacement_gates)
    # simplification (Step2): merge consecutive same-axis rotations on the same qbit
    index = 0
    while(index < len(gate_lst) - 1):
        # print('index: {}'.format(index))
        curr_gate_str = gate_lst[index][0]
        curr_qbit_lst = gate_lst[index][1]
        curr_qbit_params = gate_lst[index][2]
        if curr_gate_str in ['Rx', 'Rz']: # Check if this gate is a Rz or Rx gate
            i = 1 # another dummy index to look at gates ahead
            while(index + i < len(gate_lst)):
                # print('dummy: {}'.format(i))
                nxt_gate_str = gate_lst[index+i][0]
                nxt_qbit_lst = gate_lst[index+i][1]
                nxt_qbit_params = gate_lst[index+i][2]
                if ((nxt_gate_str == curr_gate_str) and # check that we are applying a Rz or Rx gate twice
                        (nxt_qbit_lst == curr_qbit_lst)): # consecutively on the same control and target qbits
                    del gate_lst[index + i] # remove both gates
                    del gate_lst[index]
                    # angles add for rotations about the same axis
                    new_gate = (curr_gate_str, curr_qbit_lst, [curr_qbit_params[0] + nxt_qbit_params[0]])
                    gate_lst.insert(index, new_gate) # add the combined gate and
                    break # break current while loop
                elif ((nxt_gate_str != curr_gate_str) and # if the next gate applied to the same qbit is different
                        (nxt_qbit_lst == curr_qbit_lst or # i.e instead of another Rx gate we apply a Rz or a Cz to
                         curr_qbit_lst[0] in nxt_qbit_lst)): # the same qbit then
                    index += 1 # move forward nothing left here to simplify
                    break
                else: # the next gate is being applied to a different set of qbits
                    i += 1 # so we can safely check the next gate in the list
            index += 1
        else:
            index += 1
    compiled_circ = utils.write_circ(gate_lst, num_qbits)
    return compiled_circ
# Here we analyze the overhead: compare circuit depth of the simple vs optimized compiler.
depth_array = []
compiled_depth_array = []
optimized_depth_array = []
ratio_simple = []
ratio_opt = []
for i in range(100):
    circ = utils.random_circ_generator(num_qbits=5, num_gates=15) # randomly generate a circuit with 5 qbits and 15 gates
    compiled_circ = simple_compiler(circ)
    optimized_circ = compiler(circ)
    equal1 = utils.circ_equal(circ, compiled_circ)
    equal2 = utils.circ_equal(circ, optimized_circ)
    if not equal1.all(): # make sure we are compiling properly!
        print("simple compiler FAIL @ circuit {}".format(i))
        break
    if not equal2.all():
        print("optimized compiler FAIL @ circuit {}".format(i))
        break
    depth_circ = circ.depth()
    depth_comp = compiled_circ.depth()
    depth_opt = optimized_circ.depth()
    depth_array.append(depth_circ) # store depth
    compiled_depth_array.append(depth_comp) # store new circuit depth
    optimized_depth_array.append(depth_opt) # store optimized circuit depth
    ratio_simple.append(((depth_comp - depth_circ) / depth_circ) * 100)
    ratio_opt.append(((depth_opt - depth_circ) / depth_circ) * 100)
print('average initial circuit depth = {} +/- {}'.format(np.mean(depth_array), np.std(depth_array)))
print('average compiled circuit depth = {} +/- {}'.format(np.mean(compiled_depth_array), np.std(compiled_depth_array)))
print('average optimized circuit depth = {} +/- {}'.format(np.mean(optimized_depth_array), np.std(optimized_depth_array)))
print('average increase in depth from simple compiler = {}%'.format(np.round(np.mean(ratio_simple), decimals=2)))
print('average increase in depth from optimized compiler = {}%'.format(np.round(np.mean(ratio_opt), decimals=2)))
plt.plot(depth_array, label='circuit')
plt.plot(compiled_depth_array, label='compiled')
plt.plot(optimized_depth_array, label='optimized')
plt.legend()
plt.show()
## Here we develop the router:
topology = {0:[4, 1], 1:[0, 2], 2:[1, 3], 3:[2, 4], 4:[3, 0]} # the key is the qbit and the value is the set of connected qbits
                                                              # the order of the qbit indicies in the value lst correspond to
                                                              # a natural orientation for traversing the ring
def get_path(topology, start, end):
    ''' Takes a dict (topology) representing the geometry of the
    connections, an int (start) representing the starting index
    and an int (end) representing the ending index and returns
    a list corresponding to the shortest path from end --> start
    (assuming a ring topology)'''
    def walk(direction):
        # Follow the ring in one orientation until a neighbour of start
        # is reached; direction 1 = clockwise, 0 = counter clockwise.
        route = [end]
        node = end
        while start not in topology[node]:
            node = topology[node][direction]
            route.append(node)
        route.append(start)
        return route

    clockwise = walk(1)
    counter_clockwise = walk(0)
    # prefer the clockwise route when both are the same length
    return clockwise if len(clockwise) <= len(counter_clockwise) else counter_clockwise
def get_swaps(path):
    '''Take a list (path) between an end qbit index and
    a start qbit index. Return a list of tuples (replacement_gates)
    which correspond to the set of swap gates required to swap the
    end qbit with the start qbit'''
    # one swap gate per edge along the path
    forward = [('S', [a, b], []) for a, b in zip(path, path[1:])]
    # The forward pass shifts every intermediate qbit along the path by one;
    # replaying all but the final swap in reverse order undoes those shifts
    # while leaving the end/start exchange in place.
    return forward + forward[:-1][::-1]
def circ_router(circ, topology):
    '''Takes a compiled circuit, and a topology to produce a
    properly routed circuit.

    Only 'Cz' gates can violate connectivity after compilation. Each
    non-local Cz is replaced by: swaps that bring the old target next to
    the control, a local Cz on the new target, then the reverse swaps to
    restore the original qbit order.
    NOTE(review): gate_lst is mutated (del/insert) while being enumerated;
    this is safe here because every replacement list is non-empty, so the
    live-list enumerate never skips an original gate.
    :param circ: Qiskit QuantumCircuit object (already compiled)
    :param topology: dict mapping qbit index -> list of connected qbit indices
    :return: Qiskit QuantumCircuit object respecting the topology
    '''
    gate_lst, num_qbits = utils.read_circ(circ)
    for index, gate in enumerate(gate_lst): # iterate through the circuit
        curr_gate_str = gate_lst[index][0]
        curr_qbit_lst = gate_lst[index][1]
        curr_parms = gate_lst[index][2]
        if curr_gate_str == 'Cz': # check if this gate is a cz gate
            cntrl_qbit = curr_qbit_lst[0]
            trgt_qbit = curr_qbit_lst[1]
            if not trgt_qbit in topology[cntrl_qbit]: # check if the control and target qbits are 'connected'
                new_target = topology[cntrl_qbit][1] # if not, choose a qbit that is connected to the control
                                                     # to be the new target qbit
                path = get_path(topology, new_target, trgt_qbit) # find the path between the new target and the old target
                first_swaps = get_swaps(path) # the swap gates required to swap new_target w/ old target
                path.reverse()
                swap_backs = get_swaps(path) # the swap gates required to swap them back to original
                replacement_lst = first_swaps + [(curr_gate_str, [cntrl_qbit, new_target], curr_parms)] + swap_backs
                del gate_lst[index] # we delete the old Cz gate
                for j, replacement in enumerate(replacement_lst): # and add the swap + cz gate + swap back gates
                    gate_lst.insert(index + j, replacement)
    compiled_circ = utils.write_circ(gate_lst, num_qbits) #
    return compiled_circ
## Testing the qbit router: verify routed circuits stay equivalent, then measure depth overhead.
topology = {0:[4, 1], 1:[0, 2], 2:[1, 3], 3:[2, 4], 4:[3, 0]} # the key is the qbit and the value is the set of connected qbits
for i in range(1000):
    circ = utils.random_circ_generator(num_qbits=5, num_gates=15) # generate random circuit
    # print(circ)
    # print('\n\n')
    compiled_circ = compiler(circ) # compile it
    # print(compiled_circ)
    # print('\n\n')
    routed_circ = circ_router(compiled_circ, topology) # route it
    # print(routed_circ)
    equal1 = utils.circ_equal(circ, routed_circ) # make sure the routed circuit matches the original circuit
    equal2 = utils.circ_equal(compiled_circ, routed_circ) # make sure the routed circuit matches the compiled circuit
    if not equal1.all():
        print(equal1)
        break
    if not equal2.all():
        print(equal2)
        break
    print('passed test #{}'.format(i+1))
# Here we analyze the overhead (again, again)
topology = {0:[4, 1], 1:[0, 2], 2:[1, 3], 3:[2, 4], 4:[3, 0]}
depth_array = []
compiled_depth_array = []
optimized_depth_array = []
routed_depth_array = []
ratio_simple = []
ratio_opt = []
ratio_routed = []
for i in range(100):
    circ = utils.random_circ_generator(num_qbits=5, num_gates=15) # randomly generate a circuit with 5 qbits and 15 gates
    compiled_circ = simple_compiler(circ)
    optimized_circ = compiler(circ)
    routed_circ = circ_router(optimized_circ, topology)
    equal1 = utils.circ_equal(circ, compiled_circ)
    equal2 = utils.circ_equal(circ, optimized_circ)
    equal3 = utils.circ_equal(circ, routed_circ)
    if not equal1.all(): # make sure we are compiling and routing properly!
        print("simple compiler FAIL @ circuit {}".format(i))
        break
    if not equal2.all():
        print("optimized compiler FAIL @ circuit {}".format(i))
        break
    if not equal3.all():
        print("circuit router FAIL @ circuit {}".format(i))
        break
    depth_circ = circ.depth()
    depth_comp = compiled_circ.depth()
    depth_opt = optimized_circ.depth()
    depth_rout = routed_circ.depth()
    depth_array.append(depth_circ) # store depth
    compiled_depth_array.append(depth_comp) # store new circuit depth
    optimized_depth_array.append(depth_opt) # store optimized circuit depth
    routed_depth_array.append(depth_rout)
    ratio_simple.append(((depth_comp - depth_circ) / depth_circ) * 100)
    ratio_opt.append(((depth_opt - depth_circ) / depth_circ) * 100)
    ratio_routed.append(((depth_rout - depth_circ) / depth_circ) * 100)
print('average initial circuit depth = {} +/- {}'.format(np.mean(depth_array), np.std(depth_array)))
print('average compiled circuit depth = {} +/- {}'.format(np.mean(compiled_depth_array), np.std(compiled_depth_array)))
print('average optimized circuit depth = {} +/- {}'.format(np.mean(optimized_depth_array), np.std(optimized_depth_array)))
print('average routed circuit depth = {} +/- {}'.format(np.mean(routed_depth_array), np.std(routed_depth_array)))
print('average increase in depth from simple compiler = {}%'.format(np.round(np.mean(ratio_simple), decimals=2)))
print('average increase in depth from optimized compiler = {}%'.format(np.round(np.mean(ratio_opt), decimals=2)))
print('average increase in depth from circuit rout = {}%'.format(np.round(np.mean(ratio_routed), decimals=2)))
plt.plot(depth_array, label='circuit')
plt.plot(compiled_depth_array, label='compiled')
plt.plot(optimized_depth_array, label='optimized')
plt.plot(routed_depth_array, label='routed', linestyle='--')
plt.legend()
plt.show()
|
https://github.com/Jaybsoni/QuantumCompiler
|
Jaybsoni
|
# Main file for Quantum Compiler Utility Functions
from qiskit import *
import numpy as np
import random
# Constants ------------------------------------------------------------------------------------------------
# All of the 'basic' qiskit gate types
Id = qiskit.circuit.library.standard_gates.i.IGate
H = qiskit.circuit.library.standard_gates.h.HGate
X = qiskit.circuit.library.standard_gates.x.XGate
Y = qiskit.circuit.library.standard_gates.y.YGate
Z = qiskit.circuit.library.standard_gates.z.ZGate
Rx = qiskit.circuit.library.standard_gates.rx.RXGate
Ry = qiskit.circuit.library.standard_gates.ry.RYGate
Rz = qiskit.circuit.library.standard_gates.rz.RZGate
Cx = qiskit.circuit.library.standard_gates.x.CXGate
Cz = qiskit.circuit.library.standard_gates.z.CZGate
S = qiskit.circuit.library.standard_gates.swap.SwapGate
# NOTE: the position of each gate class in list_of_gates matches its key in
# gate_str_dict; read_circ relies on this correspondence to map a gate's type
# back to its short string name.
list_of_gates = [Id, H, X, Y, Z, Rx, Ry, Rz, Cx, Cz, S] # storing the gate types in list
gate_str_dict = {0: 'I', 1: 'H', 2: 'X', 3: 'Y', 4: 'Z', 5: 'Rx', 6: 'Ry', 7: 'Rz', 8: 'Cx', 9: 'Cz', 10: 'S'}
gate_func_dict = {'I': qiskit.QuantumCircuit.id, # A dict relating the gate (str) to its qiskit func call
                  'H': qiskit.QuantumCircuit.h, # I use this to create the final (compiled) qiskit circuit object
                  'X': qiskit.QuantumCircuit.x,
                  'Y': qiskit.QuantumCircuit.y,
                  'Z': qiskit.QuantumCircuit.z,
                  'Rx': qiskit.QuantumCircuit.rx,
                  'Ry': qiskit.QuantumCircuit.ry,
                  'Rz': qiskit.QuantumCircuit.rz,
                  'Cx': qiskit.QuantumCircuit.cx,
                  'Cz': qiskit.QuantumCircuit.cz,
                  'S': qiskit.QuantumCircuit.swap}
# Functions ------------------------------------------------------------------------------------------------
def read_circ(circ):
    """
    Takes a qiskit circuit and creates list of tuples (gate_lst),
    returns the gate_lst along with the num of qbits.
    This list will be used to re-construct the circuit afterwards.
    :param circ: Qiskit QuantumCircuit object
    :return: gate_lst, num_qbits: a list of tuples and an int
    """
    gate_lst = []
    for element in circ.data:  # walk the circuit meta data in order
        instruction = element[0]
        # map the qiskit gate class back to its short string name via the
        # parallel list_of_gates / gate_str_dict tables
        name = gate_str_dict[list_of_gates.index(type(instruction))]
        indices = [qbit.index for qbit in element[1]]  # qbit indices the gate acts on
        gate_lst.append((name, indices, instruction.params))
    return gate_lst, circ.num_qubits
def write_circ(gate_lst, num_qbits):
    """
    Takes a gate_lst and num_qbits to create a qiskit quantum circuit object.
    We assume that the circuit has the same number of qbits and bits, and we measure
    each qbit to its associated bit at the end of the circuit for simplicity
    :param gate_lst: list of tuples, containing the meta_data of the circuit
    :param num_qbits: int, number of qbits in circuit
    :return: circ: Qiskit QuantumCircuit object
    """
    circ = qiskit.QuantumCircuit(num_qbits)  # start from an empty circuit
    for gate_str, qbits, params in gate_lst:
        apply_gate = gate_func_dict[gate_str]
        if gate_str in ('Cx', 'Cz', 'S'):        # two-qbit gates: (first, second) qbit
            apply_gate(circ, qbits[0], qbits[1])
        elif gate_str in ('Rx', 'Ry', 'Rz'):     # parameterized rotations take an angle
            apply_gate(circ, params[0], qbits)
        else:                                    # plain single-qbit gates
            apply_gate(circ, qbits)
    return circ
# a means to augment the gates in the circuit
def general_replace(gate_lst, gate_name, replacement_gates):
    """
    Searches through gate_lst for all instances of 'gate_name' gate and
    replaces them (in place) with the set of gates stored in replacement_gates,
    which is a list of gate tuples.

    A gate tuple will contain ('new_gate_str', [new_qbits] or func, [params] or func)
    where 'new_gate_str' is the name of the new gate, [new_qbits] is a list of qbits
    the new gate will act on. Note if this is empty, then it acts on the same qbits
    as the old gate. If a func is provided, then it applies that func to the
    old list of qbits to determine the new list of qbits. Finally, [params] is
    a list of new parameters or a function which will be applied to the old parameters
    in order to determine the new parameters.

    BUGFIX: the previous implementation deleted/inserted while iterating with
    enumerate; with an empty replacement list this skipped the element that
    shifted into the deleted slot, so e.g. two consecutive 'I' gates lost only
    the first one. This version scans with an explicit index so every original
    gate is visited exactly once, and inserted replacement gates are never
    re-examined (which also prevents infinite loops when a replacement
    contains gate_name itself).
    :param gate_lst: a list containing tuples eg. ('gate_str', [qbits], [params])
    :param gate_name: a str, represents the quantum gate being applied
    :param replacement_gates: a list of tuples, ('new_gate_str', [new_qbits] or func, [params] or func)
    :return: None (gate_lst is mutated in place)
    """
    index = 0
    while index < len(gate_lst):
        gate_str, qbits, parms = gate_lst[index]
        if gate_str != gate_name:
            index += 1
            continue
        del gate_lst[index]  # drop the matched gate, then splice in its replacements
        inserted = 0
        for new_gate_tuple in replacement_gates:
            replacement_gate_name, replacement_qbits, replacement_params = new_gate_tuple
            if type(replacement_qbits) != list:
                # a function of the old qbits determines the new qbit list
                replacement_qbits = [replacement_qbits(qbits)]
            elif not replacement_qbits:
                # empty list: act on the same qbits as the old gate
                replacement_qbits = qbits
            if type(replacement_params) != list:
                # a function of the old params determines the new params
                replacement_params = [replacement_params(parms)]
            gate_lst.insert(index + inserted,
                            (replacement_gate_name, replacement_qbits, replacement_params))
            inserted += 1
        index += inserted  # resume scanning just past the inserted gates
    return
def random_circ_generator(num_qbits=0, num_gates=0):
    """
    Generate a random qiskit circuit made up of the given 'simple'
    gates. One can specify the num of qbits and num of gates in the circuit.
    If unspecified, they will be randomly determined
    :param num_qbits: int, optional number of qbits in circuit
    :param num_gates: int, optional number of gates
    :return: qiskit QuantumCircuit object
    """
    if num_qbits == 0:
        num_qbits = random.randint(1, 5)   # default: 1 - 5 qbits
    if num_gates == 0:
        num_gates = random.randint(5, 25)  # default: 5 - 25 gates
    # single-qbit circuits may only draw 1-qbit gates (indices 0-7);
    # otherwise the two-qbit Cx/Cz gates (8, 9) are allowed as well
    max_gate_index = 7 if num_qbits == 1 else 9
    gate_lst = []
    for _ in range(num_gates):
        gate_str = gate_str_dict[random.randint(0, max_gate_index)]
        control_index = random.randint(0, num_qbits - 1)  # qbit to apply the gate to
        qbits = [control_index]
        parameter = []
        if gate_str in ('Cx', 'Cz'):
            target_index = random.randint(0, num_qbits - 1)
            if target_index == control_index:
                # nudge the target to a neighbouring qbit so it differs from the control
                target_index = control_index - 1 if control_index == num_qbits - 1 else control_index + 1
            qbits.append(target_index)
        elif gate_str in ('Rx', 'Ry'):
            parameter.append(random.random() * (2 * np.pi))  # theta in [0, 2*pi)
        elif gate_str == 'Rz':
            parameter.append(random.random() * np.pi)        # phi in [0, pi)
        gate_lst.append((gate_str, qbits, parameter))
    return write_circ(gate_lst, num_qbits)
def circ_equal(circ1, circ2):
    """
    Checks if two circuits generate statevectors with the same amplitude
    magnitudes, i.e. equality up to a global phase. When they differ, both
    statevectors (and their magnitudes) are printed for inspection.
    :param circ1: Qiskit QuantumCircuit object
    :param circ2: Qiskit QuantumCircuit object
    :return: np.ndarray of bools, elementwise comparison of the magnitudes
    """
    backend = Aer.get_backend('statevector_simulator')  # get simulator
    statevects = []
    magnitudes = []
    for circ in (circ1, circ2):
        vect = execute(circ, backend).result().get_statevector(circ)
        statevects.append(vect)
        # |amplitude| = sqrt(a * conj(a)) -- insensitive to a global phase
        magnitudes.append(np.sqrt(vect * np.conj(vect)))
    equal = np.isclose(magnitudes[0], magnitudes[1])  # within tolerance, elementwise
    if not equal.all():
        for vect, mag in zip(statevects, magnitudes):
            print(np.round(vect, decimals=3))
            print(np.round(mag, decimals=3))
    return equal
def get_first(lst):
    """Return the first element of lst (used as a qbit/param selector)."""
    first, *_rest = lst
    return first
def get_second(lst):
    """Return the second element of lst (used as a target-qbit selector)."""
    _first, second, *_rest = lst
    return second
def main():
    """Entry point placeholder -- this module is meant to be imported."""
    return None


if __name__ == '__main__':
    main()
|
https://github.com/QuantumVic/discrete-time-quantum-walks
|
QuantumVic
|
from qiskit import *
import numpy as np
import matplotlib as mpl
from qiskit.tools.visualization import plot_histogram, plot_state_city
state_sim = Aer.get_backend('statevector_simulator')
qasm_sim = Aer.get_backend('qasm_simulator')
qiskit.__qiskit_version__ #developed in q0.14.0, q-terra0.11.0
# Definition of c_Increment, c_Decrement gates
def increment(qc,qr):
    """Controlled-increment on the node register. The coin qbit qr[0] is part
    of every control set, so the mct gates fire only on the coin-|1> branch.
    Relies on the module-level num_qubits and qr_aux."""
    # ripple from the most significant node qbit down to qr[1]
    for target in range(num_qubits - 1, 0, -1):
        qc.mct(qr[0:target], qr[target], qr_aux)
def decrement(qc,qr):
    """Controlled-decrement on the node register. X-conjugating the controls
    makes each mct fire when those qbits (incl. the coin qr[0]) are |0>, so
    this acts on the coin-|0> branch without explicitly flipping the coin.
    Relies on the module-level num_qubits and qr_aux."""
    for target in range(num_qubits - 1, 0, -1):
        controls = qr[0:target]
        qc.x(controls)                     # invert controls: trigger on |0...0>
        qc.mct(controls, qr[target], qr_aux)
        qc.x(controls)                     # restore the controls
# Definition of QW cycle
def quantum_walk(qc,qr,num_steps):
    """Append num_steps DTQW steps: a Hadamard coin flip followed by the
    conditional shift (increment on coin |1>, decrement on coin |0> via the
    X-conjugated controls inside decrement)."""
    for _ in range(num_steps):
        qc.h(qr[0])         # coin operator
        increment(qc,qr)    # shift right branch
        decrement(qc,qr)    # shift left branch
# Input total number of qubits = nodes + coin
num_qubits = 7
# Define qRegister and qCircuit
qr = QuantumRegister(num_qubits, 'qr')
cr = ClassicalRegister(num_qubits - 1, 'cr')
# We need (num_control - 2) aux qubits for mct
if num_qubits > 3:
    qr_aux = QuantumRegister(num_qubits - 3, 'aux')
    qc = QuantumCircuit(qr,qr_aux,cr)
else:
    qr_aux = None
    qc = QuantumCircuit(qr,cr)
# Initialization for symmetric state 1/sqrt(2) [|0> + i|1>] of coin
qc.h(qr[0])
qc.s(qr[0])
# NOTE(review): barrier(qr, qr_aux) assumes num_qubits > 3 (qr_aux is a register);
# with num_qubits <= 3 qr_aux is None and this call would fail -- confirm.
qc.barrier(qr,qr_aux)
# Initialization for middle state |1000...> of nodes
qc.x(qr[num_qubits - 1])
qc.barrier(qr,qr_aux)
# Running the QW (a single step)
quantum_walk(qc,qr,1)
# Measure the node qubits only (the coin qr[0] is left unmeasured)
qc.measure(qr[1:num_qubits],cr)
qc.draw(output='mpl')
# Execute the circuit
result = execute(qc, qasm_sim).result()
results_dict = result.get_counts()
# Convert the results to decimal value of cReg and plot
results_dec = {}
for key, value in results_dict.items():
    results_dec[str(int(key,2))] = value
plot_histogram(results_dec)
|
https://github.com/QuantumVic/discrete-time-quantum-walks
|
QuantumVic
|
from qiskit import *
import numpy as np
import matplotlib as mpl
from qiskit.tools.visualization import plot_histogram, plot_state_city
from qiskit.tools.monitor import job_monitor
state_sim = Aer.get_backend('statevector_simulator')
qasm_sim = Aer.get_backend('qasm_simulator')
unitary_sim = Aer.get_backend('unitary_simulator')
qiskit.__qiskit_version__ ## developed in q0.14.0, q-terra0.11.0
## Definition of c_Increment, c_Decrement gates
def increment(qc,qr):
    """controlled-increment gate, cf. PhysRevA.72.032329

    All lower qbits (including the coin qr[0]) control the flip of each
    successively lower target; relies on module-level num_qubits and qr_aux."""
    for target in range(num_qubits - 1, 0, -1):
        qc.mct(qr[0:target], qr[target], qr_aux)
def decrement(qc,qr):
    """controlled-decrement gate, cf. PhysRevA.72.032329

    Ripples upward: qr[0:top] control the flip of qr[top]; relies on the
    module-level num_qubits and qr_aux."""
    for top in range(1, num_qubits):
        qc.mct(qr[0:top], qr[top], qr_aux)
## Definition of QW step
def quantum_walk(qc,qr,n):
    """implement DTQW on a previously defined circuit and register cf. PhysRevA.72.032329"""
    for _ in range(n):
        qc.h(qr[0])        # coin operator
        increment(qc,qr)   # shift right on the coin-|1> branch
        qc.x(qr[0])        # flip the coin so decrement acts on the |0> branch
        decrement(qc,qr)   # shift left
        qc.x(qr[0])        # restore the coin to its original state
        qc.barrier()
## PARAMETERS : number of qubits and steps
num_qubits = 4
num_steps = 1
## Define qRegister and cRegister
qr = QuantumRegister(num_qubits, 'qr')
cr = ClassicalRegister(num_qubits - 1, 'cr')
## Define qCircuit
## We need aux qubits for the mct gates: (num_qubits - 3)
if num_qubits > 3:
    qr_aux = QuantumRegister(num_qubits - 3, 'aux')
    qc = QuantumCircuit(qr, qr_aux, cr)
else:
    qr_aux = None
    qc = QuantumCircuit(qr, cr)
## Initialization of the nodes
qc.x(qr[num_qubits-1]) # Initial state = 1000000...0000
qc.barrier()
## Initialization of the coin (symmetrical)
# H then S prepares 1/sqrt(2) (|0> + i|1>) on the coin qr[0]
qc.h(qr[0])
qc.s(qr[0])
qc.barrier()
## Repeat the quantum walk for num_steps
quantum_walk(qc, qr, num_steps)
## Measure the node qubits
qc.measure(qr[1:num_qubits], cr)
qc.draw(output='mpl')
qc.depth()
job = execute(qc, backend = qasm_sim)
shots = job.result().get_counts()
plot_histogram(shots)
|
https://github.com/QuantumVic/discrete-time-quantum-walks
|
QuantumVic
|
from qiskit import *
import numpy as np
import matplotlib as mpl
from qiskit.tools.visualization import plot_histogram, plot_state_city
from qiskit.tools.monitor import job_monitor
state_sim = Aer.get_backend('statevector_simulator')
qasm_sim = Aer.get_backend('qasm_simulator')
unitary_sim = Aer.get_backend('unitary_simulator')
qiskit.__qiskit_version__ #developed in q0.14.0, q-terra0.11.0
# Definition of c_Increment, c_Decrement gates #
def increment(qc,qr):
    """controlled-increment gate, cf. PhysRevA.72.032329

    Ripples from the most significant node qbit down; the coin qr[0] is in
    every control set. Relies on module-level num_qubits and qr_aux."""
    for target in range(num_qubits - 1, 0, -1):
        qc.mct(qr[0:target], qr[target], qr_aux)
def decrement(qc,qr):
    """controlled-decrement gate, cf. PhysRevA.72.032329

    Ripples upward: qr[0:top] control the flip of qr[top]. Relies on the
    module-level num_qubits and qr_aux."""
    for top in range(1, num_qubits):
        qc.mct(qr[0:top], qr[top], qr_aux)
# Definition of QW cycle #
def quantum_walk(qc,qr):
    """implement DTQW on a previously defined circuit and register cf. PhysRevA.72.032329"""
    qc.h(qr[0])        # coin operator
    increment(qc,qr)   # shift right on the coin-|1> branch
    qc.x(qr[0])        # flip the coin so decrement acts on the |0> branch
    decrement(qc,qr)   # shift left
    qc.x(qr[0])        # restore the coin
# Definition of circuit analysis functions #
def get_tot_gates(qc):
    """Get the total number of basic gates of a circuit.

    Decomposes the circuit once and sums the op counts; the previous
    version re-decomposed the entire circuit on every loop iteration.
    :param qc: QuantumCircuit-like object with decompose()/count_ops()
    :return: int, total gate count of the decomposed circuit
    """
    return sum(qc.decompose().count_ops().values())
def get_cx_gates(qc):
    """Get the total number of cx gates of a circuit.

    Returns 0 when the decomposed circuit contains no cx gates
    (the previous version raised KeyError in that case).
    :param qc: QuantumCircuit-like object with decompose()/count_ops()
    :return: int, number of cx gates
    """
    return qc.decompose().count_ops().get('cx', 0)
# Total number of qubits (lattice nodes + coin)
num_qubits = 4
num_steps = 1
# Define qRegister and cRegister
qr = QuantumRegister(num_qubits, 'qr')
cr = ClassicalRegister(num_qubits - 1, 'cr')
# Define qCircuit
# We need (num_qubits - 3) aux qubits for mct-gates
if num_qubits > 3:
    qr_aux = QuantumRegister(num_qubits - 3, 'aux')
    qc = QuantumCircuit(qr,qr_aux,cr)
else:
    qr_aux = None
    qc = QuantumCircuit(qr,cr)
# BEGINNING OF QUANTUM CIRCUIT
# Initialization for symmetric state 1/sqrt(2) [|0> + i|1>] of coin
qc.h(qr[0])
qc.s(qr[0])
qc.barrier()
# Initialization for middle state |1000...> of nodes
qc.x(qr[num_qubits - 1])
qc.barrier()
# Running the QW: one quantum_walk call per step
for i in range(num_steps):
    quantum_walk(qc,qr)
    qc.barrier()
# Measurement (node qubits only; the coin qr[0] is left unmeasured)
qc.measure(qr[1:num_qubits],cr)
qc.draw(output='mpl')
# Execute the circuit
job = execute(qc, backend=qasm_sim)
results_dict = job.result().get_counts()
# Convert the results to decimal value of cReg and plot
results_dec = {}
for key, value in results_dict.items():
    results_dec[str(int(key,2))] = value
plot_histogram(results_dec)
|
https://github.com/QuantumVic/discrete-time-quantum-walks
|
QuantumVic
|
from qiskit import *
import numpy as np
import matplotlib as mpl
from qiskit.tools.visualization import plot_histogram, plot_state_city
from qiskit.tools.monitor import job_monitor
# Pre-fetch the Aer backends used below.
state_sim = Aer.get_backend('statevector_simulator')
qasm_sim = Aer.get_backend('qasm_simulator')
unitary_sim = Aer.get_backend('unitary_simulator')
# NOTE(review): `from qiskit import *` does not bind the module name `qiskit`;
# this line needs a plain `import qiskit` to run outside the original notebook.
qiskit.__qiskit_version__  # developed in q0.14.0, q-terra0.11.0
# Definition of c_Increment, c_Decrement gates
def increment(qc, qr):
    """controlled-increment gate, cf. PhysRevA.72.032329"""
    # Cascade of multi-controlled X gates, targeting the most significant
    # qubit first; uses module-level ``num_qubits`` and ``qr_aux`` ancillas.
    for target in range(num_qubits - 1, 0, -1):
        qc.mct(qr[0:target], qr[target], qr_aux)
def decrement(qc, qr):
    """controlled-decrement gate, cf. PhysRevA.72.032329"""
    # Multi-controlled X ladder: qubits [0..target-1] control a flip of
    # qubit ``target``, from the least significant target upward.
    for target in range(1, num_qubits):
        qc.mct(qr[0:target], qr[target], qr_aux)
# Definition of QW cycle
def quantum_walk(qc, qr):
    """implement DTQW on a previously defined circuit and register cf. PhysRevA.72.032329"""
    coin = qr[0]
    qc.h(coin)            # coin operator
    increment(qc, qr)     # shift right when the coin is |1>
    qc.x(coin)            # invert the coin so decrement acts on the |0> branch
    decrement(qc, qr)
    qc.x(coin)            # restore the coin
def get_tot_gates(qc):
    """Return the total number of basic gates of ``qc`` after one decompose pass.

    Fix: the original re-ran ``qc.decompose().count_ops()`` for every op
    type inside the loop; decomposing once and summing the values gives the
    same total without repeating the expensive decomposition.
    """
    return sum(qc.decompose().count_ops().values())
def get_cx_gates(qc):
    """Return the number of CX gates of ``qc``, or 0 if it has none.

    Fix: a circuit whose decomposition contains no 'cx' entry made the
    original raise ``KeyError``; use ``dict.get`` with a 0 default.
    """
    return qc.decompose().count_ops().get('cx', 0)
# Analysis of gate number: CX cost of one QW step as circuit width grows.
x = []  # number of qubits
y = []  # number of CX gates of one QW step
for q in range(4, 30):
    # Total number of qubits (nodes + coin)
    num_qubits = q
    # Define qRegister and qCircuit
    qr = QuantumRegister(num_qubits, 'qr')
    cr = ClassicalRegister(num_qubits - 1, 'cr')
    # We need (num_control - 2) aux qubits for mct
    if num_qubits > 3:
        qr_aux = QuantumRegister(num_qubits - 3, 'aux')
        qc = QuantumCircuit(qr, qr_aux, cr)
    else:
        qr_aux = None
        qc = QuantumCircuit(qr, cr)
    # Running one QW step and recording its CX count.
    quantum_walk(qc, qr)
    x.append(q)
    y.append(get_cx_gates(qc))
print(x, y)
# Plot CX-gate count versus qubit number.
mpl.pyplot.plot(x, y)
mpl.pyplot.xlabel('number of qubits')
mpl.pyplot.ylabel('number of gates')
mpl.pyplot.title('Number of CX gates required for a QW step')
mpl.pyplot.grid(True)
mpl.pyplot.show()
|
https://github.com/kazawai/shor_qiskit
|
kazawai
|
from numpy import pi, random
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit_aer import AerSimulator
backend = AerSimulator()
def bell_state():
    """Build a two-qubit circuit preparing the Bell state (|00> + |11>)/sqrt(2)."""
    qubits = QuantumRegister(2)
    clbits = ClassicalRegister(2)
    qc = QuantumCircuit(qubits, clbits)
    qc.h(qubits[0])
    qc.cx(qubits[0], qubits[1])
    return qc
def algorithm(value1, value2, circuit):
    """Apply the players' measurement-basis rotations for the game round.

    Qubit 0 is rotated by pi/4 when ``value1`` is 1 (0 otherwise); qubit 1
    by -pi/8 when ``value2`` is 1 and +pi/8 otherwise. Returns the mutated
    circuit.
    """
    angle0 = pi / 4 if value1 == 1 else 0
    angle1 = -(pi / 8) if value2 == 1 else pi / 8
    circuit.ry(angle0, 0)
    circuit.ry(angle1, 1)
    return circuit
def quantum_strategy(x, y):
    """Play one round of the quantum strategy for referee bits ``x``/``y``.

    Returns the players' answer bits ``(a, b)`` from a single shot.

    Fix: the original ran the default multi-shot simulation and took
    ``list(counts.keys())[0]`` — an arbitrary (insertion-ordered) outcome,
    not a fair sample. Run exactly one shot and read it from the memory.
    The returned bit order matches the original counts-key convention.
    """
    circuit = bell_state()
    circuit = algorithm(x, y, circuit)
    circuit.measure([0, 1], [0, 1])
    result = backend.run(circuit, shots=1, memory=True).result()
    outcome = result.get_memory(circuit)[0]
    return int(outcome[0]), int(outcome[1])
def bell_inequality_game(nb_tries):
    """Play ``nb_tries`` rounds of the game and print the number of wins.

    A round counts as a win when ``x*y`` equals ``a AND b``.
    NOTE(review): the textbook CHSH winning condition is ``a XOR b == x AND y``;
    confirm whether AND is intended here before changing it.
    """
    wins = 0
    for _ in range(nb_tries):
        # Referee draws uniform random bits for the two players.
        x = random.randint(2)
        y = random.randint(2)
        a, b = quantum_strategy(x, y)
        # The boolean comparison result is accumulated as 0/1.
        wins += x * y == (a and b)
    print(f"Quantum strategy wins {wins}/{nb_tries} times")
if __name__ == "__main__":
    bell_inequality_game(1000)
|
https://github.com/kazawai/shor_qiskit
|
kazawai
|
from qiskit import (ClassicalRegister, QuantumCircuit, QuantumRegister,
transpile)
from qiskit_aer import AerSimulator
# Define the quantum circuit: one qubit, one classical bit.
qc = QuantumCircuit(QuantumRegister(1), ClassicalRegister(1))
# Append the X operator (flips |0> to |1>)
qc.x(0)
# Measure the qubit
qc.measure(0, 0)
# Display the circuit
print(qc)
# Transpile the circuit for the AerSimulator
aer_sim = AerSimulator()
aer_sim_transpile = transpile(qc, aer_sim)
# Simulate the transpiled circuit 10 times
result = aer_sim.run(aer_sim_transpile, shots=10).result()
# Print the result (all shots land in '1' for this circuit)
print(result.get_counts())
|
https://github.com/kazawai/shor_qiskit
|
kazawai
|
"""The following is python code utilizing the qiskit library that can be run on extant quantum
hardware using 5 qubits for factoring the integer 15 into 3 and 5. Using period finding,
for a^r mod N = 1, where a = 11 and N = 15 (the integer to be factored) the problem is to find
r values for this identity such that one can find the prime factors of N. For 11^r mod(15) =1,
results (as shown in fig 1.) correspond with period r = 4 (|00100>) and r = 0 (|00000>).
To find the factor, use the equivalence a^r mod 15. From this:
(a^r -1) mod 15 = (a^(r/2) + 1)(a^(r/2) - 1) mod 15.In this case, a = 11. Plugging in the two r
values for this a value yields (11^(0/2) +1)(11^(4/2) - 1) mod 15 = 2*(11 +1)(11-1) mod 15
Thus, we find (24)(20) mod 15. By finding the greatest common factor between the two coefficients,
gcd(24,15) and gcd(20,15), yields 3 and 5 respectively. These are the prime factors of 15,
so the result of running shors algorithm to find the prime factors of an integer using quantum
hardware are demonstrated. Note, this is not the same as the technical implementation of shor's
algorithm described in this section for breaking the discrete log hardness assumption,
though the proof of concept remains."""
# Import libraries
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from numpy import pi
from qiskit import IBMQ, Aer, QuantumCircuit, ClassicalRegister, QuantumRegister, execute
from qiskit.providers.ibmq import least_busy
from qiskit.visualization import plot_histogram
# Initialize qubit registers
qreg_q = QuantumRegister(5, 'q')
creg_c = ClassicalRegister(5, 'c')
circuit = QuantumCircuit(qreg_q, creg_c)
circuit.reset(qreg_q[0])
circuit.reset(qreg_q[1])
circuit.reset(qreg_q[2])
circuit.reset(qreg_q[3])
circuit.reset(qreg_q[4])
# Apply Hadamard transformations to qubit registers
circuit.h(qreg_q[0])
circuit.h(qreg_q[1])
circuit.h(qreg_q[2])
# Apply first QFT, modular exponentiation, and another QFT
# NOTE(review): this is a hand-compiled, hard-coded sequence specific to
# a = 11, N = 15 — it is not a general QFT/modular-exponentiation routine.
circuit.h(qreg_q[1])
circuit.cx(qreg_q[2], qreg_q[3])
circuit.crx(pi/2, qreg_q[0], qreg_q[1])
circuit.ccx(qreg_q[2], qreg_q[3], qreg_q[4])
circuit.h(qreg_q[0])
circuit.rx(pi/2, qreg_q[2])
circuit.crx(pi/2, qreg_q[1], qreg_q[2])
circuit.crx(pi/2, qreg_q[1], qreg_q[2])
circuit.cx(qreg_q[0], qreg_q[1])
# Measure the qubit registers 0-2 (the period register)
circuit.measure(qreg_q[2], creg_c[2])
circuit.measure(qreg_q[1], creg_c[1])
circuit.measure(qreg_q[0], creg_c[0])
# Get least busy quantum hardware backend to run on
provider = IBMQ.load_account()
device = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 3 and
                                      not x.configuration().simulator and x.status().operational==True))
print("Running on current least busy device: ", device)
# Run the circuit on available quantum hardware and plot histogram
from qiskit.tools.monitor import job_monitor
job = execute(circuit, backend=device, shots=1024, optimization_level=3)
job_monitor(job, interval = 2)
results = job.result()
answer = results.get_counts(circuit)
plot_histogram(answer)
# largest amplitude results correspond with r values used to find the prime factor of N.
|
https://github.com/daimurat/qiskit-implementation
|
daimurat
|
# Notebook dump of Qiskit-basics exercises. Several cells below look like
# *intentionally* broken quiz options; they are annotated here, not fixed.
import matplotlib as plt
import numpy
import math
from qiskit import QuantumCircuit, QuantumRegister, assemble, Aer, BasicAer, execute
from qiskit.quantum_info import Statevector, state_fidelity, average_gate_fidelity, process_fidelity, hellinger_fidelity
from qiskit.quantum_info.operators import Operator
from qiskit.visualization import plot_histogram
from qiskit.extensions import XGate
import qiskit.tools.jupyter
# Jupyter magic — invalid Python outside a notebook.
%qiskit_version_table
qc = QuantumCircuit(4, 4)
qc.draw('mpl')
qc = QuantumCircuit(4)
qc.draw('mpl')
qc = QuantumCircuit(QuantumRegister(4, 'qr0'), QuantumRegister(4, 'crl'))
qc.draw('mpl')
# NOTE(review): QuantumCircuit([4, 4]) is not a valid constructor call.
qc = QuantumCircuit([4, 4])
qc.draw('mpl')
# Ry rotation followed by measurement, simulated on the QASM backend.
qc = QuantumCircuit(1,1)
qc.ry(3 * math.pi/4, 0)
qc.measure(0,0)
qc.draw('mpl')
qasmsim = Aer.get_backend('qasm_simulator')
qobj = assemble(qc)  # Assemble circuit into a Qobj that can be run
counts = qasmsim.run(qobj).result().get_counts()  # Do the simulation, returning the counts
plot_histogram(counts)  # Display the measurement outcomes
inp_reg = QuantumRegister(2, name='inp')
ancilla = QuantumRegister(1, name='anc')
qc = QuantumCircuit(inp_reg, ancilla)
qc.h(inp_reg[0:2])
qc.x(ancilla[0])
qc.draw('mpl')
qc = QuantumCircuit(3, 3)
qc.measure([0,1,2], [0,1,2])
qc.draw('mpl')
qc = QuantumCircuit(3, 3)
qc.measure_all()
qc.draw('mpl')
bell = QuantumCircuit(2)
bell.h(0)
bell.x(1)
bell.cx(0, 1)
bell.draw('mpl')
# Initialize a two-qubit register directly in a Bell state.
qc = QuantumCircuit(2)
v = [1/math.sqrt(2), 0, 0, 1/math.sqrt(2)]
qc.initialize(v, [0,1])
simulator = Aer.get_backend('statevector_simulator')
result = execute(qc, simulator).result()
statevector = result.get_statevector()
print(statevector)
qc= QuantumCircuit(3, 3)
qc.barrier()
qc.draw('mpl')
# NOTE(review): barrier(qc) and barrier(3) below are invalid quiz options
# (a circuit is not a qubit specifier; index 3 is out of range).
qc= QuantumCircuit(3, 3)
qc.barrier(qc)
qc.draw('mpl')
qc= QuantumCircuit(3, 3)
qc.barrier(3)
qc.draw('mpl')
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.barrier(0)
qc.cx(0,1)
qc.barrier([0,1])
qc.draw('mpl')
print("Circuit depth: ", qc.depth())
qc = QuantumCircuit(3)
# Insert code fragment here
qasm_sim = Aer.get_backend('qasm_simulator')
couple_map = [[0, 1], [1, 2]]
job = execute(qc, backend=qasm_sim, shots=1024, coupling_map=couple_map)
result = job.result()
print(result)
# NOTE(review): the next three execute() variants are deliberately wrong
# quiz options: an 'ibmq_simulator' Aer backend name, bogus loop=/repeat=/
# shot= keywords, and a positional argument after a keyword argument
# (which is a SyntaxError).
qc = QuantumCircuit(3)
# Insert code fragment here
qasm_sim = Aer.get_backend('ibmq_simulator')
couple_map = [[0, 1], [0, 2]]
job = execute(qc, loop=1024, coupling_map=couple_map)
result = job.result()
print(result)
qc = QuantumCircuit(3)
# Insert code fragment here
qasm_sim = Aer.get_backend('qasm_simulator')
couple_map = [[0, 1], [1, 2]]
job = execute(qc, backend=qasm_sim, repeat=1024, coupling_map=couple_map)
result = job.result()
print(result)
qc = QuantumCircuit(3)
# Insert code fragment here
qasm_sim = Aer.get_backend('qasm_simulator')
couple_map = [[0, 1], [1, 2]]
job = execute(backend=qasm_sim, qc, shot=1024, coupling_map=couple_map)
result = job.result()
print(result)
backend = BasicAer.get_backend('qasm_simulator')
qc = QuantumCircuit(3)
# insert code here
execute(qc, backend, shots=1024, coupling_map=[[0, 1], [1, 2]])
# NOTE(review): custom_topology=, device=, and mode= are not execute() kwargs.
backend = BasicAer.get_backend('qasm_simulator')
qc = QuantumCircuit(3)
# insert code here
execute(qc, backend, shots=1024, custom_topology=[[0, 1], [2, 3]])
backend = BasicAer.get_backend('qasm_simulator')
qc = QuantumCircuit(3)
# insert code here
execute(qc, backend, shots=1024, device="qasm_simulator", mode="custom")
backend = BasicAer.get_backend('qasm_simulator')
qc = QuantumCircuit(3)
# insert code here
execute(qc, backend, mode="custom")
# NOTE(review): Operator.Xop does not exist, a 1x2 matrix is not a valid
# operator, and append() is missing its qargs — more broken quiz options.
op = Operator.Xop(0)
qc = QuantumCircuit(1)
op = Operator([[0, 1]])
qc.append(op)
# Compare an X gate against a globally-phased X via fidelity measures.
qc_a = QuantumCircuit(1)
op_a = Operator(XGate())
qc_a.append(op_a, [0])
state_a = Statevector.from_instruction(qc_a)
qc_b = QuantumCircuit(1)
op_b = numpy.exp(1j * 0.5) * Operator(XGate())
qc_b.append(op_b, [0])
state_b = Statevector.from_instruction(qc_b)
state_fidelity(state_a, state_b)
process_fidelity(op_a, op_b)
average_gate_fidelity(op_a, op_b)
# Simple X + measure sampled 1000 times.
qc = QuantumCircuit(2, 2)
qc.x(0)
qc.measure([0,1], [0,1])
simulator = Aer.get_backend('qasm_simulator')
result = execute(qc, simulator, shots=1000).result()
counts = result.get_counts(qc)
print(counts)
|
https://github.com/daimurat/qiskit-implementation
|
daimurat
|
# Round-trip a Bell circuit through OpenQASM 2 text, then load one from a file.
from qiskit import QuantumCircuit
bell = QuantumCircuit(2)
bell.h(0)
bell.cx(0, 1)
bell.measure_all()
qasm_str = bell.qasm()
# NOTE(review): from_qasm_str/from_qasm_file are classmethods; calling them on
# a throwaway instance works but QuantumCircuit.from_qasm_str(...) is clearer.
qc2 = QuantumCircuit().from_qasm_str(qasm_str)
qc2.draw()
# Loads a file literally named 'qasm' from the working directory.
qc3 = QuantumCircuit().from_qasm_file('qasm')
qc3.draw()
|
https://github.com/daimurat/qiskit-implementation
|
daimurat
|
from qiskit import *
from qiskit.circuit import ParameterVector, QuantumCircuit
import matplotlib.pyplot as plt
import numpy as np
# Jupyter magic — renders figures inline; invalid Python outside a notebook.
%matplotlib inline
# Parameterized CX-ladder / Rz / reversed-CX-ladder ansatz on n qubits,
# measured on qubit 0 only.
n = 3
param_list = ParameterVector('param_list', n)
qc = QuantumCircuit(n, 1)
qc.h(0)
for i in range(n-1):
    qc.cx(i, i+1)
qc.barrier()
for i in range(n):
    qc.rz(param_list[i], i)
qc.barrier()
# Uncompute the CX ladder in reverse order.
for i in reversed(range(n-1)):
    qc.cx(i, i+1)
qc.h(0)
qc.measure(0, 0)
qc.draw()
# Bind concrete angles to the symbolic parameters (in place).
param_dict = dict(zip(param_list.params, [0.1 ,0.2, 0.3]))
param_dict
qc.assign_parameters(param_dict, inplace=True)
|
https://github.com/daimurat/qiskit-implementation
|
daimurat
|
# Prepare a single qubit directly in |1> via initialize, two ways of running it.
from qiskit import QuantumCircuit
qc = QuantumCircuit(1)
init_state = [0, 1]
qc.initialize(init_state, 0)
qc.draw()
from qiskit import assemble, Aer
backend = Aer.get_backend('statevector_simulator')
## optional ##
# assemble a list of circuits and create Qobj
qobj = assemble(qc)  # backwards-compatible alias for QasmQobj
result = backend.run(qobj).result()
result.get_statevector()
# Same result via the one-call execute helper.
from qiskit import execute
result = execute(qc, backend).result()
result.get_statevector()
|
https://github.com/daimurat/qiskit-implementation
|
daimurat
|
from qiskit import QuantumCircuit, Aer
from qiskit.visualization import plot_state_qsphere, plot_bloch_multivector
import math
def plot_bloch_sphere(qc, title, initial):
    """Simulate ``qc`` exactly and draw the resulting state on Bloch spheres.

    ``title`` names the gate and ``initial`` the starting basis state; both
    only decorate the figure title.
    """
    sim = Aer.get_backend('statevector_simulator')
    state = sim.run(qc).result().get_statevector()
    caption = title + ' Gate: initial |' + initial + '>'
    return plot_bloch_multivector(state, title=caption)
def plot_qsphere_with_phase(qc):
    """Simulate ``qc`` exactly and draw its state on a Q-sphere with phases shown."""
    sim = Aer.get_backend('statevector_simulator')
    state = sim.run(qc).result().get_statevector()
    return plot_state_qsphere(state, show_state_phases=True)
# Visualize the action of the basic single-qubit gates on |0>, |1>, and |+>.
qc = QuantumCircuit(1)
qc.x(0)
plot_bloch_sphere(qc, 'X', '0')
qc = QuantumCircuit(1)
qc.y(0)
plot_bloch_sphere(qc, 'Y', '0')
qc = QuantumCircuit(1)
qc.initialize([0, 1], 0)
qc.y(0)
plot_bloch_sphere(qc, 'Y', '1')
qc = QuantumCircuit(1)
qc.z(0)
plot_bloch_sphere(qc, 'Z', '0')
qc = QuantumCircuit(1)
qc.initialize([0, 1], 0)
qc.z(0)
plot_bloch_sphere(qc, 'Z', '1')
plot_qsphere_with_phase(qc)
qc = QuantumCircuit(1)
qc.h(0)
plot_bloch_sphere(qc, 'Hadamard', '0')
# Phase gates are demonstrated on |+> so the rotation about Z is visible.
qc = QuantumCircuit(1)
qc.initialize([1/math.sqrt(2), 1/math.sqrt(2)])
qc.rz(math.pi/4, 0)
plot_bloch_sphere(qc, 'Rz', '+')
plot_qsphere_with_phase(qc)
qc = QuantumCircuit(1)
qc.initialize([1/math.sqrt(2), 1/math.sqrt(2)])
qc.s(0)
plot_bloch_sphere(qc, 'S', '+')
plot_qsphere_with_phase(qc)
# T gate on |+>: a pi/4 phase rotation about Z.
qc = QuantumCircuit(1)
qc.initialize([1/math.sqrt(2), 1/math.sqrt(2)])
qc.t(0)
# Fix: this cell applies a T gate, but the plot title was mislabeled 'S'.
plot_bloch_sphere(qc, 'T', '+')
plot_qsphere_with_phase(qc)
# Two-qubit operators: build them from matrices, circuits, and Paulis.
from qiskit.quantum_info import Operator
XX = Operator([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]])
XX
# Turn a whole circuit into a unitary Operator.
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
bellOp = Operator(qc)
bellOp
# Operators can be appended to circuits like gates.
qc = QuantumCircuit(2)
qc.append(bellOp, [0, 1])
qc.draw('mpl')
from qiskit.quantum_info.operators import Pauli
A = Operator(Pauli(label='X'))
B = Operator(Pauli(label='Z'))
# Tensor product, composition, and (non-unitary) sum of operators.
A.tensor(B)
A.compose(B)
A+B
|
https://github.com/daimurat/qiskit-implementation
|
daimurat
|
# Notebook dump: tour of the Aer simulator (methods, memory, saved states).
# Shell command from a notebook cell — not valid Python outside Jupyter.
pip install qiskit-aer
import numpy as np
from qiskit import QuantumCircuit
from qiskit import Aer, transpile
from qiskit.tools.visualization import plot_histogram, plot_state_city
import qiskit.quantum_info as pi
Aer.backends()
simulator = Aer.get_backend('aer_simulator')
# Bell circuit with terminal measurements.
circ = QuantumCircuit(2)
circ.h(0)
circ.cx(0,1)
circ.measure_all()
simulator = Aer.get_backend('aer_simulator')
circ = transpile(circ, simulator)
result = simulator.run(circ).result()
counts = result.get_counts(circ)
plot_histogram(counts, title= "Bell-State counts")
# Per-shot outcomes via memory=True.
result = simulator.run(circ, shots=10, memory=True).result()
memory = result.get_memory(circ)
print(memory)
# The same circuit under four different simulation methods.
shots = 10000
sim_stabilizer = Aer.get_backend('aer_simulator_stabilizer')
job_stabilizer = sim_stabilizer.run(circ, shots=shots)
counts_stabilizer = job_stabilizer.result().get_counts(0)
sim_statevector = Aer.get_backend('aer_simulator_statevector')
job_statevector = sim_statevector.run(circ, shots=shots)
counts_statevector = job_statevector.result().get_counts(0)
sim_density = Aer.get_backend('aer_simulator_density_matrix')
job_density = sim_density.run(circ, shots=shots)
counts_density = job_density.result().get_counts(0)
sim_mps = Aer.get_backend('aer_simulator_matrix_product_state')
job_mps = sim_mps.run(circ, shots=shots)
counts_mps = job_mps.result().get_counts(0)
plot_histogram([counts_stabilizer, counts_statevector, counts_density, counts_mps],
               title='Counts for different simulation methods',
               legend=['stabilizer', 'statevector',
                       'density_matrix', 'matrix_product_state'])
# GPU selection fails gracefully when no GPU build is installed.
from qiskit_aer import AerError
try:
    simulator_gpu = Aer.get_backend('aer_simulator')
    simulator_gpu.set_options(device='GPU')
except AerError as e:
    print(e)
# Single-precision statevector simulation.
simulator = Aer.get_backend('aer_simulator_statevector')
simulator.set_options(precision='single')
result = simulator.run(circ).result()
counts = result.get_counts(circ)
print(counts)
# Save the full statevector instead of measuring.
circ = QuantumCircuit(2)
circ.h(0)
circ.cx(0, 1)
circ.save_statevector()
simulator = Aer.get_backend('aer_simulator')
circ = transpile(circ, simulator)
result = simulator.run(circ).result()
statevector = result.get_statevector(circ)
plot_state_city(statevector, title='Bell state')
# Snapshot the state before each of several Rx rotations.
steps = 5
circ = QuantumCircuit(1)
for i in range(steps):
    circ.save_statevector(label=f'psi_{i}')
    circ.rx(i * np.pi / steps, 0)
circ.save_statevector(label=f'psi_{steps}')
simulator = Aer.get_backend('aer_simulator')
circ = transpile(circ, simulator)
result = simulator.run(circ).result()
data = result.data(0)
data
# Generate a random statevector
# NOTE(review): random_statevector is used but never imported in this chunk;
# it needs `from qiskit.quantum_info import random_statevector` — confirm
# against the cells above this dump.
num_qubits = 2
psi = random_statevector(2 ** num_qubits, seed=100)
# Set initial state to generated statevector
circ = QuantumCircuit(num_qubits)
circ.set_statevector(psi)
circ.save_state()
# Transpile for simulator
simulator = Aer.get_backend('aer_simulator')
circ = transpile(circ, simulator)
# Run and get saved data
result = simulator.run(circ).result()
result.data(0)
import qiskit.tools.jupyter
# Jupyter magics — invalid Python outside a notebook.
%qiskit_version_table
%qiskit_copyright
|
https://github.com/daimurat/qiskit-implementation
|
daimurat
|
from qiskit import *
from qiskit.visualization import plot_histogram
import numpy as np
IBMQ.load_account()
IBMQ.providers()
provider = IBMQ.get_provider('ibm-q')
provider.backends()
import qiskit.tools.jupyter
backend_ex = provider.get_backend('ibmq_16_melbourne')
backend_ex
backends = provider.backends(filters =
lambda x:x.configuration().n_qubits >= 2
and not x.configuration().simulator
and x.status().operational==True
)
backends
from qiskit.providers.ibmq import least_busy
backend = least_busy(provider.backends(filters =
lambda x:x.configuration().n_qubits >= 2
and not x.configuration().simulator
and x.status().operational==True
))
backends
backend = provider.get_backend('ibmqx2')
qc_and = QuantumCircuit(3)
qc_and.ccx(0,1,2)
qc_and.draw()
qc_and.decompose().draw()
from qiskit.tools.monitor import job_monitor
def AND(inp1, inp2, backend, layout):
    """Evaluate a Toffoli-based AND of two classical bits on ``backend``.

    ``inp1``/``inp2`` are '0'/'1' strings; ``layout`` is the initial qubit
    layout used for transpilation. Returns the transpiled circuit and the
    measurement counts of an 8192-shot run.
    """
    qc = QuantumCircuit(3, 1)
    qc.reset(range(3))
    # Encode the classical inputs on qubits 0 and 1.
    for qubit, bit in enumerate((inp1, inp2)):
        if bit == '1':
            qc.x(qubit)
    qc.barrier()
    qc.ccx(0, 1, 2)  # Toffoli writes the AND onto qubit 2.
    qc.barrier()
    qc.measure(2, 0)
    qc_trans = transpile(qc, backend, initial_layout=layout, optimization_level=3)
    job = execute(qc_trans, backend, shots=8192)
    print(job.job_id())
    job_monitor(job)
    output = job.result().get_counts()
    return qc_trans, output
# Run the AND gate for inputs '0','0' using physical qubits [0, 2, 4].
qc_trans, output = AND('0', '0', backend, [0, 2, 4])
qc_trans
output
from qiskit.visualization import plot_gate_map, plot_error_map
# Visualize the device's connectivity and calibration error rates.
plot_gate_map(backend)
plot_error_map(backend)
|
https://github.com/daimurat/qiskit-implementation
|
daimurat
|
import numpy as np
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit,execute, transpile, Aer, IBMQ
from qiskit.tools.jupyter import *
from qiskit.visualization import *
# ibm_quantum_widgets is only available inside the IBM Quantum Lab environment.
from ibm_quantum_widgets import *
from qiskit.providers.aer import QasmSimulator
# Loading your IBM Quantum account(s)
provider = IBMQ.load_account()
from qiskit.visualization import plot_state_qsphere
from qiskit.visualization import plot_bloch_multivector
# Plot the state of an empty one-qubit circuit (|0>).
qc=QuantumCircuit(1)
statevector_simulator = Aer.get_backend('statevector_simulator')
result=execute(qc,statevector_simulator).result()
statevector_results=result.get_statevector(qc)
plot_bloch_multivector(statevector_results)
plot_state_qsphere(statevector_results)
# After X: the qubit is in |1>.
qc.x(0)
result=execute(qc,statevector_simulator).result()
statevector_results=result.get_statevector(qc)
plot_bloch_multivector(statevector_results)
plot_state_qsphere(statevector_results)
# After H on |1>: the qubit is in |->.
qc.h(0)
result=execute(qc,statevector_simulator).result()
statevector_results=result.get_statevector(qc)
plot_bloch_multivector(statevector_results)
plot_state_qsphere(statevector_results)
|
https://github.com/daimurat/qiskit-implementation
|
daimurat
|
from qiskit import *
from qiskit import Aer
from qiskit.aqua import QuantumInstance
from qiskit.aqua.operators import Z
from qiskit.aqua.operators.state_fns import StateFn, CircuitStateFn
from qiskit.aqua.operators.expectations import PauliExpectation, AerPauliExpectation
from qiskit.aqua.operators.converters import CircuitSampler
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import numpy as np
%matplotlib inline
nqubits = 6 # ้ๅญใใใๆฐ
sv = 2**nqubits # ็ถๆ
ๆฐ
t = 3.0 # ใใคใใใฏในใใทใใฅใฌใผใทใงใณใใๆ้
M = 100 # ใใญใใฟใผๅ่งฃใฎๅๅฒๆฐ
delta = t/M # ๆ้ใฎๅปใฟๅน
h = 3 # ๅค้จ็ฃๅ ด
def get_expectation_val(psi, op):
    """Estimate <psi|op|psi> by sampling on the QASM simulator (1024 shots)."""
    # Backend wrapped in a QuantumInstance for the sampler.
    quantum_instance = QuantumInstance(Aer.get_backend('qasm_simulator'), shots=1024)
    # Pair the operator (as a measurement) with the state.
    measurable = StateFn(op, is_measurement=True).compose(psi)
    # Rewrite into Pauli-basis measurements...
    expectation = PauliExpectation().convert(measurable)
    # ...sample the resulting circuits, and evaluate.
    sampled = CircuitSampler(quantum_instance).convert(expectation)
    return sampled.eval().real
# Build the Trotter circuit for the transverse-field Ising model and track
# the magnetization over time.
# (Original Japanese comments were mojibake and some were split across lines
#  by a bad encoding conversion; comments are translated to English, the
#  garbled runtime prompt string is rejoined but otherwise kept as-is.)
circuit_trotter_transIsing = QuantumCircuit(nqubits)
# Prepare the initial state: read a product-state description from the user
# ('1' applies X, '+' applies H; other characters leave the qubit in |0>).
print("{}ใใใใฎๅๆ็ถๆใๅฅๅใใฆใใ ใใใ้ใญๅใใใฏ'+'ใ(ไพ:000+)".format(nqubits))
b_str = input()  # per-qubit initial-state string (e.g. '000+')
for qubit in range(len(b_str)):
    if b_str[qubit] == '1':
        circuit_trotter_transIsing.x(qubit)
    elif b_str[qubit] == '+':
        circuit_trotter_transIsing.h(qubit)
arr = []  # accumulates the magnetization at each Trotter step
# Time evolution: M first-order Trotter steps.
for s in range(M):
    # One Trotter slice: ZZ couplings (CX-RZ-CX) then the transverse-field
    # X rotations, with periodic boundary conditions via (i+1)%nqubits.
    for i in range(nqubits):
        circuit_trotter_transIsing.cx(i,(i+1)%nqubits)
        circuit_trotter_transIsing.rz(-2*delta,(i+1)%nqubits)
        circuit_trotter_transIsing.cx(i,(i+1)%nqubits)
        circuit_trotter_transIsing.rx(-2*delta*h, i)
    # Expectation value of the magnetization (Pauli Z) in the current state.
    psi = CircuitStateFn(circuit_trotter_transIsing)
    op = Z
    result = get_expectation_val(psi, op)
    # Store the value for plotting.
    arr.append(result)
# Plot the magnetization dynamics over time.
x = [i*delta for i in range(M)]
plt.xlabel("time")
plt.ylabel("magnetization")
plt.plot(x, arr)
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.