code stringlengths 17 6.64M |
|---|
class _S_conjugate_operation(_operation):
    """Operation class applying the adjoint S gate (S-dagger) to every qubit."""
    def get_circuit(self, var_param_assignment=None):
        """Build the S-dagger layer; the parameter assignment is unused."""
        circuit = QuantumCircuit(self.num_qubits)
        circuit.sdg(range(self.num_qubits))
        return circuit
|
class _T_operation(_operation):
    """Operation class applying the T gate to every qubit."""
    def get_circuit(self, var_param_assignment=None):
        """Build the T-gate layer; the parameter assignment is unused."""
        QC = QuantumCircuit(self.num_qubits)
        QC.t(range(self.num_qubits))
        return QC
|
class _T_conjugate_operation(_operation):
    """Operation class applying the adjoint T gate (T-dagger) to every qubit."""
    def get_circuit(self, var_param_assignment=None):
        """Build the T-dagger layer; the parameter assignment is unused."""
        circuit = QuantumCircuit(self.num_qubits)
        circuit.tdg(range(self.num_qubits))
        return circuit
|
class _rot_operation(_operation):
    """Base class for parameterized single-qubit rotation operations (Rx, Ry, Rz, P)."""

    def __init__(self, num_qubits: int, variablegroup_tuple: tuple, map=None):
        super().__init__(num_qubits, variablegroup_tuple, map)

    def _next_parameter(self, variablegroup, var_param_assignment):
        """Return the next parameter entry of *variablegroup* and advance its index.

        Groups of unbounded size use the raw running index; finite groups wrap
        the index modulo the group size.
        """
        if variablegroup.size is None:
            value = var_param_assignment[hash(variablegroup)][variablegroup.index]
        else:
            value = var_param_assignment[hash(variablegroup)][variablegroup.index % variablegroup.size]
        variablegroup.increase_index(1)
        return value

    def apply_param_vectors(self, QC, r_star, var_param_assignment):
        """Append one rotation gate per qubit, feeding in the next parameter(s).

        Three cases are distinguished:
        1. Default map and more than two variable groups: error, because the
           default map needs at most two arguments.
        2. Default map and exactly one variable group: the gate is applied with
           the raw parameter, no map involved.
        3. A user map is given (or exactly two groups with the default map):
           the gate angle is map(*group_parameters).

        Args:
            QC: qiskit QuantumCircuit to append to
            r_star: rotation gate class (RXGate, RYGate, RZGate or PhaseGate)
            var_param_assignment: dict mapping hash(variable group) to its
                qiskit parameter vector

        Returns:
            QuantumCircuit with the rotation layer appended.

        Raises:
            ValueError: in case 1.
        """
        if self.default_map and len(self.variablegroup_tuple) > 2:
            raise ValueError('There are too many variable groups given without a map. There can only be one or two parameters without any given map.')
        if self.default_map and len(self.variablegroup_tuple) == 1:
            variablegroup = self.variablegroup_tuple[0]
            for qubit in range(self.num_qubits):
                QC.append(r_star(self._next_parameter(variablegroup, var_param_assignment)), [qubit], [])
        else:
            for qubit in range(self.num_qubits):
                arguments = [self._next_parameter(vg, var_param_assignment) for vg in self.variablegroup_tuple]
                QC.append(r_star(self.map(*arguments)), [qubit], [])
        return QC
|
class _Rx_operation(_rot_operation):
    """Operation class for a layer of Rx rotation gates."""
    def get_circuit(self, var_param_assignment: dict):
        """Build the Rx layer.

        Args:
            var_param_assignment: dict mapping hash(variable group) to its
                qiskit parameter vector
        Returns:
            QuantumCircuit (qiskit)
        """
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, RXGate, var_param_assignment)
|
class _Ry_operation(_rot_operation):
    """Operation class for a layer of Ry rotation gates."""
    def get_circuit(self, var_param_assignment: dict):
        """Build the Ry layer.

        Args:
            var_param_assignment: dict mapping hash(variable group) to its
                qiskit parameter vector
        Returns:
            QuantumCircuit (qiskit)
        """
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, RYGate, var_param_assignment)
|
class _Rz_operation(_rot_operation):
    """Operation class for a layer of Rz rotation gates."""
    def get_circuit(self, var_param_assignment: dict):
        """Build the Rz layer.

        Args:
            var_param_assignment: dict mapping hash(variable group) to its
                qiskit parameter vector
        Returns:
            QuantumCircuit (qiskit)
        """
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, RZGate, var_param_assignment)
|
class _P_operation(_rot_operation):
    """Operation class for a layer of phase (P) gates."""
    def get_circuit(self, var_param_assignment: dict):
        """Build the phase-gate layer.

        Args:
            var_param_assignment: dict mapping hash(variable group) to its
                qiskit parameter vector
        Returns:
            QuantumCircuit (qiskit)
        """
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, PhaseGate, var_param_assignment)
|
class _U_operation(_operation):
    """Operation class for the three-parameter U gate applied to every qubit."""

    def get_circuit(self, var_param_assignment: dict):
        """Build the U-gate layer.

        Args:
            var_param_assignment: dict mapping hash(variable group) to its
                qiskit parameter vector
        Returns:
            QuantumCircuit (qiskit)
        """
        QC = QuantumCircuit(self.num_qubits)
        QC = self.apply_param_vectors(QC, UGate, var_param_assignment)
        return QC

    def apply_param_vectors(self, QC, u_gate, var_param_assignment):
        """Append one u_gate per qubit, consuming one parameter per variable group.

        Works like _rot_operation.apply_param_vectors but without map support:
        the gate arguments are taken directly from the variable groups.
        Finite-size groups wrap their index modulo the group size.
        """
        for qubit in range(self.num_qubits):
            gate_arguments = []
            for variablegroup in self.variablegroup_tuple:
                if variablegroup.size is None:
                    gate_arguments.append(var_param_assignment[hash(variablegroup)][variablegroup.index])
                else:
                    gate_arguments.append(var_param_assignment[hash(variablegroup)][variablegroup.index % variablegroup.size])
                variablegroup.increase_index(1)
            QC.append(u_gate(*gate_arguments), [qubit], [])
        return QC
|
class _two_qubit_operation(_operation):
    """Parent class for any two-qubit (entangling) operation."""

    def __init__(self, num_qubits: int, variablegroup_tuple: tuple, ent_strategy: str, map=None):
        super().__init__(num_qubits, variablegroup_tuple, map)
        # Entangling strategy: 'NN' (nearest neighbor) or 'AA' (all-to-all).
        self.ent_strategy = ent_strategy

    def _qubit_pairs(self):
        """Yield the (first, second) qubit pairs of the entangling strategy.

        'AA' yields every pair (i, j) with i < j; 'NN' yields the even
        nearest-neighbor pairs first, then the odd ones (same order as the
        original per-strategy loops).

        Raises:
            ValueError: for an unknown entangling strategy (raised on the
                first iteration attempt, even when no pairs would be emitted).
        """
        if self.ent_strategy == 'AA':
            for first_qubit in range(self.num_qubits - 1):
                for second_qubit in range(first_qubit + 1, self.num_qubits):
                    yield first_qubit, second_qubit
        elif self.ent_strategy == 'NN':
            for first_qubit in range(0, self.num_qubits - 1, 2):
                yield first_qubit, first_qubit + 1
            for first_qubit in range(1, self.num_qubits - 1, 2):
                yield first_qubit, first_qubit + 1
        else:
            raise ValueError('Wrong entangling strategy input.')

    def _next_parameter(self, variablegroup, var_param_assignment):
        """Return the next parameter of *variablegroup* and advance its index.

        Finite-size groups wrap the index modulo the group size.
        """
        if variablegroup.size is None:
            value = var_param_assignment[hash(variablegroup)][variablegroup.index]
        else:
            value = var_param_assignment[hash(variablegroup)][variablegroup.index % variablegroup.size]
        variablegroup.increase_index(1)
        return value

    def apply_param_vectors(self, QC, gate, var_param_assignment: dict):
        """Append one *gate* to every qubit pair of the entangling strategy.

        Cases:
        1. No variable groups: parameterless gate on every pair.
        2. Default map with more than two groups: error.
        3. Default map with exactly one group: raw parameter per gate.
        4. Otherwise: gate angle is map(*group_parameters) per gate.

        Args:
            QC: qiskit QuantumCircuit to append to
            gate: two-qubit gate class
            var_param_assignment: dict mapping hash(variable group) to its
                qiskit parameter vector

        Returns:
            QuantumCircuit with the entangling layer appended.

        Raises:
            ValueError: in case 2, or for a wrong entangling strategy.
        """
        if self.variablegroup_tuple is None:
            for first_qubit, second_qubit in self._qubit_pairs():
                QC.append(gate(), [first_qubit, second_qubit], [])
        elif self.default_map and len(self.variablegroup_tuple) > 2:
            raise ValueError('In two qubit operation: There are too many variable groups given without a map. There can only be one or two parameters without any given map.')
        elif self.default_map and len(self.variablegroup_tuple) == 1:
            variablegroup = self.variablegroup_tuple[0]
            for first_qubit, second_qubit in self._qubit_pairs():
                QC.append(gate(self._next_parameter(variablegroup, var_param_assignment)), [first_qubit, second_qubit], [])
        else:
            for first_qubit, second_qubit in self._qubit_pairs():
                arguments = [self._next_parameter(vg, var_param_assignment) for vg in self.variablegroup_tuple]
                QC.append(gate(self.map(*arguments)), [first_qubit, second_qubit], [])
        return QC
|
class _CH_entangle_operation(_two_qubit_operation):
    """Default class for a controlled-Hadamard entangling operation."""
    def get_circuit(self, var_param_assignment=None):
        """Build the CH entangling layer."""
        QC = QuantumCircuit(self.num_qubits)
        QC = self.apply_param_vectors(QC, CHGate, var_param_assignment)
        return QC
|
class _CX_entangle_operation(_two_qubit_operation):
    """Entangling operation built from controlled-X (CNOT) gates."""
    def get_circuit(self, var_param_assignment=None):
        """Build and return the CX entangling sub-circuit."""
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, CXGate, var_param_assignment)
|
class _CY_entangle_operation(_two_qubit_operation):
    """Entangling operation built from controlled-Y gates."""
    def get_circuit(self, var_param_assignment=None):
        """Build and return the CY entangling sub-circuit."""
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, CYGate, var_param_assignment)
|
class _CZ_entangle_operation(_two_qubit_operation):
    """Entangling operation built from controlled-Z gates."""
    def get_circuit(self, var_param_assignment=None):
        """Build and return the CZ entangling sub-circuit."""
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, CZGate, var_param_assignment)
|
class _SWAP_operation(_two_qubit_operation):
    """Operation applying SWAP gates on the pairs of the entangling strategy."""
    def get_circuit(self, var_param_assignment=None):
        """Build and return the SWAP sub-circuit."""
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, SwapGate, var_param_assignment)
|
class _CRX_operation(_two_qubit_operation):
    """Entangling operation built from controlled-Rx gates."""
    def get_circuit(self, var_param_assignment: dict):
        """Build and return the CRX entangling sub-circuit."""
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, CRXGate, var_param_assignment)
|
class _CRY_operation(_two_qubit_operation):
    """Entangling operation built from controlled-Ry gates."""
    def get_circuit(self, var_param_assignment: dict):
        """Build and return the CRY entangling sub-circuit."""
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, CRYGate, var_param_assignment)
|
class _CRZ_operation(_two_qubit_operation):
    """Entangling operation built from controlled-Rz gates."""
    def get_circuit(self, var_param_assignment: dict):
        """Build and return the CRZ entangling sub-circuit."""
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, CRZGate, var_param_assignment)
|
class _CP_operation(_two_qubit_operation):
    """Entangling operation built from controlled-phase gates."""
    def get_circuit(self, var_param_assignment: dict):
        """Build and return the CP entangling sub-circuit."""
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, CPhaseGate, var_param_assignment)
|
class _RXX_operation(_two_qubit_operation):
    """Operation class for a RXX (two-qubit XX rotation) operation."""
    def get_circuit(self, var_param_assignment: dict):
        """Build the RXX layer."""
        QC = QuantumCircuit(self.num_qubits)
        QC = self.apply_param_vectors(QC, RXXGate, var_param_assignment)
        return QC
|
class _RYY_operation(_two_qubit_operation):
    """Operation class for a RYY (two-qubit YY rotation) operation."""
    def get_circuit(self, var_param_assignment: dict):
        """Build the RYY layer."""
        QC = QuantumCircuit(self.num_qubits)
        QC = self.apply_param_vectors(QC, RYYGate, var_param_assignment)
        return QC
|
class _RZX_operation(_two_qubit_operation):
    """Operation class for a RZX (two-qubit ZX rotation) operation."""
    def get_circuit(self, var_param_assignment: dict):
        """Build and return the RZX sub-circuit."""
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, RZXGate, var_param_assignment)
|
class _RZZ_operation(_two_qubit_operation):
    """Operation class for a RZZ (two-qubit ZZ rotation) operation."""
    def get_circuit(self, var_param_assignment: dict):
        """Build and return the RZZ sub-circuit."""
        base = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(base, RZZGate, var_param_assignment)
|
class _CU_operation(_two_qubit_operation):
    """Operation class for the controlled-U entangling gate."""

    def get_circuit(self, var_param_assignment: dict):
        """Build the CU entangling layer.

        Args:
            var_param_assignment: dict mapping hash(variable group) to its
                qiskit parameter vector
        Returns:
            QuantumCircuit (qiskit)
        """
        QC = QuantumCircuit(self.num_qubits)
        QC = self.apply_param_vectors(QC, CUGate, var_param_assignment)
        return QC

    def apply_param_vectors(self, QC, cu_gate, var_param_assignment):
        """Append one cu_gate per qubit pair, one parameter per variable group.

        Works like _two_qubit_operation.apply_param_vectors, but without map
        handling: the gate arguments are taken directly from the variable
        groups. Finite-size groups wrap their index modulo the group size.

        Raises:
            ValueError: for an unknown entangling strategy.
        """
        if self.ent_strategy == 'AA':
            pairs = [(first, second)
                     for first in range(self.num_qubits - 1)
                     for second in range(first + 1, self.num_qubits)]
        elif self.ent_strategy == 'NN':
            # Even nearest-neighbor pairs first, then the odd ones.
            pairs = [(first, first + 1) for first in range(0, self.num_qubits - 1, 2)]
            pairs += [(first, first + 1) for first in range(1, self.num_qubits - 1, 2)]
        else:
            raise ValueError('Wrong entangling strategy input.')
        for first_qubit, second_qubit in pairs:
            gate_arguments = []
            for variablegroup in self.variablegroup_tuple:
                if variablegroup.size is None:
                    gate_arguments.append(var_param_assignment[hash(variablegroup)][variablegroup.index])
                else:
                    gate_arguments.append(var_param_assignment[hash(variablegroup)][variablegroup.index % variablegroup.size])
                variablegroup.increase_index(1)
            QC.append(cu_gate(*gate_arguments), [first_qubit, second_qubit], [])
        return QC
|
class LayeredPQC():
'The main class. Contains a list of operations. With that one can build his circuit.'
def __init__(self, num_qubits: int, variable_groups=None):
'\n Takes number of qubits.\n Attributes:\n -----------\n\n Attributes:\n num_qubits (int): Number of qubits in this encoding circuit\n operation_list [list]: List of objects of the class operation or/and of the class _operation_layer, e.g. [H_operation, Rx_operation, layer1, Ry_operation, layer2,...]\n variable_groups [tuple]: Tuple of all variable groups used in this encoding circuit; ATTENTION: If there is only one variable group, be sure to type in "(x,)" and not "(x)" initializing the encoding circuit\n Only if variable_groups is not None:\n variable_name_tuple [tuple]: Tuple of variable names of each variablegroup e.g. variablegroup x_var, x_var2, p_var; variable_name_tuple = (x,x2,p);\n this is only used to create encoding circuits with Strings\n variable_groups_string_tuple [tuple]: Tuple of the hash values for each variable group, with that, you can search the position of each variable_group,\n e.g. variable_groups = (x_var, x_var2,...) with type(x_var) = variable_group and variable_string_list = (hash(x_var),hash(x_var2),...)\n layer_counter [int]: counts the number of different layers of the layer class used\n '
self._num_qubits = num_qubits
self.operation_list = []
self.variable_groups = variable_groups
if (variable_groups != None):
variable_groups_string_list = []
variable_name_list = []
for i in range(len(variable_groups)):
variable_name_list.append(variable_groups[i].variable_name)
variable_groups_string_list.append(hash(variable_groups[i]))
self.variable_name_tuple = tuple(variable_name_list)
self.variable_groups_string_tuple = tuple(variable_groups_string_list)
self.layer_counter = 0
    @property
    def num_qubits(self):
        """Number of qubits of the encoding circuit (read-only)."""
        return self._num_qubits
def add_operation(self, operation):
'\n adds an operation to the operation_list\n Args:\n operation [_operation, _operation_layer]: an operation of the operation class or a layer of operations of the operation_layer class\n '
if isinstance(operation, _operation_layer):
self.operation_list.append(operation)
for layer_operation in operation.layer.operation_list:
variablegroup_tuple = layer_operation.variablegroup_tuple
if (variablegroup_tuple != None):
if (layer_operation.ent_strategy == None):
number_of_variables = self.num_qubits
elif (layer_operation.ent_strategy == 'NN'):
number_of_variables = (self.num_qubits - 1)
else:
number_of_variables = sum((x for x in range(1, self.num_qubits)))
variable_num_list = [(operation.num_layers * number_of_variables) for i in range(len(variablegroup_tuple))]
iteration_counter = 0
for variablegroup in variablegroup_tuple:
variablegroup.increase_used_number_of_variables(variable_num_list[iteration_counter])
iteration_counter += 1
else:
self.operation_list.append(operation)
variablegroup_tuple = operation.variablegroup_tuple
if (variablegroup_tuple != None):
if (operation.ent_strategy == None):
number_of_variables = self.num_qubits
elif (operation.ent_strategy == 'NN'):
number_of_variables = (self.num_qubits - 1)
else:
number_of_variables = sum((x for x in range(1, self.num_qubits)))
variable_num_list = [number_of_variables for i in range(len(variablegroup_tuple))]
iteration_counter = 0
for variablegroup in variablegroup_tuple:
variablegroup.increase_used_number_of_variables(variable_num_list[iteration_counter])
iteration_counter += 1
def add_layer(self, layer, num_layers=1):
'adds a layer of gates to the given encoding circuit'
new_layer = copy.copy(layer)
self.layer_counter += 1
operation_layer = _operation_layer(new_layer, num_layers, self.layer_counter)
self.add_operation(operation_layer)
    def get_params(self, deep: bool=True) -> dict:
        """Return a dict with the number of qubits and the layer repetitions.

        E.g. four qubits and two layers with 2 and 4 applications:
        {'num_qubits': 4, 'num_layers_1': 2, 'num_layers_2': 4}.
        With exactly one layer the key is not enumerated:
        {'num_qubits': 4, 'num_layers': 3}.
        Without any explicit layer, 'num_layers' is reported as 1.
        """
        param = {}
        param['num_qubits'] = self._num_qubits
        layer_counter = 0
        # Collect every explicit layer under an enumerated key; remember the
        # last one seen so a single layer can be collapsed below.
        for iter_layer in self.operation_list:
            if isinstance(iter_layer, _operation_layer):
                layer_counter += 1
                param['num_layers_{}'.format(iter_layer.layer_number)] = iter_layer.num_layers
                number_of_applications = iter_layer.num_layers
                num_layer_name = iter_layer.layer_number
        if (layer_counter == 1):
            # Exactly one layer: use the plain 'num_layers' key instead.
            param.pop('num_layers_{}'.format(num_layer_name))
            param['num_layers'] = number_of_applications
        elif (layer_counter == 0):
            param['num_layers'] = 1
        return param
def set_params(self, **params):
'\n Sets the number of qubits or/and number of application of one or more layers:\n Possible params:\n num_qubits\n num_layers (if there is only one layer)\n num_layers_{i} (but {i} represents the i-th layer and this is only possible, if there are two or more layers)\n '
valid_params = self.get_params()
for (key, value) in params.items():
if (key not in valid_params):
raise ValueError(f'Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}.')
if (key == 'num_qubits'):
if (value == self.num_qubits):
pass
else:
for operation in self.operation_list:
if isinstance(operation, _operation_layer):
operation.change_qubits(value)
else:
var_group_tuple = operation.variablegroup_tuple
operation.num_qubits = value
if (var_group_tuple != None):
if (operation.ent_strategy == None):
for var_group in var_group_tuple:
var_group.increase_used_number_of_variables((value - self.num_qubits))
elif (operation.ent_strategy == 'NN'):
for var_group in var_group_tuple:
var_group.increase_used_number_of_variables((value - self.num_qubits))
else:
for var_group in var_group_tuple:
old_num_of_variables = sum((x for x in range(1, self.num_qubits)))
new_num_of_variables = sum((x for x in range(1, value)))
var_group.increase_used_number_of_variables((new_num_of_variables - old_num_of_variables))
self._num_qubits = value
else:
if (key == 'num_layers'):
layer_number = 1
else:
layer_number = int(key[11])
op_iter = (- 1)
on_right_layer = False
while ((not on_right_layer) and (op_iter < len(self.operation_list))):
op_iter += 1
if (op_iter >= len(self.operation_list)):
break
if isinstance(self.operation_list[op_iter], _operation_layer):
if (self.operation_list[op_iter].layer_number == layer_number):
on_right_layer = True
self.operation_list[op_iter].change_num_layers(value)
if (not on_right_layer):
self_layer = LayerPQC(self)
self_layer.operation_list = copy.copy(self.operation_list)
self.operation_list = []
for var in self.variable_groups:
if (var.size == None):
var.total_variables_used = 0
self.add_layer(self_layer, value)
    def get_number_of_variables(self, variablegroup: VariableGroup):
        """Return how often *variablegroup* was used.

        Required to size the qiskit parameter vectors for this group.
        """
        return variablegroup.num_variables
def get_circuit(self, *args):
'\n returns the quantum circuit\n Args:\n *args: is a tuple with parameter vectors as its entries\n '
if (self.variable_groups != None):
for i in range(len(self.variable_groups)):
self.variable_groups[i].set_index_to_zero()
var_param_assignment = {hash(self.variable_groups[i]): args[i] for i in range(len(args))}
QC = QuantumCircuit(self.num_qubits)
for operation in self.operation_list:
if isinstance(operation, _operation_layer):
operation_layer = operation
for i in range(operation_layer.num_layers):
for op in operation_layer.layer.operation_list:
if (op.variablegroup_tuple == None):
QC = QC.compose(op.get_circuit())
else:
QC = QC.compose(op.get_circuit(var_param_assignment))
elif (operation.variablegroup_tuple == None):
QC = QC.compose(operation.get_circuit())
else:
QC = QC.compose(operation.get_circuit(var_param_assignment))
return QC
    def H(self):
        """Add a layer of Hadamard gates (one per qubit)."""
        self.add_operation(_H_operation(self.num_qubits, None))

    def X(self):
        """Add a layer of Pauli-X gates (one per qubit)."""
        self.add_operation(_X_operation(self.num_qubits, None))

    def Y(self):
        """Add a layer of Pauli-Y gates (one per qubit)."""
        self.add_operation(_Y_operation(self.num_qubits, None))

    def Z(self):
        """Add a layer of Pauli-Z gates (one per qubit)."""
        self.add_operation(_Z_operation(self.num_qubits, None))

    def I(self):
        """Add a layer of identity gates (one per qubit)."""
        self.add_operation(_Id_operation(self.num_qubits, None))

    def S(self):
        """Add a layer of S gates (one per qubit)."""
        self.add_operation(_S_operation(self.num_qubits, None))

    def S_conjugate(self):
        """Add a layer of S-dagger gates (one per qubit)."""
        self.add_operation(_S_conjugate_operation(self.num_qubits, None))

    def T(self):
        """Add a layer of T gates (one per qubit)."""
        self.add_operation(_T_operation(self.num_qubits, None))

    def T_conjugate(self):
        """Add a layer of T-dagger gates (one per qubit)."""
        self.add_operation(_T_conjugate_operation(self.num_qubits, None))
def Rx(self, *variablegroup_tuple, map=None):
'\n Adds a Rx gate layer.\n\n Args:\n variablegroup_tuple: is a tuple of variable types (x1,x2 etc.)\n '
if (map == None):
self.add_operation(_Rx_operation(self.num_qubits, variablegroup_tuple))
else:
self.add_operation(_Rx_operation(self.num_qubits, variablegroup_tuple, map))
def Ry(self, *variablegroup_tuple, map=None):
'\n Adds a Ry gate layer.\n\n Args:\n variablegroup_tuple: is a tuple of variable types (x1,x2 etc.)\n '
if (map == None):
self.add_operation(_Ry_operation(self.num_qubits, variablegroup_tuple))
else:
self.add_operation(_Ry_operation(self.num_qubits, variablegroup_tuple, map))
def Rz(self, *variablegroup_tuple, map=None):
'\n Adds a Rz gate layer.\n\n Args:\n variablegroup_tuple: is a tuple of variable types (x1,x2 etc.)\n '
if (map == None):
self.add_operation(_Rz_operation(self.num_qubits, variablegroup_tuple))
else:
self.add_operation(_Rz_operation(self.num_qubits, variablegroup_tuple, map))
def P(self, *variablegroup_tuple, map=None):
'\n Adds a P gate layer.\n\n Args:\n variablegroup_tuple: is a tuple of variable types (x1,x2 etc.)\n '
if (map == None):
if (len(variablegroup_tuple) != 1):
raise ValueError('There must be one variable group for a P gate.')
self.add_operation(_P_operation(self.num_qubits, variablegroup_tuple))
else:
self.add_operation(_P_operation(self.num_qubits, variablegroup_tuple, map))
def U(self, *variablegroup_tuple):
'\n Adds a U gate layer.\n\n Args:\n variablegroup_tuple: is a tuple of variable types (x1,x2 etc.)\n '
map = None
if isinstance(variablegroup_tuple[0], tuple):
variablegroup_tuple = variablegroup_tuple[0]
if (map == None):
if (len(variablegroup_tuple) != 3):
raise ValueError('There must be three variable groups for a U gate.')
self.add_operation(_U_operation(self.num_qubits, variablegroup_tuple))
else:
self.add_operation(_U_operation(self.num_qubits, variablegroup_tuple, map))
    def ch_entangling(self, ent_strategy='NN'):
        """Add a controlled-Hadamard entangling layer.

        Args:
            ent_strategy: entangling strategy, "NN" (nearest neighbor,
                default) or "AA" (all-to-all).
        """
        self.add_operation(_CH_entangle_operation(self.num_qubits, None, ent_strategy, map=None))

    def cx_entangling(self, ent_strategy='NN'):
        """Add a controlled-X entangling layer.

        Args:
            ent_strategy: entangling strategy, "NN" (nearest neighbor,
                default) or "AA" (all-to-all).
        """
        self.add_operation(_CX_entangle_operation(self.num_qubits, None, ent_strategy, map=None))

    def cy_entangling(self, ent_strategy='NN'):
        """Add a controlled-Y entangling layer.

        Args:
            ent_strategy: entangling strategy, "NN" (nearest neighbor,
                default) or "AA" (all-to-all).
        """
        self.add_operation(_CY_entangle_operation(self.num_qubits, None, ent_strategy, map=None))

    def cz_entangling(self, ent_strategy='NN'):
        """Add a controlled-Z entangling layer.

        Args:
            ent_strategy: entangling strategy, "NN" (nearest neighbor,
                default) or "AA" (all-to-all).
        """
        self.add_operation(_CZ_entangle_operation(self.num_qubits, None, ent_strategy, map=None))

    def swap(self, ent_strategy='NN'):
        """Add a layer of SWAP gates.

        Args:
            ent_strategy: entangling strategy, "NN" (nearest neighbor,
                default) or "AA" (all-to-all).
        """
        self.add_operation(_SWAP_operation(self.num_qubits, None, ent_strategy, map=None))
def cp_entangling(self, *variablegroup_tuple, ent_strategy='NN', map=None):
'\n Adds a controlled phase entangling gate layer.\n args:\n *variablegroup_tuple: should be an empty tuple, because there are no variable groups needed\n Optional:\n ent_strategy: the entangling strategy (NN or AA)\n Default ("NN"): Adds a controlled x nearest neighbor entangling operation\n otherwise ("AA"): Adds a controlled x all in all entangling operation\n map: a function for one or more variable groups\n '
self.add_operation(_CP_operation(self.num_qubits, variablegroup_tuple, ent_strategy, map))
def crx_entangling(self, *variablegroup_tuple, ent_strategy='NN', map=None):
'\n Adds a controlled rx gate layer.\n args:\n *variablegroup_tuple: should be an empty tuple, because there are no variable groups needed\n Optional:\n ent_strategy: the entangling strategy (NN or AA)\n Default ("NN"): Adds a controlled x nearest neighbor entangling operation\n otherwise ("AA"): Adds a controlled x all in all entangling operation\n map: a function for one or more variable groups\n '
self.add_operation(_CRX_operation(self.num_qubits, variablegroup_tuple, ent_strategy, map))
def cry_entangling(self, *variablegroup_tuple, ent_strategy='NN', map=None):
'\n Adds a controlled ry gate layer.\n args:\n *variablegroup_tuple: should be an empty tuple, because there are no variable groups needed\n Optional:\n ent_strategy: the entangling strategy (NN or AA)\n Default ("NN"): Adds a controlled x nearest neighbor entangling operation\n otherwise ("AA"): Adds a controlled x all in all entangling operation\n map: a function for one or more variable groups\n '
self.add_operation(_CRY_operation(self.num_qubits, variablegroup_tuple, ent_strategy, map))
def crz_entangling(self, *variablegroup_tuple, ent_strategy='NN', map=None):
'\n Adds a controlled rz gate layer.\n args:\n *variablegroup_tuple: should be an empty tuple, because there are no variable groups needed\n Optional:\n ent_strategy: the entangling strategy (NN or AA)\n Default ("NN"): Adds a controlled x nearest neighbor entangling operation\n otherwise ("AA"): Adds a controlled x all in all entangling operation\n map: a function for one or more variable groups\n '
self.add_operation(_CRZ_operation(self.num_qubits, variablegroup_tuple, ent_strategy, map))
def rxx_entangling(self, *variablegroup_tuple, ent_strategy='NN', map=None):
'\n Adds rxx gate layer.\n args:\n *variablegroup_tuple: should be an empty tuple, because there are no variable groups needed\n Optional:\n ent_strategy: the entangling strategy (NN or AA)\n Default ("NN"): Adds a controlled x nearest neighbor entangling operation\n otherwise ("AA"): Adds a controlled x all in all entangling operation\n map: a function for one or more variable groups\n '
self.add_operation(_RXX_operation(self.num_qubits, variablegroup_tuple, ent_strategy, map))
def ryy_entangling(self, *variablegroup_tuple, ent_strategy='NN', map=None):
    '''
    Adds an Ryy gate layer.

    Args:
        *variablegroup_tuple: variable group(s) that supply the rotation angle of the
            Ryy gates
    Optional:
        ent_strategy: the entangling strategy ("NN" or "AA")
            Default ("NN"): nearest neighbor entangling
            otherwise ("AA"): all to all entangling
        map: a function for one or more variable groups (default: None)
    '''
    self.add_operation(_RYY_operation(self.num_qubits, variablegroup_tuple, ent_strategy, map))
def rzx_entangling(self, *variablegroup_tuple, ent_strategy='NN', map=None):
    '''
    Adds an Rzx gate layer.

    Args:
        *variablegroup_tuple: variable group(s) that supply the rotation angle of the
            Rzx gates
    Optional:
        ent_strategy: the entangling strategy ("NN" or "AA")
            Default ("NN"): nearest neighbor entangling
            otherwise ("AA"): all to all entangling
        map: a function for one or more variable groups (default: None)
    '''
    self.add_operation(_RZX_operation(self.num_qubits, variablegroup_tuple, ent_strategy, map))
def rzz_entangling(self, *variablegroup_tuple, ent_strategy='NN', map=None):
    '''
    Adds an Rzz gate layer.

    Args:
        *variablegroup_tuple: variable group(s) that supply the rotation angle of the
            Rzz gates
    Optional:
        ent_strategy: the entangling strategy ("NN" or "AA")
            Default ("NN"): nearest neighbor entangling
            otherwise ("AA"): all to all entangling
        map: a function for one or more variable groups (default: None)
    '''
    self.add_operation(_RZZ_operation(self.num_qubits, variablegroup_tuple, ent_strategy, map))
def cu_entangling(self, *variablegroup_tuple, ent_strategy='NN', map=None):
    '''
    Adds a controlled unitary gate layer.

    Args:
        *variablegroup_tuple: tuple with 4 entries (the four parameters of the
            controlled unitary gate); a single tuple argument is also accepted
            and unwrapped.
    Optional:
        ent_strategy: the entangling strategy ("NN" or "AA")
            Default ("NN"): nearest neighbor entangling
            otherwise ("AA"): all to all entangling
        map: not supported for a controlled unitary gate

    Raises:
        AttributeError: if a map is provided.
    '''
    # Allow the four variable groups to be passed as one tuple argument.
    if isinstance(variablegroup_tuple[0], tuple):
        variablegroup_tuple = variablegroup_tuple[0]
    # Use identity comparison with None (idiomatic; was ``map != None``).
    if map is not None:
        raise AttributeError('There must be no map for a cu entangling layer.')
    self.add_operation(_CU_operation(self.num_qubits, variablegroup_tuple, ent_strategy, map))
@classmethod
def from_string(cls, num_qubits: int, gate_layers: str, variable_groups=None):
    '''
    Initializes an encoding circuit through a given string of gates.

    Gates are separated by "-"; a repeated layer is written as "N[...]" with N
    repetitions. Gate arguments reference variable groups by name; an encoding
    map can follow a ";" as "=<expression>,{<arg names>}".

    Args:
        num_qubits (int): Number of qubits of the encoding circuit.
        gate_layers (str): String that specifies the gate sequence.
        variable_groups: Variable groups whose names may appear in gate arguments.

    Returns:
        The encoding circuit built from the parsed string.

    Raises:
        ValueError: on malformed input (unbalanced brackets, unknown gates, ...).
    '''
    def generate_function(map_string, args):
        'Translates a string into a function'
        # NOTE(review): exec on a user-supplied substring — only safe for trusted
        # input. ``math_function`` is injected into globals() by the exec call.
        function_string = 'def math_function({var}):\n return {func}\n '.format(func=map_string, var=args)
        exec(function_string, globals())
        return math_function
    def get_closing_bracket_index(word, index):
        "gives to an open round bracket '(' the location of the closing bracket. This works especially, if there are more than one open brackets."
        if (word[index] != '('):
            raise ValueError('There must be an open bracket at index ', index)
        bracket_open_counter = 0
        for k in range(index, len(word)):
            if (word[k] == ')'):
                bracket_open_counter -= 1
            elif (word[k] == '('):
                bracket_open_counter += 1
            if (bracket_open_counter == 0):
                return k
        raise ValueError('At least one closed bracket is missing.')
    def make_digit_list_to_number(digit_list):
        'Transforms a list of digits into a number'
        number_string = ''.join(digit_list)
        return int(number_string)
    encoding_circuit = cls(num_qubits, variable_groups)
    # Whitespace is irrelevant for the grammar, so strip it up front.
    gate_layers = gate_layers.replace(' ', '')
    string_iterator = 0
    # Operations go either to the main circuit or, inside "N[...]", to a layer.
    encoding_circuit_active = encoding_circuit
    closed_brackets = True
    while (string_iterator < len(gate_layers)):
        character_iter = gate_layers[string_iterator]
        if (character_iter == '-'):
            # "-" is only a separator between gates.
            string_iterator += 1
        elif character_iter.isdigit():
            # A number prefix starts a repeated layer: "N[...]".
            digit_list = []
            while gate_layers[string_iterator].isdigit():
                digit_list.append(gate_layers[string_iterator])
                string_iterator += 1
            if (gate_layers[string_iterator] != '['):
                raise ValueError('To create different layers we need "[".')
            number_of_layers = make_digit_list_to_number(digit_list)
            # Subsequent gates are collected in this layer until the matching "]".
            encoding_circuit_active = LayerPQC(encoding_circuit)
        elif (character_iter == '['):
            closed_brackets = False
            string_iterator += 1
        elif (character_iter == ']'):
            if (closed_brackets == True):
                raise ValueError('There are to many closed brackets.')
            closed_brackets = True
            # NOTE(review): number_of_layers/encoding_circuit_active are set in the
            # digit branch; a "]" without a preceding "N[" raises NameError here.
            encoding_circuit.add_layer(encoding_circuit_active, num_layers=number_of_layers)
            encoding_circuit_active = encoding_circuit
            string_iterator += 1
        elif (character_iter == 'H'):
            encoding_circuit_active.H()
            string_iterator += 1
        elif (character_iter == 'X'):
            encoding_circuit_active.X()
            string_iterator += 1
        elif (character_iter == 'Y'):
            encoding_circuit_active.Y()
            string_iterator += 1
        elif (character_iter == 'Z'):
            encoding_circuit_active.Z()
            string_iterator += 1
        elif (character_iter == 'I'):
            encoding_circuit_active.I()
            string_iterator += 1
        elif (character_iter == 'S'):
            # "S" or "Sc" (conjugated S gate).
            if ((string_iterator + 1) < len(gate_layers)):
                character_iter_1 = gate_layers[(string_iterator + 1)]
                if (character_iter_1 == 'c'):
                    encoding_circuit_active.S_conjugate()
                    string_iterator += 2
                else:
                    encoding_circuit_active.S()
                    string_iterator += 1
            else:
                encoding_circuit_active.S()
                string_iterator += 1
        elif (character_iter == 'T'):
            # "T" or "Tc" (conjugated T gate).
            if ((string_iterator + 1) < len(gate_layers)):
                character_iter_1 = gate_layers[(string_iterator + 1)]
                if (character_iter_1 == 'c'):
                    encoding_circuit_active.T_conjugate()
                    string_iterator += 2
                else:
                    encoding_circuit_active.T()
                    string_iterator += 1
            else:
                encoding_circuit_active.T()
                string_iterator += 1
        elif (character_iter == 'R'):
            # Rotation gates "Rx(...)", "Ry(...)", "Rz(...)".
            character_iter_1 = gate_layers[(string_iterator + 1)]
            open_bracket_index = (string_iterator + 2)
            end_word = get_closing_bracket_index(gate_layers, open_bracket_index)
            # A ";" inside the argument list separates variables from an encoding map.
            semicolon1 = (';' in gate_layers[string_iterator:end_word])
            if semicolon1:
                semicolon1_index = gate_layers.index(';', string_iterator)
                param_vector_word_with_commas = gate_layers[(string_iterator + 3):semicolon1_index]
                param_vector_name_list = param_vector_word_with_commas.split(',')
                param_vector_list = []
                for param_vector_name in param_vector_name_list:
                    param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                    param_vector_list.append(encoding_circuit.variable_groups[param_index])
                if (gate_layers[(semicolon1_index + 1)] == '='):
                    # Map syntax: ";=<expression>,{<arg names>}".
                    map_comma_index = gate_layers.index(',', semicolon1_index)
                    map_string = gate_layers[(semicolon1_index + 2):map_comma_index]
                    bra = gate_layers.index('{', map_comma_index)
                    cket = gate_layers.index('}', map_comma_index)
                    map_args = gate_layers[(bra + 1):cket]
                    map_from_string = generate_function(map_string, map_args)
                else:
                    raise ValueError('Wrong input 2.')
                if (character_iter_1 == 'x'):
                    encoding_circuit_active.Rx(*param_vector_list, map=map_from_string)
                elif (character_iter_1 == 'y'):
                    encoding_circuit_active.Ry(*param_vector_list, map=map_from_string)
                elif (character_iter_1 == 'z'):
                    encoding_circuit_active.Rz(*param_vector_list, map=map_from_string)
                else:
                    raise ValueError('Unknown rotation gate.')
            else:
                # No map: a single variable group name between the brackets.
                param_vector_name = gate_layers[(string_iterator + 3):end_word]
                param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                param_vector = encoding_circuit.variable_groups[param_index]
                if (character_iter_1 == 'x'):
                    encoding_circuit_active.Rx(param_vector)
                elif (character_iter_1 == 'y'):
                    encoding_circuit_active.Ry(param_vector)
                elif (character_iter_1 == 'z'):
                    encoding_circuit_active.Rz(param_vector)
                else:
                    raise ValueError('Unknown rotation gate.')
            string_iterator = (end_word + 1)
        elif (character_iter == 'P'):
            # Phase gate "P(...)", same argument grammar as the rotation gates.
            open_bracket_index = (string_iterator + 1)
            end_word = get_closing_bracket_index(gate_layers, open_bracket_index)
            semicolon1 = (';' in gate_layers[string_iterator:end_word])
            if semicolon1:
                semicolon1_index = gate_layers.index(';', string_iterator)
                param_vector_word_with_commas = gate_layers[(string_iterator + 2):semicolon1_index]
                param_vector_name_list = param_vector_word_with_commas.split(',')
                param_vector_list = []
                for param_vector_name in param_vector_name_list:
                    param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                    param_vector_list.append(encoding_circuit.variable_groups[param_index])
                if (gate_layers[(semicolon1_index + 1)] == '='):
                    map_comma_index = gate_layers.index(',', semicolon1_index)
                    map_string = gate_layers[(semicolon1_index + 2):map_comma_index]
                    bra = gate_layers.index('{', map_comma_index)
                    cket = gate_layers.index('}', map_comma_index)
                    map_args = gate_layers[(bra + 1):cket]
                    map_from_string = generate_function(map_string, map_args)
                else:
                    raise ValueError('Wrong input 2.')
                encoding_circuit_active.P(*param_vector_list, map=map_from_string)
            else:
                param_vector_name = gate_layers[(string_iterator + 2):end_word]
                param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                param_vector = encoding_circuit.variable_groups[param_index]
                encoding_circuit_active.P(param_vector)
            string_iterator = (end_word + 1)
        elif (character_iter == 'U'):
            # Unitary gate "U(a,b,c)" with exactly three variable group names.
            open_bracket_index = (string_iterator + 1)
            end_word = get_closing_bracket_index(gate_layers, open_bracket_index)
            param_vector_word_with_commas = gate_layers[(string_iterator + 2):end_word]
            param_vector_name_list = param_vector_word_with_commas.split(',')
            if (len(param_vector_name_list) != 3):
                raise ValueError('There must be exactly three parameters for an U gate.')
            param_vector_list = []
            for param_vector_name in param_vector_name_list:
                param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                param_vector_list.append(encoding_circuit.variable_groups[param_index])
            encoding_circuit_active.U(*param_vector_list)
            string_iterator = (end_word + 1)
        elif ((character_iter == 'c') or (character_iter == 'r')):
            # Two-qubit gates: ch/cx/cy/cz/cs (swap), cr*, r**, cp, cu.
            character_iter_1 = gate_layers[(string_iterator + 1)]
            function_pointer = None
            if (character_iter_1 == 'h'):
                function_pointer = encoding_circuit_active.ch_entangling
            elif (character_iter_1 == 'x'):
                function_pointer = encoding_circuit_active.cx_entangling
            elif (character_iter_1 == 'y'):
                function_pointer = encoding_circuit_active.cy_entangling
            elif (character_iter_1 == 'z'):
                function_pointer = encoding_circuit_active.cz_entangling
            elif (character_iter_1 == 's'):
                function_pointer = encoding_circuit_active.swap
            if (character_iter == 'r'):
                # "rx"/"ry"/"rz" here belong to the parameterized r** gates below.
                function_pointer = None
            if (function_pointer != None):
                # Parameter-free entangling gate, optionally with "(AA)"/"(NN)".
                if ((string_iterator + 2) < len(gate_layers)):
                    character_iter_2 = gate_layers[(string_iterator + 2)]
                    if (character_iter_2 == '('):
                        character_iter_3 = gate_layers[(string_iterator + 3)]
                        character_iter_4 = gate_layers[(string_iterator + 4)]
                        character_iter_5 = gate_layers[(string_iterator + 5)]
                        if (character_iter_5 != ')'):
                            raise ValueError('Unknown entangling strategy.')
                        if (character_iter_3 == character_iter_4 == 'A'):
                            function_pointer(ent_strategy='AA')
                        elif (character_iter_3 == character_iter_4 == 'N'):
                            function_pointer(ent_strategy='NN')
                        else:
                            raise ValueError('Unknown entangling strategy.')
                        string_iterator += 6
                    else:
                        function_pointer()
                        string_iterator += 2
                else:
                    function_pointer()
                    string_iterator += 2
            elif (character_iter_1 in ('r', 'x', 'y', 'z')):
                # Parameterized two-qubit rotations: crx/cry/crz, rxx/ryy/rzz/rzx.
                if ((string_iterator + 2) < len(gate_layers)):
                    character_iter_2 = gate_layers[(string_iterator + 2)]
                else:
                    raise ValueError('Wrong rotation entangling input.')
                func = None
                if ((character_iter_1 == 'r') and (character_iter_2 == 'x')):
                    func = encoding_circuit_active.crx_entangling
                elif ((character_iter_1 == 'r') and (character_iter_2 == 'y')):
                    func = encoding_circuit_active.cry_entangling
                elif ((character_iter_1 == 'r') and (character_iter_2 == 'z')):
                    func = encoding_circuit_active.crz_entangling
                elif ((character_iter_1 == 'x') and (character_iter_2 == 'x')):
                    func = encoding_circuit_active.rxx_entangling
                elif ((character_iter_1 == 'y') and (character_iter_2 == 'y')):
                    func = encoding_circuit_active.ryy_entangling
                elif ((character_iter_1 == 'z') and (character_iter_2 == 'z')):
                    func = encoding_circuit_active.rzz_entangling
                elif ((character_iter_1 == 'z') and (character_iter_2 == 'x')):
                    func = encoding_circuit_active.rzx_entangling
                else:
                    raise ValueError('Unknown rotation gate.')
                ent_strategy = 'NN'
                given_map = False
                open_bracket_index = (string_iterator + 3)
                end_word = get_closing_bracket_index(gate_layers, open_bracket_index)
                semicolon1 = (';' in gate_layers[string_iterator:end_word])
                if semicolon1:
                    semicolon1_index = gate_layers.index(';', string_iterator)
                    param_vector_word_with_commas = gate_layers[(string_iterator + 4):semicolon1_index]
                    param_vector_name_list = param_vector_word_with_commas.split(',')
                    param_vector_list = []
                    for param_vector_name in param_vector_name_list:
                        param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                        param_vector_list.append(encoding_circuit.variable_groups[param_index])
                    # A second ";" carries the entangling strategy after the map.
                    semicolon2 = (';' in gate_layers[(semicolon1_index + 1):end_word])
                    if semicolon2:
                        semicolon2_index = gate_layers.index(';', (semicolon1_index + 1))
                        if (gate_layers[(semicolon2_index + 1):(semicolon2_index + 3)] == 'AA'):
                            ent_strategy = 'AA'
                        elif (gate_layers[(semicolon2_index + 1):(semicolon2_index + 3)] == 'NN'):
                            ent_strategy = 'NN'
                        else:
                            raise ValueError('Wrong input1.')
                    if (gate_layers[(semicolon1_index + 1)] == '='):
                        given_map = True
                        map_comma_index = gate_layers.index(',', semicolon1_index)
                        map_string = gate_layers[(semicolon1_index + 2):map_comma_index]
                        bra = gate_layers.index('{', map_comma_index)
                        cket = gate_layers.index('}', map_comma_index)
                        map_args = gate_layers[(bra + 1):cket]
                        map_from_string = generate_function(map_string, map_args)
                    elif ((gate_layers[(semicolon1_index + 1):(semicolon1_index + 3)] == 'AA') and (not semicolon2)):
                        ent_strategy = 'AA'
                    elif ((gate_layers[(semicolon1_index + 1):(semicolon1_index + 3)] == 'NN') and (not semicolon2)):
                        ent_strategy = 'NN'
                    else:
                        raise ValueError('Wrong input2.')
                    if given_map:
                        func(*param_vector_list, map=map_from_string, ent_strategy=ent_strategy)
                    else:
                        func(*param_vector_list, ent_strategy=ent_strategy)
                else:
                    param_vector_name = gate_layers[(string_iterator + 4):end_word]
                    param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                    param_vector = encoding_circuit.variable_groups[param_index]
                    func(param_vector)
                string_iterator = (end_word + 1)
            elif (character_iter_1 == 'p'):
                # Controlled phase gate "cp(...)".
                if ((string_iterator + 2) < len(gate_layers)):
                    character_iter_2 = gate_layers[(string_iterator + 2)]
                else:
                    raise ValueError('Wrong phase entangling input.')
                ent_strategy = 'NN'
                given_map = False
                open_bracket_index = (string_iterator + 2)
                end_word = get_closing_bracket_index(gate_layers, open_bracket_index)
                semicolon1 = (';' in gate_layers[string_iterator:end_word])
                if semicolon1:
                    semicolon1_index = gate_layers.index(';', string_iterator)
                    param_vector_word_with_commas = gate_layers[(string_iterator + 3):semicolon1_index]
                    param_vector_name_list = param_vector_word_with_commas.split(',')
                    param_vector_list = []
                    for param_vector_name in param_vector_name_list:
                        param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                        param_vector_list.append(encoding_circuit.variable_groups[param_index])
                    semicolon2 = (';' in gate_layers[(semicolon1_index + 1):end_word])
                    if semicolon2:
                        semicolon2_index = gate_layers.index(';', (semicolon1_index + 1))
                        if (gate_layers[(semicolon2_index + 1):(semicolon2_index + 3)] == 'AA'):
                            ent_strategy = 'AA'
                        elif (gate_layers[(semicolon2_index + 1):(semicolon2_index + 3)] == 'NN'):
                            ent_strategy = 'NN'
                        else:
                            raise ValueError('Wrong input1.')
                    if (gate_layers[(semicolon1_index + 1)] == '='):
                        given_map = True
                        map_comma_index = gate_layers.index(',', semicolon1_index)
                        map_string = gate_layers[(semicolon1_index + 2):map_comma_index]
                        bra = gate_layers.index('{', map_comma_index)
                        cket = gate_layers.index('}', map_comma_index)
                        map_args = gate_layers[(bra + 1):cket]
                        map_from_string = generate_function(map_string, map_args)
                    elif ((gate_layers[(semicolon1_index + 1):(semicolon1_index + 3)] == 'AA') and (not semicolon2)):
                        ent_strategy = 'AA'
                    elif ((gate_layers[(semicolon1_index + 1):(semicolon1_index + 3)] == 'NN') and (not semicolon2)):
                        ent_strategy = 'NN'
                    else:
                        raise ValueError('Wrong input2.')
                    if given_map:
                        encoding_circuit_active.cp_entangling(*param_vector_list, map=map_from_string, ent_strategy=ent_strategy)
                    else:
                        encoding_circuit_active.cp_entangling(*param_vector_list, ent_strategy=ent_strategy)
                else:
                    param_vector_name = gate_layers[(string_iterator + 3):end_word]
                    param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                    param_vector = encoding_circuit.variable_groups[param_index]
                    encoding_circuit_active.cp_entangling(param_vector)
                string_iterator = (end_word + 1)
            elif (character_iter_1 == 'u'):
                # Controlled unitary gate "cu(...)".
                if ((string_iterator + 2) < len(gate_layers)):
                    character_iter_2 = gate_layers[(string_iterator + 2)]
                else:
                    raise ValueError('Wrong phase entangling input.')
                open_bracket_index = (string_iterator + 2)
                end_word = get_closing_bracket_index(gate_layers, open_bracket_index)
                semicolon1 = (';' in gate_layers[string_iterator:end_word])
                if semicolon1:
                    semicolon1_index = gate_layers.index(';', string_iterator)
                    param_vector_word_with_commas = gate_layers[(string_iterator + 3):semicolon1_index]
                    param_vector_name_list = param_vector_word_with_commas.split(',')
                    param_vector_list = []
                    for param_vector_name in param_vector_name_list:
                        param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                        param_vector_list.append(encoding_circuit.variable_groups[param_index])
                    if (gate_layers[(semicolon1_index + 1):(semicolon1_index + 3)] == 'AA'):
                        encoding_circuit_active.cu_entangling(*param_vector_list, ent_strategy='AA')
                    elif (gate_layers[(semicolon1_index + 1):(semicolon1_index + 3)] == 'NN'):
                        encoding_circuit_active.cu_entangling(*param_vector_list, ent_strategy='NN')
                    else:
                        raise ValueError('Unknown entangling strategy input.')
                else:
                    param_vector_word_with_commas = gate_layers[(string_iterator + 3):end_word]
                    param_vector_name_list = param_vector_word_with_commas.split(',')
                    param_vector_list = []
                    for param_vector_name in param_vector_name_list:
                        param_index = encoding_circuit.variable_name_tuple.index(param_vector_name)
                        param_vector_list.append(encoding_circuit.variable_groups[param_index])
                    encoding_circuit_active.cu_entangling(*param_vector_list)
                string_iterator = (end_word + 1)
            else:
                raise ValueError('Unknown entangling operation.')
        else:
            raise ValueError((character_iter + ' is an unknown operation input or an unknown character.'))
    return encoding_circuit
def to_encoding_circuit(self, feature_variable_group: Union[(VariableGroup, list)], parameters_variable_group: Union[(VariableGroup, list)]) -> "ConvertedLayeredEncodingCircuit":
    '''
    Converts this LayeredPQC into the sQUlearn encoding circuit format.

    Args:
        feature_variable_group (Union[VariableGroup, list]): Variable group(s)
            that are considered as features.
        parameters_variable_group (Union[VariableGroup, list]): Variable group(s)
            that are considered as trainable parameters.

    Returns:
        ConvertedLayeredEncodingCircuit wrapping this circuit.
    '''
    return ConvertedLayeredEncodingCircuit(self, feature_variable_group, parameters_variable_group)
|
class LayerPQC(LayeredPQC):
    '''
    Default class for a layer: the user is able to build their own list of operations,
    and this list can be added to the main class LayeredEncodingCircuit.
    '''
    def __init__(self, encoding_circuit: LayeredPQC):
        # Share qubit count and variable groups with the parent circuit, so gates
        # added to the layer reference the same variables.
        super().__init__(encoding_circuit.num_qubits, encoding_circuit.variable_groups)
    def add_operation(self, operation: _operation):
        '''
        Like the parent add_operation method, except that the variable groups must not
        be counted up here — otherwise they would be counted once too often when the
        layer is added to the main circuit.
        '''
        self.operation_list.append(operation)
|
class ConvertedLayeredEncodingCircuit(EncodingCircuitBase):
    '''
    Data structure for converting a LayeredPQC structure into the sQUlearn encoding
    circuit structure. The programmer specifies which variable groups are considered
    as features and which are parameters.

    Args:
        layered_pqc (LayeredPQC): Layered PQC that should be converted.
        feature_variable_group (Union[VariableGroup, list]): Variable group(s) that
            are considered as the feature variables.
        parameters_variable_group (Union[VariableGroup, list]): Variable group(s) that
            are considered as the parameter variables.
    '''
    def __init__(self, layered_pqc: LayeredPQC, feature_variable_group: Union[(VariableGroup, list)], parameters_variable_group: Union[(VariableGroup, list)]) -> None:
        self._layered_pqc = layered_pqc
        # Normalize single variable groups to one-element lists.
        if isinstance(feature_variable_group, VariableGroup):
            feature_variable_group = [feature_variable_group]
        self._feature_variable_group = feature_variable_group
        if isinstance(parameters_variable_group, VariableGroup):
            parameters_variable_group = [parameters_variable_group]
        self._parameters_variable_group = parameters_variable_group
    @property
    def num_qubits(self) -> int:
        'Returns number of qubits of the Layered Encoding Circuit'
        return self._layered_pqc.num_qubits
    @property
    def num_features(self) -> int:
        'Returns number of features of the Layered Encoding Circuit'
        return sum(self._layered_pqc.get_number_of_variables(vg) for vg in self._feature_variable_group)
    @property
    def num_parameters(self) -> int:
        'Returns number of parameters of the Layered Encoding Circuit'
        return sum(self._layered_pqc.get_number_of_variables(vg) for vg in self._parameters_variable_group)
    def _named_chunks(self, values, variable_groups) -> dict:
        '''
        Splits a flat input vector into per-variable-group chunks.

        Args:
            values: Flat vector (ParameterVector or np.ndarray) to split.
            variable_groups: Variable groups defining chunk sizes and names.

        Returns:
            dict mapping each group's variable_name to its chunk of ``values``.
        '''
        names = []
        split_points = []
        offset = 0
        for vg in variable_groups:
            offset += self._layered_pqc.get_number_of_variables(vg)
            split_points.append(offset)
            names.append(vg.variable_name)
        # np.split with cumulative sizes yields one chunk per variable group
        # (plus a trailing empty chunk that is never referenced by name).
        return dict(zip(names, np.split(values, split_points)))
    def get_circuit(self, features: Union[(ParameterVector, np.ndarray)], parameters: Union[(ParameterVector, np.ndarray)]) -> QuantumCircuit:
        '''
        Returns the circuit of the Layered Encoding Circuit

        Args:
            features Union[ParameterVector,np.ndarray]: Input vector of the features
                from which the gate inputs are obtained
            parameters Union[ParameterVector,np.ndarray]: Input vector of the parameters
                from which the gate inputs are obtained

        Return:
            Returns the circuit in qiskit QuantumCircuit format
        '''
        # Identical split/name logic for features and parameters is shared via
        # the _named_chunks helper (was duplicated inline).
        vg_dict = self._named_chunks(features, self._feature_variable_group)
        vg_dict.update(self._named_chunks(parameters, self._parameters_variable_group))
        # Re-order the chunks into the order expected by the underlying PQC.
        input_list = [vg_dict[name] for name in self._layered_pqc.variable_name_tuple]
        return self._layered_pqc.get_circuit(*input_list)
|
class LayeredEncodingCircuit(EncodingCircuitBase):
'\n A class for a simple creation of layered encoding circuits.\n\n Gates are added to all qubits by calling the associated function similar to Qiskit\'s circuits.\n Single qubit gates are added to all qubits, while two qubits gates can be added with different\n entanglement patterns. The implemented one and two qubit gates are listed below.\n\n Some gates have a input variable, as for example rotation gates, that can be set by supplying\n the string ``"x"`` for feature or ``"p"`` for parameter. Non-linear mapping can\n be added by setting the map variable ``map=``. Two qubit gates can be placed either\n in a nearest-neighbor ``NN`` or a all to all entangling pattern ``AA``.\n\n **Simple Layered Encoding Circuit**\n\n .. jupyter-execute::\n\n from squlearn.encoding_circuit import LayeredEncodingCircuit\n encoding_circuit = LayeredEncodingCircuit(num_qubits=4,num_features=2)\n encoding_circuit.H()\n encoding_circuit.Rz("x")\n encoding_circuit.Ry("p")\n encoding_circuit.cx_entangling("NN")\n encoding_circuit.draw("mpl")\n\n\n **Create a layered encoding circuit with non-linear input encoding**\n\n It is also possible to define a non-linear function for encoding variables in gates by\n supplying a function for the encoding as the second argument\n\n .. jupyter-execute::\n\n import numpy as np\n from squlearn.encoding_circuit import LayeredEncodingCircuit\n\n def func(a,b):\n return a*np.arccos(b)\n\n encoding_circuit = LayeredEncodingCircuit(num_qubits=4,num_features=2)\n encoding_circuit.H()\n encoding_circuit.Rz("p","x",encoding=func)\n encoding_circuit.cx_entangling("NN")\n encoding_circuit.draw("mpl")\n\n\n **Create a layered encoding circuit with layers**\n\n Furthermore, it is possible to define layers and repeat them.\n\n .. 
jupyter-execute::\n\n from squlearn.encoding_circuit import LayeredEncodingCircuit\n from squlearn.encoding_circuit.layered_encoding_circuit import Layer\n encoding_circuit = LayeredEncodingCircuit(num_qubits=4,num_features=2)\n encoding_circuit.H()\n layer = Layer(encoding_circuit)\n layer.Rz("x")\n layer.Ry("p")\n layer.cx_entangling("NN")\n encoding_circuit.add_layer(layer,num_layers=3)\n encoding_circuit.draw("mpl")\n\n\n **Create a layered encoding circuit from string**\n\n Another very useful feature is the creation from encoding circuits from strings.\n This can be achieved by the function ``LayeredEncodingCircuit.from_string()``.\n\n Gates are separated by ``-``, layers can be specified by ``N[...]`` where ``N`` is the\n number of repetitions. The entangling strategy can be set by adding ``NN`` or ``AA``.\n Adding a encoding function is possible by adding a ``=`` and the function definition as a\n string. The variables used in the function are given within curly brackets,\n e.g. ``crz(p;=a*np.arccos(b),{y,x};NN)``.\n\n The following strings are used for the gates:\n\n .. list-table:: Single qubit gates and their string representation\n :widths: 15 25 15 25 15 25\n :header-rows: 1\n\n * - String\n - Function\n - String\n - Function\n - String\n - Function\n * - ``"H"``\n - :meth:`H`\n - ``"I"``\n - :meth:`I`\n - ``"P"``\n - :meth:`P`\n * - ``"Rx"``\n - :meth:`Rx`\n - ``"Ry"``\n - :meth:`Ry`\n - ``"Rz"``\n - :meth:`Rz`\n * - ``"S"``\n - :meth:`S`\n - ``"Sc"``\n - :meth:`S_conjugate`\n - ``"T"``\n - :meth:`T`\n * - ``"Tc"``\n - :meth:`T_conjugate`\n - ``"U"``\n - :meth:`U`\n - ``"X"``\n - :meth:`X`\n * - ``"Y"``\n - :meth:`Y`\n - ``"Z"``\n - :meth:`Z`\n -\n -\n\n .. 
list-table:: Two qubit gates and their string representation\n :widths: 25 25 25 25 25 25\n :header-rows: 1\n\n * - String\n - Function\n - String\n - Function\n - String\n - Function\n * - ``"ch"``\n - :meth:`ch_entangling`\n - ``"cx"``\n - :meth:`cx_entangling`\n - ``"cy"``\n - :meth:`cy_entangling`\n * - ``"cz"``\n - :meth:`cz_entangling`\n - ``"s"``\n - :meth:`swap`\n - ``"cp"``\n - :meth:`cp_entangling`\n * - ``"crx"``\n - :meth:`crx_entangling`\n - ``"cry"``\n - :meth:`cry_entangling`\n - ``"crz"``\n - :meth:`crz_entangling`\n * - ``"rxx"``\n - :meth:`rxx_entangling`\n - ``"ryy"``\n - :meth:`ryy_entangling`\n - ``"rzz"``\n - :meth:`rzz_entangling`\n * - ``"rzx"``\n - :meth:`rzx_entangling`\n - ``"cu"``\n - :meth:`cu_entangling`\n -\n -\n\n .. jupyter-execute::\n\n from squlearn.encoding_circuit import LayeredEncodingCircuit\n encoding_circuit = LayeredEncodingCircuit.from_string(\n "Ry(p)-3[Rx(p,x;=y*np.arccos(x),{y,x})-crz(p)]-Ry(p)", num_qubits=4, num_features=1\n )\n encoding_circuit.draw("mpl")\n\n ** Hyperparameter optimization **\n\n If layers are introduced in the construction of the LayeredEncodingCircuit, the number of\n layers can be adjusted afterwards by the :meth:`set_params` method. This is also possible\n for multiple layers, for which the parameters are additionally number, e.g. ``num_layer_1`` .\n The number of layers as well as the number of qubits and the construction string are available\n as hyper-parameters that can be optimized in a hyper-parameter search.\n\n .. 
jupyter-execute::\n\n from sklearn.datasets import make_regression\n from sklearn.model_selection import GridSearchCV\n from squlearn.encoding_circuit import LayeredEncodingCircuit\n from squlearn.kernel import ProjectedQuantumKernel, QKRR\n from squlearn.util import Executor\n\n X, y = make_regression(n_samples=40, n_features=1, noise=0.1, random_state=42)\n\n lec = LayeredEncodingCircuit.from_string("Ry(x)-Rz(x)-cx",1,1)\n pqk = ProjectedQuantumKernel(lec,Executor())\n qkrr = QKRR(quantum_kernel=pqk)\n param_grid ={\n "encoding_circuit_str": ["Ry(x)-Rz(x)-cx", "Ry(x)-cx-Rx(x)"],\n "num_qubits" : [1,2],\n "num_layers" : [1,2]\n }\n grid_search = GridSearchCV(qkrr, param_grid, cv=2)\n grid_search.fit(X, y)\n print("\\nBest solution: ", grid_search.best_params_)\n\n\n Args:\n num_qubits (int): Number of qubits of the encoding circuit\n num_features (int): Dimension of the feature vector\n feature_str (str): Label for identifying the feature variable group (default: ``"x"``).\n parameter_str (str): Label for identifying the parameter variable group (default: ``"p"``).\n '
def __init__(self, num_qubits: int, num_features: int, feature_str: str='x', parameter_str: str='p', **kwargs) -> None:
    '''
    Initializes the Layered Encoding Circuit.

    Args:
        num_qubits (int): Number of qubits of the encoding circuit.
        num_features (int): Dimension of the feature vector.
        feature_str (str): Label for identifying the feature variable group (default: "x").
        parameter_str (str): Label for identifying the parameter variable group (default: "p").
        **kwargs: Additional hyper-parameters forwarded to :meth:`set_params`.
    '''
    super().__init__(num_qubits, num_features)
    self._feature_str = feature_str
    self._parameter_str = parameter_str
    # Feature group has a fixed size (the feature dimension).
    self._x = VariableGroup(self._feature_str, size=num_features)
    # Parameter group is created without a size — presumably counted up as
    # parameterized gates are added; confirm against VariableGroup.
    self._p = VariableGroup(self._parameter_str)
    self._layered_pqc = LayeredPQC(num_qubits=num_qubits, variable_groups=(self._x, self._p))
    self._encoding_circuit_str = ''
    if kwargs:
        self.set_params(**kwargs)
@property
def num_parameters(self) -> int:
    'Returns the number of trainable parameters of the Layered Encoding Circuit'
    return self._layered_pqc.get_number_of_variables(self._p)
def get_params(self, deep: bool=True) -> dict:
    '''
    Returns hyper-parameters and their values of the Layered Encoding Circuit.

    Args:
        deep (bool): Forwarded to the underlying LayeredPQC's get_params.

    Returns:
        dict: Hyper-parameter names mapped to their current values.
    '''
    params = self._layered_pqc.get_params(deep)
    params['num_features'] = self._num_features
    params['feature_str'] = self._feature_str
    # NOTE(review): parameter_str is not exposed here, so it cannot be changed
    # via set_params — confirm this is intentional.
    params['encoding_circuit_str'] = self._encoding_circuit_str
    return params
def set_params(self, **params) -> "LayeredEncodingCircuit":
    '''
    Sets the values of the encoding circuit hyper-parameters.

    Args:
        **params: Hyper-parameters and their values, e.g. ``num_qubits=2``.
            Valid keys are those returned by :meth:`get_params`.

    Returns:
        self (sklearn-style, for call chaining).

    Raises:
        ValueError: if a key is not a valid hyper-parameter.
    '''
    # Rebuilding from the circuit string happens first, because it replaces the
    # underlying LayeredPQC that the remaining parameters are applied to.
    if ('encoding_circuit_str' in params):
        self._encoding_circuit_str = params['encoding_circuit_str']
        # Reset the parameter counter before re-parsing the circuit string.
        self._p.total_variables_used = 0
        self._layered_pqc = LayeredPQC.from_string(self._num_qubits, self._encoding_circuit_str, (self._x, self._p))
    # NOTE(review): validation happens after the rebuild above, so an invalid
    # key still mutates state before raising — confirm this is intended.
    valid_params = self.get_params()
    for (key, value) in params.items():
        if (key not in valid_params):
            raise ValueError(f'Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}.')
    if ('num_features' in params):
        self._num_features = params['num_features']
        self._x.size = params['num_features']
    if ('num_qubits' in params):
        self._num_qubits = params['num_qubits']
    # Forward all remaining keys that the underlying LayeredPQC knows about.
    dict_layered_pqc = {}
    for key in params.keys():
        if (key in self._layered_pqc.get_params().keys()):
            dict_layered_pqc[key] = params[key]
    self._layered_pqc.set_params(**dict_layered_pqc)
    return self
def get_circuit(self, features: Union[(ParameterVector, np.ndarray)], parameters: Union[(ParameterVector, np.ndarray)]) -> QuantumCircuit:
    '''
    Returns the circuit of the Layered Encoding Circuit.

    Args:
        features (Union[ParameterVector, np.ndarray]): Input vector of the features
            from which the gate inputs are obtained.
        parameters (Union[ParameterVector, np.ndarray]): Input vector of the parameters
            from which the gate inputs are obtained.

    Returns:
        The circuit in qiskit QuantumCircuit format.
    '''
    # Delegates to the underlying LayeredPQC (features first, parameters second,
    # matching the (self._x, self._p) variable group order set in __init__).
    return self._layered_pqc.get_circuit(features, parameters)
@classmethod
def from_string(cls, encoding_circuit_str: str, num_qubits: int, num_features: int, feature_str: str='x', parameter_str: str='p', num_layers: int=1):
    '''
    Constructs a Layered Encoding Circuit through a given string of gates.

    Args:
        encoding_circuit_str (str): String that specifies the encoding circuit.
        num_qubits (int): Number of qubits in the encoding circuit.
        num_features (int): Dimension of the feature vector.
        feature_str (str): String that is used in encoding_circuit_str to label features (default: 'x').
        parameter_str (str): String that is used in encoding_circuit_str to label parameters (default: 'p').
        num_layers (int): Number of layers, i.e., the number of repetitions of the encoding circuit (default: 1).

    Returns:
        Returns a LayeredEncodingCircuit object that contains the specified encoding circuit.
    '''
    layered_encoding_circuit = cls(num_qubits, num_features, feature_str, parameter_str)
    layered_encoding_circuit._layered_pqc = LayeredPQC.from_string(num_qubits, encoding_circuit_str, (layered_encoding_circuit._x, layered_encoding_circuit._p))
    if (num_layers > 1):
        layered_encoding_circuit.set_params(num_layers=num_layers)
    layered_encoding_circuit._encoding_circuit_str = encoding_circuit_str
    return layered_encoding_circuit
def add_layer(self, layer, num_layers=1) -> None:
    '''
    Add a layer ``num_layers`` times.

    Args:
        layer: Layer structure to repeat.
        num_layers (int): Number of times that the layer is repeated.
    '''
    # NOTE(review): relies on ``layer.layered_pqc`` being provided by the layer
    # object (defined outside this view) — confirm.
    self._layered_pqc.add_layer(layer.layered_pqc, num_layers)
def _str_to_variable_group(self, input_string: Union[(tuple, str)]) -> VariableGroup:
'\n Internal function to convert a string to the\n feature or parameter variable group\n\n Args:\n input_string (str): String that is either feature_str or parameter_str\n\n Returns:\n Associated variable group\n '
if (input_string == self._feature_str):
return self._x
elif (input_string == self._parameter_str):
return self._p
elif isinstance(input_string, tuple):
return tuple([self._str_to_variable_group(str) for str in input_string])
else:
raise ValueError('Unknown variable type!')
def _param_gate(self, *variable, function, encoding: Union[(Callable, None)]=None):
'\n Internal conversion routine for one qubit gates that calls the LayeredPQC routines with the correct\n variable group data\n '
vg_list = [self._str_to_variable_group(str) for str in variable]
return function(*vg_list, map=encoding)
def _param_gate_U(self, *variable, function):
'\n Internal conversion routine for one qubit gates that calls the LayeredPQC routines with the correct\n variable group data\n '
vg_list = [self._str_to_variable_group(str) for str in variable]
return function(*vg_list)
def _two_param_gate(self, *variable, function, ent_strategy='NN', encoding: Union[(Callable, None)]=None):
'\n Internal conversion routine for two qubit gates that calls the LayeredPQC routines with the correct\n variable group data\n '
vg_list = [self._str_to_variable_group(str) for str in variable]
return function(*vg_list, ent_strategy=ent_strategy, map=encoding)
    def H(self):
        """Add a layer of H gates to the Layered Encoding Circuit."""
        self._layered_pqc.H()
    def X(self):
        """Add a layer of X gates to the Layered Encoding Circuit."""
        self._layered_pqc.X()
    def Y(self):
        """Add a layer of Y gates to the Layered Encoding Circuit."""
        self._layered_pqc.Y()
    def Z(self):
        """Add a layer of Z gates to the Layered Encoding Circuit."""
        self._layered_pqc.Z()
    def I(self):
        """Add a layer of I (identity) gates to the Layered Encoding Circuit."""
        self._layered_pqc.I()
    def S(self):
        """Add a layer of S gates to the Layered Encoding Circuit."""
        self._layered_pqc.S()
    def S_conjugate(self):
        """Add a layer of conjugated S gates to the Layered Encoding Circuit."""
        self._layered_pqc.S_conjugate()
    def T(self):
        """Add a layer of T gates to the Layered Encoding Circuit."""
        self._layered_pqc.T()
    def T_conjugate(self):
        """Add a layer of conjugated T gates to the Layered Encoding Circuit."""
        self._layered_pqc.T_conjugate()
    def Rx(self, *variable_str: str, encoding: Union[(Callable, None)]=None):
        """Add a layer of Rx gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._param_gate(*variable_str, function=self._layered_pqc.Rx, encoding=encoding)
    def Ry(self, *variable_str, encoding: Union[(Callable, None)]=None):
        """Add a layer of Ry gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._param_gate(*variable_str, function=self._layered_pqc.Ry, encoding=encoding)
    def Rz(self, *variable_str, encoding: Union[(Callable, None)]=None):
        """Add a layer of Rz gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._param_gate(*variable_str, function=self._layered_pqc.Rz, encoding=encoding)
    def P(self, *variable_str, encoding: Union[(Callable, None)]=None):
        """Add a layer of P (phase) gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._param_gate(*variable_str, function=self._layered_pqc.P, encoding=encoding)
    def U(self, *variable_str):
        """Add a layer of U gates to the Layered Encoding Circuit.

        Note: unlike the other rotation layers, no ``encoding`` function is
        supported here (the original docstring wrongly documented one).

        Args:
            variable_str (str): Labels of variables that are used in the gate.
        """
        self._param_gate_U(*variable_str, function=self._layered_pqc.U)
    def ch_entangling(self, ent_strategy='NN'):
        """Add a layer of controlled H gates to the Layered Encoding Circuit.

        Args:
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
        """
        self._layered_pqc.ch_entangling(ent_strategy)
    def cx_entangling(self, ent_strategy='NN'):
        """Add a layer of controlled X gates to the Layered Encoding Circuit.

        Args:
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
        """
        self._layered_pqc.cx_entangling(ent_strategy)
    def cy_entangling(self, ent_strategy='NN'):
        """Add a layer of controlled Y gates to the Layered Encoding Circuit.

        Args:
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
        """
        self._layered_pqc.cy_entangling(ent_strategy)
    def cz_entangling(self, ent_strategy='NN'):
        """Add a layer of controlled Z gates to the Layered Encoding Circuit.

        Args:
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
        """
        self._layered_pqc.cz_entangling(ent_strategy)
    def swap(self, ent_strategy='NN'):
        """Add a layer of swap gates to the Layered Encoding Circuit.

        Args:
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
        """
        self._layered_pqc.swap(ent_strategy)
    def cp_entangling(self, *variable_str, ent_strategy='NN', encoding: Union[(Callable, None)]=None):
        """Add a layer of controlled P gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._two_param_gate(*variable_str, function=self._layered_pqc.cp_entangling, ent_strategy=ent_strategy, encoding=encoding)
    def crx_entangling(self, *variable_str, ent_strategy='NN', encoding: Union[(Callable, None)]=None):
        """Add a layer of controlled Rx gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._two_param_gate(*variable_str, function=self._layered_pqc.crx_entangling, ent_strategy=ent_strategy, encoding=encoding)
    def cry_entangling(self, *variable_str, ent_strategy='NN', encoding: Union[(Callable, None)]=None):
        """Add a layer of controlled Ry gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._two_param_gate(*variable_str, function=self._layered_pqc.cry_entangling, ent_strategy=ent_strategy, encoding=encoding)
    def crz_entangling(self, *variable_str, ent_strategy='NN', encoding: Union[(Callable, None)]=None):
        """Add a layer of controlled Rz gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._two_param_gate(*variable_str, function=self._layered_pqc.crz_entangling, ent_strategy=ent_strategy, encoding=encoding)
    def rxx_entangling(self, *variable_str, ent_strategy='NN', encoding: Union[(Callable, None)]=None):
        """Add a layer of Rxx gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._two_param_gate(*variable_str, function=self._layered_pqc.rxx_entangling, ent_strategy=ent_strategy, encoding=encoding)
    def ryy_entangling(self, *variable_str, ent_strategy='NN', encoding: Union[(Callable, None)]=None):
        """Add a layer of Ryy gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._two_param_gate(*variable_str, function=self._layered_pqc.ryy_entangling, ent_strategy=ent_strategy, encoding=encoding)
    def rzx_entangling(self, *variable_str, ent_strategy='NN', encoding: Union[(Callable, None)]=None):
        """Add a layer of Rzx gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._two_param_gate(*variable_str, function=self._layered_pqc.rzx_entangling, ent_strategy=ent_strategy, encoding=encoding)
    def rzz_entangling(self, *variable_str, ent_strategy='NN', encoding: Union[(Callable, None)]=None):
        """Add a layer of Rzz gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._two_param_gate(*variable_str, function=self._layered_pqc.rzz_entangling, ent_strategy=ent_strategy, encoding=encoding)
    def cu_entangling(self, *variable_str, ent_strategy='NN', encoding: Union[(Callable, None)]=None):
        """Add a layer of controlled U gates to the Layered Encoding Circuit.

        Args:
            variable_str (str): Labels of variables that are used in the gate.
            ent_strategy (str): Entanglement strategy, either ``"NN"`` or ``"AA"``.
            encoding (Callable): Encoding function that is applied to the variables,
                input in the same order as the given labels in ``variable_str``.
        """
        self._two_param_gate(*variable_str, function=self._layered_pqc.cu_entangling, ent_strategy=ent_strategy, encoding=encoding)
|
class Layer(LayeredEncodingCircuit):
    """Class for defining a single layer of the Layered Encoding Circuit."""

    def __init__(self, encoding_circuit: LayeredEncodingCircuit):
        # Re-use the qubit/feature configuration and the variable groups of the
        # parent encoding circuit; the operations are held in a LayerPQC.
        super().__init__(encoding_circuit.num_qubits, encoding_circuit.num_features, encoding_circuit._feature_str, encoding_circuit._parameter_str)
        self._x = encoding_circuit._x
        self._p = encoding_circuit._p
        self._layered_pqc = LayerPQC(encoding_circuit._layered_pqc)

    @property
    def layered_pqc(self):
        """Returns the LayerPQC object of the Layered Encoding Circuit."""
        return self._layered_pqc
|
class _operation_layer():
'\n class for the operation_list in LayeredPQC. Stores layers of operations, which are created by the Layer class.\n '
def __init__(self, layer: LayerPQC, num_layers: int=1, layer_number: int=1) -> None:
self.layer = layer
self.num_layers = num_layers
self.layer_number = layer_number
def change_qubits(self, value):
'\n This method is called by the set_params method, if the user changes the number of qubits of the whole encoding circuit.\n '
for operation in self.layer.operation_list:
var_group_tuple = operation.variablegroup_tuple
operation.num_qubits = value
if (var_group_tuple != None):
if (operation.ent_strategy == None):
for var_group in var_group_tuple:
var_group.increase_used_number_of_variables((self.num_layers * (value - self.layer.num_qubits)))
elif (operation.ent_strategy == 'NN'):
for var_group in var_group_tuple:
var_group.increase_used_number_of_variables((self.num_layers * (value - self.layer.num_qubits)))
else:
for var_group in var_group_tuple:
old_num_of_variables = sum((x for x in range(1, self.layer.num_qubits)))
new_num_of_variables = sum((x for x in range(1, value)))
var_group.increase_used_number_of_variables((self.num_layers * (new_num_of_variables - old_num_of_variables)))
self.layer._num_qubits = value
def change_num_layers(self, value):
'\n This method is called by the set_params method, if the user changes the number of layers of the layer attribute of this operation_layer object (self).\n '
num_layers_difference = (value - self.num_layers)
num_qubits = self.layer.num_qubits
for layer_operation in self.layer.operation_list:
variablegroup_tuple = layer_operation.variablegroup_tuple
if (variablegroup_tuple != None):
if (layer_operation.ent_strategy == None):
number_of_variables = num_qubits
elif (layer_operation.ent_strategy == 'NN'):
number_of_variables = (num_qubits - 1)
else:
number_of_variables = sum((x for x in range(1, num_qubits)))
variable_num_list = [(num_layers_difference * number_of_variables) for i in range(len(variablegroup_tuple))]
iteration_counter = 0
for variablegroup in variablegroup_tuple:
variablegroup.increase_used_number_of_variables(variable_num_list[iteration_counter])
iteration_counter += 1
self.num_layers = value
|
class TranspiledEncodingCircuit(EncodingCircuitBase):
    """Encoding circuit wrapper that holds a transpiled version of the circuit.

    **Example:**

    .. jupyter-execute::

        from squlearn.encoding_circuit import TranspiledEncodingCircuit,ChebyshevRx
        from qiskit.providers.fake_provider import FakeManilaV2

        fm = TranspiledEncodingCircuit(ChebyshevRx(3,1),backend=FakeManilaV2(),initial_layout=[0,1,4])
        fm.draw("mpl")

    Args:
        encoding_circuit (EncodingCircuitBase): Encoding circuit to be transpiled.
        backend (Backend): Backend used for the transpilation.
        transpile_func (Union[Callable, None]): Optional function for transpiling the
            circuit; first argument is the circuit, second the backend. If no
            function is specified, Qiskit's transpile function is used.
        kwargs: Additional arguments for Qiskit's transpile function.
    """

    def __init__(self, encoding_circuit: EncodingCircuitBase, backend: Backend, transpile_func: Union[(Callable, None)]=None, **kwargs) -> None:
        self._encoding_circuit = encoding_circuit
        self._backend = backend
        self._transpile_func = transpile_func
        self._x = ParameterVector('x', self._encoding_circuit.num_features)
        self._p = ParameterVector('p', self._encoding_circuit.num_parameters)
        self._circuit = self._encoding_circuit.get_circuit(self._x, self._p)
        if self._transpile_func is not None:
            self._transpiled_circuit = self._transpile_func(self._circuit, self._backend)
        else:
            # Reproducible defaults for Qiskit's transpile function.
            kwargs.setdefault('optimization_level', 3)
            kwargs.setdefault('seed_transpiler', 0)
            self._transpiled_circuit = transpile(self._circuit, self._backend, **kwargs)
        self._qubit_map = _gen_qubit_mapping(self._transpiled_circuit)
        self._kwargs = kwargs

    @property
    def num_qubits(self) -> int:
        """Number of qubits (physical) of the encoding circuit."""
        return self._transpiled_circuit.num_qubits

    @property
    def num_physical_qubits(self) -> int:
        """Number of physical qubits of the encoding circuit."""
        return self._transpiled_circuit.num_qubits

    @property
    def num_virtual_qubits(self) -> int:
        """Number of virtual qubits in the encoding circuit."""
        return self._encoding_circuit.num_qubits

    @property
    def qubit_map(self) -> dict:
        """Dictionary which maps virtual to physical qubits."""
        return self._qubit_map

    @property
    def backend(self) -> Backend:
        """Backend used for the transpilation (annotation fixed from ``int``)."""
        return self._backend

    @property
    def num_features(self) -> int:
        """Feature dimension of the encoding circuit."""
        return self._encoding_circuit.num_features

    @property
    def num_parameters(self) -> int:
        """Number of trainable parameters of the encoding circuit."""
        return self._encoding_circuit.num_parameters

    @property
    def parameter_bounds(self) -> np.ndarray:
        """Bounds of the trainable parameters of the encoding circuit."""
        return self._encoding_circuit.parameter_bounds

    @property
    def feature_bounds(self) -> np.ndarray:
        """Bounds of the features of the encoding circuit."""
        return self._encoding_circuit.feature_bounds

    def get_params(self, deep: bool=True) -> dict:
        """Return hyper-parameters and their values of the encoding circuit.

        Args:
            deep (bool): If True, also the parameters for contained objects
                are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        # Bug fix: forward ``deep`` instead of silently ignoring it.
        return self._encoding_circuit.get_params(deep)

    def set_params(self, **params) -> None:
        """Set values of the encoding circuit hyper-parameters.

        Args:
            params: Hyper-parameters and their values, e.g. ``num_qubits=2``
        """
        self._encoding_circuit.set_params(**params)
        # Re-run the whole construction so the transpiled circuit is rebuilt.
        self.__init__(self._encoding_circuit, self._backend, self._transpile_func, **self._kwargs)

    def get_circuit(self, features: Union[(ParameterVector, np.ndarray)], parameters: Union[(ParameterVector, np.ndarray)]) -> QuantumCircuit:
        """Return the circuit of the transpiled encoding circuit.

        Args:
            features (Union[ParameterVector, np.ndarray]): Input vector of the features
                from which the gate inputs are obtained.
            parameters (Union[ParameterVector, np.ndarray]): Input vector of the
                parameters from which the gate inputs are obtained.

        Return:
            The transpiled circuit in Qiskit's QuantumCircuit format.
        """
        # Bind features and parameters in a single assignment dictionary.
        exchange_both = dict(zip(self._x, features))
        exchange_both.update(dict(zip(self._p, parameters)))
        return self._transpiled_circuit.assign_parameters(exchange_both)
|
def _gen_qubit_mapping(circuit: QuantumCircuit) -> dict:
'\n Returns dictionary that maps virtual qubits to the physical ones\n\n Args:\n circuit (QuantumCircuit): quantum circuit (ideally transpiled)\n\n Returns:\n Dictionary which maps virtual to physical qubits\n '
dic = {}
try:
from qiskit.transpiler.layout import TranspileLayout
if isinstance(circuit._layout, TranspileLayout):
layout = circuit._layout.initial_layout
else:
layout = circuit._layout
bit_locations = {bit: {'register': register, 'index': index} for register in layout.get_registers() for (index, bit) in enumerate(register)}
for (index, qubit) in enumerate(layout.get_virtual_bits()):
if (qubit not in bit_locations):
bit_locations[qubit] = {'register': None, 'index': index}
for (key, val) in layout.get_virtual_bits().items():
bit_register = bit_locations[key]['register']
if ((bit_register is None) or (bit_register.name != 'ancilla')):
dic[bit_locations[key]['index']] = val
except:
for i in range(circuit.num_qubits):
dic[i] = i
return dic
|
class FidelityKernel(KernelMatrixBase):
    """Fidelity Quantum Kernel based on the overlap of quantum states.

    The Fidelity Quantum Kernel is defined as

    .. math::

        K(x,y) = |\\langle \\phi(x) | \\phi(y) \\rangle|^2

    This class wraps the respective Quantum Kernel implementations from
    `Qiskit Machine Learning
    <https://qiskit.org/ecosystem/machine-learning/apidocs/qiskit_machine_learning.kernels.html>`_.
    Depending on the choice of the backend and the presence of trainable
    parameters, the appropriate Quantum Kernel implementation is chosen.

    Args:
        encoding_circuit (EncodingCircuitBase): PQC encoding circuit.
        executor (Executor): Executor object.
        evaluate_duplicates (str), default='off_diagonal':
            Option for evaluating duplicates ('all', 'off_diagonal', 'none').
        mit_depol_noise (Union[str, None]), default=None:
            Option for mitigating depolarizing noise (``"msplit"`` or ``"mmean"``)
            after Hubregtsen et al., arXiv:2105.02276. Only meaningful for
            FQKs computed on a real backend.
        initial_parameters (Union[np.ndarray, None], default=None):
            Initial parameters for the encoding circuit.
        parameter_seed (Union[int, None], default=0):
            Seed for the parameter initialization if initial_parameters is None.
        regularization (Union[str, None], default=None):
            Regularization of the training kernel matrix (``"thresholding"`` or
            ``"tikhonov"``), applied prior to solving the linear system in
            ``fit()``.

    References:
        [1]: Havlicek et al., Nature 567, 209-212 (2019).
        [2]: Schuld et al., Phys. Rev. Lett. 122, 040504 (2019).
        [3]: Schuld et al., arXiv:2105.02276 (2021).
        [4]: T. Hubregtsen et al., "Training Quantum Embedding Kernels on
        Near-Term Quantum Computers", arXiv:2105.02276v1 (2021).
    """
    def __init__(self, encoding_circuit: EncodingCircuitBase, executor: Executor, evaluate_duplicates: str='off_diagonal', mit_depol_noise: Union[(str, None)]=None, initial_parameters: Union[(np.ndarray, None)]=None, parameter_seed: Union[(int, None)]=0, regularization: Union[(str, None)]=None) -> None:
        super().__init__(encoding_circuit, executor, initial_parameters, parameter_seed, regularization)
        self._quantum_kernel = None
        self._evaluate_duplicates = evaluate_duplicates
        self._mit_depol_noise = mit_depol_noise
        self._feature_vector = ParameterVector('x', self.num_features)
        # A trainable parameter vector is only created if the circuit has parameters.
        if (self.num_parameters > 0):
            self._parameter_vector = ParameterVector('θ', self.num_parameters)
        else:
            self._parameter_vector = None
        self._enc_circ = self._encoding_circuit.get_circuit(self._feature_vector, self._parameter_vector)
        # Pick the Qiskit ML kernel implementation: statevector-based when the
        # executor backend is a statevector simulator, sampler-based otherwise;
        # each with a trainable variant when a parameter vector exists.
        if ('statevector_simulator' in str(self._executor._backend)):
            if (self._parameter_vector is None):
                self._quantum_kernel = FidelityStatevectorKernel(feature_map=self._enc_circ)
            else:
                self._quantum_kernel = TrainableFidelityStatevectorKernel(feature_map=self._enc_circ, training_parameters=self._parameter_vector)
        else:
            fidelity = ComputeUncompute(sampler=self._executor.get_sampler())
            if (self._parameter_vector is None):
                self._quantum_kernel = FidelityQuantumKernel(feature_map=self._enc_circ, fidelity=fidelity, evaluate_duplicates=self._evaluate_duplicates)
            else:
                self._quantum_kernel = TrainableFidelityQuantumKernel(feature_map=self._enc_circ, fidelity=fidelity, training_parameters=self._parameter_vector, evaluate_duplicates=self._evaluate_duplicates)
    def get_params(self, deep: bool=True) -> dict:
        """Return hyper-parameters and their values of the fidelity kernel.

        Args:
            deep (bool): If True, also the parameters for contained objects
                are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params(deep=False)
        params['evaluate_duplicates'] = self._evaluate_duplicates
        params['mit_depol_noise'] = self._mit_depol_noise
        params['regularization'] = self._regularization
        params['encoding_circuit'] = self._encoding_circuit
        if deep:
            params.update(self._encoding_circuit.get_params())
        return params
    def set_params(self, **params):
        """Set values of the fidelity kernel hyper-parameters.

        Args:
            params: Hyper-parameters and their values, e.g. ``num_qubits=2``
        """
        # Back up current parameters so they survive re-initialization below
        # as long as the number of parameters is unchanged.
        num_parameters_backup = self.num_parameters
        parameters_backup = self._parameters
        valid_params = self.get_params()
        for key in params.keys():
            if (key not in valid_params):
                raise ValueError(f'Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}.')
        if ('encoding_circuit' in params):
            self._encoding_circuit = params['encoding_circuit']
            params.pop('encoding_circuit')
        # Route the hyper-parameters belonging to the encoding circuit to it.
        dict_encoding_circuit = {}
        for key in params.keys():
            if (key in self._encoding_circuit.get_params().keys()):
                dict_encoding_circuit[key] = params[key]
        for key in dict_encoding_circuit.keys():
            params.pop(key)
        self._encoding_circuit.set_params(**dict_encoding_circuit)
        if ('evaluate_duplicates' in params.keys()):
            self._evaluate_duplicates = params['evaluate_duplicates'].lower()
            params.pop('evaluate_duplicates')
        if ('mit_depol_noise' in params.keys()):
            self._mit_depol_noise = params['mit_depol_noise']
            params.pop('mit_depol_noise')
        if ('regularization' in params.keys()):
            self._regularization = params['regularization']
            params.pop('regularization')
        # Rebuild the kernel with the updated configuration.
        self.__init__(self._encoding_circuit, self._executor, self._evaluate_duplicates, self._mit_depol_noise, None, self._parameter_seed, self._regularization)
        if (self.num_parameters == num_parameters_backup):
            self._parameters = parameters_backup
        if (len(params) > 0):
            raise ValueError('The following parameters could not be assigned:', params)
    def evaluate(self, x: np.ndarray, y: Union[(np.ndarray, None)]=None) -> np.ndarray:
        """Evaluate the fidelity kernel matrix.

        Args:
            x (np.ndarray): Vector of training or test data.
            y (np.ndarray, default=None): Second vector of training or test
                data; defaults to ``x`` (symmetric kernel matrix).

        Returns:
            The quantum kernel matrix as a 2D numpy array.
        """
        if (y is None):
            y = x
        kernel_matrix = np.zeros((x.shape[0], y.shape[0]))
        if (self._parameter_vector is not None):
            if (self._parameters is None):
                raise ValueError('Parameters have to been set with assign_parameters or as initial parameters!')
            self._quantum_kernel.assign_training_parameters(self._parameters)
        kernel_matrix = self._quantum_kernel.evaluate(x, y)
        # Optional depolarizing-noise mitigation (square matrices only).
        if (self._mit_depol_noise is not None):
            print('WARNING: Advanced option. Do not use it within an squlearn.kernel.ml workflow')
            if (not np.array_equal(x, y)):
                raise ValueError('Mitigating depolarizing noise works only for square matrices computed on real backend')
            elif (self._mit_depol_noise == 'msplit'):
                kernel_matrix = self._get_msplit_kernel(kernel_matrix)
            elif (self._mit_depol_noise == 'mmean'):
                kernel_matrix = self._get_mmean_kernel(kernel_matrix)
        if ((self._regularization is not None) and (kernel_matrix.shape[0] == kernel_matrix.shape[1])):
            kernel_matrix = self._regularize_matrix(kernel_matrix)
        return kernel_matrix
    def _get_msplit_kernel(self, kernel: np.ndarray) -> np.ndarray:
        # Depolarizing-noise mitigation with per-sample survival probabilities
        # ("msplit"); formulas follow Hubregtsen et al., arXiv:2105.02276.
        msplit_kernel_matrix = np.zeros((kernel.shape[0], kernel.shape[1]))
        survival_prob = self._survival_probability(kernel)
        for i in range(kernel.shape[0]):
            for j in range(kernel.shape[1]):
                msplit_kernel_matrix[(i, j)] = ((kernel[(i, j)] - ((2 ** ((- 1.0) * self._num_qubits)) * (1 - (survival_prob[i] * survival_prob[j])))) / (survival_prob[i] * survival_prob[j]))
        return msplit_kernel_matrix
    def _get_mmean_kernel(self, kernel: np.ndarray) -> np.ndarray:
        # Depolarizing-noise mitigation with one mean survival probability
        # ("mmean"); formulas follow Hubregtsen et al., arXiv:2105.02276.
        mmean_kernel_matrix = np.zeros((kernel.shape[0], kernel.shape[1]))
        survival_prob_mean = self._survival_probability_mean(kernel)
        mmean_kernel_matrix = ((kernel - ((2 ** ((- 1.0) * self._num_qubits)) * (1 - (survival_prob_mean ** 2)))) / (survival_prob_mean ** 2))
        return mmean_kernel_matrix
    def _survival_probability(self, kernel: np.ndarray) -> np.ndarray:
        # Per-sample survival probability estimated from the kernel diagonal.
        kernel_diagonal = np.diag(kernel)
        surv_prob = np.sqrt(((kernel_diagonal - (2 ** ((- 1.0) * self._num_qubits))) / (1 - (2 ** ((- 1.0) * self._num_qubits)))))
        return surv_prob
    def _survival_probability_mean(self, kernel: np.ndarray) -> float:
        # Mean of the per-sample survival probabilities.
        surv_prob = self._survival_probability(kernel)
        return np.mean(surv_prob)
|
def kernel_wrapper(kernel_matrix: KernelMatrixBase):
    """Wrap sQUlearn's KernelMatrixBase into a scikit-learn kernel object.

    Args:
        kernel_matrix (KernelMatrixBase): Quantum kernel matrix that is
            wrapped into a scikit-learn kernel.
    """

    class CustomKernel(Kernel):
        """Scikit-learn kernel backed by a squlearn kernel matrix."""

        def __init__(self, kernel_matrix: KernelMatrixBase):
            self.kernel_matrix = kernel_matrix
            super().__init__()

        def __call__(self, X, Y=None, eval_gradient=False):
            other = X if Y is None else Y
            gram = self.kernel_matrix.evaluate(X, other)
            if eval_gradient:
                raise NotImplementedError('Gradient not yet implemented for this kernel.')
            return gram

        def diag(self, X):
            return np.diag(self.kernel_matrix.evaluate(X))

        @property
        def requires_vector_input(self):
            return True

        def is_stationary(self):
            return self.kernel_matrix.is_stationary()

    return CustomKernel(kernel_matrix)
|
class OuterKernelBase():
    """Base class for creating outer kernels for the projected quantum kernel."""

    def __init__(self):
        self._num_hyper_parameters = 0
        self._name_hyper_parameters = []

    @abstractmethod
    def __call__(self, qnn: "QNN", parameters: "np.ndarray", x: "np.ndarray", y: "np.ndarray" = None) -> "np.ndarray":
        """Evaluate the outer kernel.

        Args:
            qnn: QNN object
            parameters: Parameters of the QNN
            x: First input
            y: Second, optional input

        Returns:
            Evaluated projected kernel matrix
        """
        raise NotImplementedError()

    @abstractmethod
    def get_params(self, deep: bool = True) -> dict:
        """Return hyper-parameters and their values of the outer kernel.

        Args:
            deep (bool): If True, also the parameters for contained objects
                are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        raise NotImplementedError()

    @abstractmethod
    def set_params(self, **params):
        """Set values of the outer kernel hyper-parameters.

        Args:
            params: Hyper-parameters and their values, e.g. ``num_qubits=2``
        """
        raise NotImplementedError()

    @property
    def num_hyper_parameters(self) -> int:
        """Returns the number of hyper-parameters of the outer kernel."""
        return self._num_hyper_parameters

    @property
    def name_hyper_parameters(self) -> "List[str]":
        """Returns the names of the hyper-parameters of the outer kernel."""
        return self._name_hyper_parameters

    @classmethod
    def from_sklearn_kernel(cls, kernel: "SklearnKernel", **kwarg):
        """Convert a scikit-learn kernel into a squlearn outer kernel.

        Args:
            kernel: scikit-learn kernel class
            kwarg: Arguments for the scikit-learn kernel parameters
        """

        # Bug fix: this class previously inherited from BaseException, which
        # made the returned object an exception type instead of an outer
        # kernel and broke isinstance(obj, OuterKernelBase) checks.
        class SklearnOuterKernel(OuterKernelBase):
            """Outer kernel wrapping a scikit-learn kernel.

            Args:
                kernel (:py:mod:`sklearn.gaussian_process.kernels`): Scikit-learn kernel
                **kwarg: Arguments for the scikit-learn kernel parameters
            """

            def __init__(self, kernel: "SklearnKernel", **kwarg):
                super().__init__()
                self._kernel = kernel(**kwarg)
                self._name_hyper_parameters = [p.name for p in self._kernel.hyperparameters]
                self._num_hyper_parameters = len(self._name_hyper_parameters)

            def __call__(self, qnn: "QNN", parameters: "np.ndarray", x: "np.ndarray", y: "np.ndarray" = None) -> "np.ndarray":
                """Evaluate the outer kernel on the QNN outputs of x (and y)."""
                # Split the parameter vector into circuit and operator parameters.
                param = parameters[:qnn.num_parameters]
                param_op = parameters[qnn.num_parameters:]
                x_result = qnn.evaluate_f(x, param, param_op)
                if y is not None:
                    y_result = qnn.evaluate_f(y, param, param_op)
                else:
                    y_result = None
                return self._kernel(x_result, y_result)

            def get_params(self, deep: bool = True) -> dict:
                """Return hyper-parameters and values of the scikit-learn kernel."""
                return self._kernel.get_params(deep)

            def set_params(self, **params):
                """Set values of the scikit-learn kernel hyper-parameters."""
                self._kernel.set_params(**params)

        return SklearnOuterKernel(kernel, **kwarg)
|
class ProjectedQuantumKernel(KernelMatrixBase):
    """Projected Quantum Kernel for Quantum Kernel Algorithms.

    The Projected Quantum Kernel embeds classical data into a quantum Hilbert
    space and then projects down into a real space by measurements; the
    projected features are fed into a classical (outer) kernel. The projection
    is done by evaluating expectation values of the encoding circuit with
    respect to given Pauli operators. The implementation is based on Ref. [1].

    As defaults, a Gaussian outer kernel is used and the expectation values of
    all three Pauli matrices X, Y, Z are computed for every qubit.

    Args:
        encoding_circuit (EncodingCircuitBase): Encoding circuit that is evaluated
        executor (Executor): Executor object
        measurement (Union[str, ObservableBase, list]): Expectation values that are
            computed from the encoding circuit. Either an operator, a list of
            operators, or a combination of the string values ``X``, ``Y``, ``Z``
            (e.g. ``"XYZ"``)
        outer_kernel (Union[str, OuterKernelBase]): Outer kernel applied to the
            expectation values. Possible string values: ``Gaussian``, ``Matern``,
            ``ExpSineSquared``, ``RationalQuadratic``, ``DotProduct``,
            ``PairwiseKernel``
        initial_parameters (np.ndarray): Initial parameters of the encoding
            circuit and the operator (if parameterized)
        parameter_seed (Union[int, None], default=0): Seed for the random
            parameter initialization if ``initial_parameters`` is None
        regularization (Union[str, None], default=None): Optional regularization
            of the training kernel matrix (``"thresholding"`` or ``"tikhonov"``)
            after Ref. [2], applied prior to solving the linear system in ``fit()``
        caching (bool, default=True): If True, results of the low-level QNN are cached
        **kwargs: Keyword arguments forwarded to the outer kernel (e.g. ``gamma``)

    References:
        [1] Huang, HY., Broughton, M., Mohseni, M. et al., "Power of data in
        quantum machine learning", Nat Commun 12, 2631 (2021).

        [2] T. Hubregtsen et al., "Training Quantum Embedding Kernels on
        Near-Term Quantum Computers", arXiv:2105.02276 (2021).
    """

    def __init__(
        self,
        encoding_circuit: EncodingCircuitBase,
        executor: Executor,
        measurement: Union[str, ObservableBase, list] = "XYZ",
        outer_kernel: Union[str, OuterKernelBase] = "gaussian",
        initial_parameters: Union[np.ndarray, None] = None,
        parameter_seed: Union[int, None] = 0,
        regularization: Union[str, None] = None,
        caching: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(
            encoding_circuit, executor, initial_parameters, parameter_seed, regularization
        )
        self._measurement_input = measurement
        self._outer_kernel_input = outer_kernel
        self._caching = caching

        if isinstance(measurement, str):
            # A string such as "XYZ" expands to one single-qubit Pauli per
            # character and per qubit.
            self._measurement = []
            for m_str in measurement:
                if m_str not in ("X", "Y", "Z"):
                    raise ValueError("Unknown measurement operator: {}".format(m_str))
                for i in range(self.num_qubits):
                    self._measurement.append(SinglePauli(self.num_qubits, i, op_str=m_str))
        elif isinstance(measurement, (ObservableBase, list)):
            self._measurement = measurement
        else:
            raise ValueError("Unknown type of measurement: {}".format(type(measurement)))

        self._qnn = QNN(
            self._encoding_circuit, self._measurement, executor, result_caching=self._caching
        )
        self._set_outer_kernel(outer_kernel, **kwargs)

        if initial_parameters is None:
            if self._parameters is None:
                self._parameters = np.array([])
            # Append randomly initialized observable parameters (one seed offset
            # per operator so that the operators are initialized differently).
            if isinstance(self._measurement, list):
                for i, m in enumerate(self._measurement):
                    self._parameters = np.concatenate(
                        (
                            self._parameters,
                            m.generate_initial_parameters(seed=parameter_seed + i + 1),
                        )
                    )
            elif isinstance(self._measurement, ObservableBase):
                self._parameters = np.concatenate(
                    (
                        self._parameters,
                        self._measurement.generate_initial_parameters(seed=parameter_seed),
                    )
                )
            else:
                raise ValueError("Unknown type of measurement: {}".format(type(measurement)))

        if self._parameters is not None:
            if len(self._parameters) != self.num_parameters:
                raise ValueError(
                    "Number of initial parameters is wrong, expected number: {}".format(
                        self.num_parameters
                    )
                )

    @property
    def num_features(self) -> int:
        """Feature dimension of the encoding circuit"""
        return self._qnn.num_features

    @property
    def num_parameters(self) -> int:
        """Number of trainable parameters of the encoding circuit"""
        return self._qnn.num_parameters + self._qnn.num_parameters_observable

    @property
    def measurement(self):
        """Measurement operator of the Projected Quantum Kernel"""
        return self._measurement

    @property
    def outer_kernel(self):
        """Outer kernel class of the Projected Quantum Kernel"""
        return self._outer_kernel

    def evaluate_qnn(self, x: np.ndarray) -> np.ndarray:
        """Evaluates the QNN for the given data x.

        Args:
            x (np.ndarray): Data points x
        Returns:
            The evaluated output of the QNN as numpy array
        """
        if self._parameters is None and self.num_parameters == 0:
            # Consistency fix: use an empty numpy array (the original used a
            # plain list here but np.array([]) in evaluate()).
            self._parameters = np.array([])
        if self._parameters is None:
            raise ValueError("Parameters have not been set yet!")
        param = self._parameters[: self._qnn.num_parameters]
        param_op = self._parameters[self._qnn.num_parameters :]
        return self._qnn.evaluate_f(x, param, param_op)

    def evaluate(self, x: np.ndarray, y: np.ndarray = None) -> np.ndarray:
        """Evaluates the Projected Quantum Kernel for the given data points x and y.

        Args:
            x (np.ndarray): Data points x
            y (np.ndarray): Data points y, if None y = x is used
        Returns:
            The evaluated projected quantum kernel as numpy array
        """
        if self._parameters is None and self.num_parameters == 0:
            self._parameters = np.array([])
        if self._parameters is None:
            raise ValueError("Parameters have not been set yet!")
        kernel_matrix = self._outer_kernel(self._qnn, self._parameters, x, y)
        # Regularization only applies to square (training) kernel matrices.
        if self._regularization is not None and kernel_matrix.shape[0] == kernel_matrix.shape[1]:
            kernel_matrix = self._regularize_matrix(kernel_matrix)
        return kernel_matrix

    def get_params(self, deep: bool = True) -> dict:
        """Returns hyper-parameters and their values of the Projected Quantum Kernel.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params(deep=False)
        params.update(self._outer_kernel.get_params())
        params["measurement"] = self._measurement_input
        params["num_qubits"] = self.num_qubits
        params["regularization"] = self._regularization
        params["outer_kernel"] = self._outer_kernel_input
        if deep:
            params.update(self._qnn.get_params())
        return params

    def set_params(self, **params):
        """Sets value of the Projected Quantum Kernel hyper-parameters.

        Args:
            params: Hyper-parameters and their values, e.g. ``num_qubits=2``
        """
        num_parameters_backup = self.num_parameters
        parameters_backup = self._parameters
        outer_kernel_input_backup = self._outer_kernel_input

        valid_params = self.get_params()
        for key in params.keys():
            if key not in valid_params:
                raise ValueError(
                    f"Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}."
                )

        # Structural changes require re-running __init__ to rebuild the QNN.
        if "num_qubits" in params:
            self._encoding_circuit.set_params(num_qubits=params["num_qubits"])
            if isinstance(self._measurement_input, list):
                for m in self._measurement_input:
                    m.set_params(num_qubits=params["num_qubits"])
            elif isinstance(self._measurement_input, ObservableBase):
                self._measurement_input.set_params(num_qubits=params["num_qubits"])
            self.__init__(
                self._encoding_circuit,
                self._executor,
                self._measurement_input,
                self._outer_kernel,
                None,
                self._parameter_seed,
                self._regularization,
                self._caching,
            )
            params.pop("num_qubits")
        if "measurement" in params:
            self._measurement_input = params["measurement"]
            self.__init__(
                self._encoding_circuit,
                self._executor,
                self._measurement_input,
                self._outer_kernel,
                None,
                self._parameter_seed,
                self._regularization,
                self._caching,
            )
            params.pop("measurement")
        if "encoding_circuit" in params:
            self._encoding_circuit = params["encoding_circuit"]
            self.__init__(
                self._encoding_circuit,
                self._executor,
                self._measurement_input,
                self._outer_kernel,
                None,
                self._parameter_seed,
                self._regularization,
                self._caching,
            )
            params.pop("encoding_circuit")

        # Parameters of the encoding circuit.
        dict_ec = {
            key: value
            for key, value in params.items()
            if key in self._encoding_circuit.get_params()
        }
        for key in dict_ec.keys():
            params.pop(key)
        if len(dict_ec) > 0:
            self._encoding_circuit.set_params(**dict_ec)
            self.__init__(
                self._encoding_circuit,
                self._executor,
                self._measurement_input,
                self._outer_kernel,
                None,
                self._parameter_seed,
                self._regularization,
                self._caching,
            )

        # Parameters of the QNN.
        dict_qnn = {key: value for key, value in params.items() if key in self._qnn.get_params()}
        for key in dict_qnn.keys():
            params.pop(key)
        if len(dict_qnn) > 0:
            self._qnn.set_params(**dict_qnn)

        if "outer_kernel" in params:
            self._outer_kernel_input = params["outer_kernel"]
            self._set_outer_kernel(self._outer_kernel_input)
            params.pop("outer_kernel")
        else:
            self._outer_kernel_input = outer_kernel_input_backup

        # Parameters of the outer kernel.
        # BUGFIX: the original assigned a stale loop variable ``value`` (left
        # over from a previous loop) instead of ``params[key]`` here, so outer
        # kernel hyper-parameters were set to an arbitrary value.
        valid_keys_outer_kernel = self._outer_kernel.get_params().keys()
        dict_outer_kernel = {
            key: params[key] for key in params.keys() if key in valid_keys_outer_kernel
        }
        for key in dict_outer_kernel.keys():
            params.pop(key)
        if len(dict_outer_kernel) > 0:
            self._outer_kernel.set_params(**dict_outer_kernel)

        if "regularization" in params.keys():
            self._regularization = params["regularization"]
            params.pop("regularization")

        # Restore the trainable parameters if their number did not change.
        if self.num_parameters == num_parameters_backup:
            self._parameters = parameters_backup

        if len(params) > 0:
            raise ValueError("The following parameters could not be assigned:", params)

    @property
    def num_hyper_parameters(self) -> int:
        """The number of hyper-parameters of the outer kernel"""
        return self._outer_kernel.num_hyper_parameters

    @property
    def name_hyper_parameters(self) -> List[str]:
        """The names of the hyper-parameters of the outer kernel"""
        return self._outer_kernel.name_hyper_parameters

    def _set_outer_kernel(self, outer_kernel: Union[str, OuterKernelBase], **kwargs):
        """Private function for setting up the outer kernel.

        Input can be a string for the scikit-learn based outer kernels.

        Args:
            outer_kernel (Union[str, OuterKernelBase]): OuterKernel that is applied to the
                expectation values
            **kwargs: Keyword arguments for the outer kernel
        """
        if isinstance(outer_kernel, str):
            # 'num_qubits' is a kernel-matrix parameter, not an outer-kernel one.
            kwargs.pop("num_qubits", None)
            name = outer_kernel.lower()
            if name == "gaussian":
                self._outer_kernel = GaussianOuterKernel(**kwargs)
            elif name == "matern":
                self._outer_kernel = OuterKernelBase.from_sklearn_kernel(Matern, **kwargs)
            elif name == "expsinesquared":
                self._outer_kernel = OuterKernelBase.from_sklearn_kernel(ExpSineSquared, **kwargs)
            elif name == "rationalquadratic":
                self._outer_kernel = OuterKernelBase.from_sklearn_kernel(
                    RationalQuadratic, **kwargs
                )
            elif name == "dotproduct":
                self._outer_kernel = OuterKernelBase.from_sklearn_kernel(DotProduct, **kwargs)
            elif name == "pairwisekernel":
                self._outer_kernel = OuterKernelBase.from_sklearn_kernel(PairwiseKernel, **kwargs)
            else:
                raise ValueError("Unknown outer kernel: {}".format(outer_kernel))
        elif isinstance(outer_kernel, OuterKernelBase):
            self._outer_kernel = outer_kernel
        else:
            raise ValueError("Unknown type of outer kernel: {}".format(type(outer_kernel)))
|
class GaussianOuterKernel(OuterKernelBase):
    # NOTE: the docstring is now a raw string — previously ``\t`` in ``\text``
    # and ``\r`` in ``\right`` were interpreted as tab / carriage-return
    # control characters, mangling the rendered LaTeX.
    r"""Implementation of the Gaussian outer kernel:

    .. math::
        k(x_i, x_j) = \text{exp}\left(-\gamma |(QNN(x_i)- QNN(x_j)|^2 \right)

    Args:
        gamma (float): hyperparameter :math:`\gamma` of the Gaussian kernel
    """

    def __init__(self, gamma=1.0):
        super().__init__()
        self.gamma = gamma
        self._num_hyper_parameters = 1
        self._name_hyper_parameters = ["gamma"]

    def __call__(
        self, qnn: QNN, parameters: np.ndarray, x: np.ndarray, y: np.ndarray = None
    ) -> np.ndarray:
        """Evaluates the QNN and returns the Gaussian projected kernel.

        Args:
            qnn (QNN): QNN to be evaluated
            parameters (np.ndarray): parameters of the QNN
            x (np.ndarray): input data
            y (np.ndarray): second optional input data

        Returns:
            np.ndarray: Gaussian projected kernel
        """
        param = parameters[: qnn.num_parameters]
        param_op = parameters[qnn.num_parameters :]
        # Single-element parameter vectors are passed to the QNN as plain floats.
        if len(param.shape) == 1 and len(param) == 1:
            param = float(param)
        if len(param_op.shape) == 1 and len(param_op) == 1:
            param_op = float(param_op)
        x_result = qnn.evaluate_f(x, param, param_op)
        y_result = qnn.evaluate_f(y, param, param_op) if y is not None else None
        # RBF with length_scale l = 1/sqrt(2*gamma) equals exp(-gamma * d^2).
        return RBF(length_scale=1.0 / np.sqrt(2.0 * self.gamma))(x_result, y_result)

    def get_params(self, deep: bool = True) -> dict:
        """Returns hyper-parameters and their values of the Gaussian outer kernel.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        return {"gamma": self.gamma}

    def set_params(self, **params) -> None:
        """Sets value of the Gaussian outer kernel hyper-parameters.

        Args:
            params: Hyper-parameters and their values
        """
        valid_params = self.get_params()
        for key, value in params.items():
            if key not in valid_params:
                raise ValueError(
                    f"Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}."
                )
            try:
                setattr(self, key, value)
            # BUGFIX: was a bare ``except:`` which swallowed every error
            # (including KeyboardInterrupt); only a read-only attribute should
            # trigger the private-attribute fallback.
            except AttributeError:
                setattr(self, "_" + key, value)
        return None
|
class QGPC(GaussianProcessClassifier):
    """Quantum Gaussian process classification (QGPC).

    Extends scikit-learn's
    `sklearn.gaussian_process.GaussianProcessClassifier
    <https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessClassifier.html>`_
    to use a quantum kernel. Methods like ``fit`` and ``predict`` are
    inherited from scikit-learn. Additional arguments can be set via
    ``**kwargs``.

    Args:
        quantum_kernel (Union[KernelMatrixBase, str]): The quantum kernel matrix to be used
            for the GP (either a fidelity quantum kernel (FQK)
            or projected quantum kernel (PQK) must be provided)
        **kwargs: Keyword arguments for the quantum kernel matrix, possible arguments
            can be obtained by calling ``get_params()``. Can be used to set for example
            the number of qubits (``num_qubits=``), or (if supported) the number of
            layers (``num_layers=``) of the underlying encoding circuit.

    See Also:
        squlearn.kernel.ml.QSVC : Quantum Support Vector classification.
    """

    def __init__(self, quantum_kernel: KernelMatrixBase, **kwargs) -> None:
        self._quantum_kernel = quantum_kernel
        # Route kwargs that belong to the quantum kernel to the kernel itself;
        # the remainder is forwarded to the scikit-learn base class.
        quantum_kernel_update_params = self.quantum_kernel.get_params().keys() & kwargs.keys()
        if quantum_kernel_update_params:
            self.quantum_kernel.set_params(
                **{key: kwargs[key] for key in quantum_kernel_update_params}
            )
            for key in quantum_kernel_update_params:
                kwargs.pop(key, None)
        super().__init__(**kwargs)
        self.kernel = kernel_wrapper(self._quantum_kernel)

    @classmethod
    def _get_param_names(cls):
        # 'kernel' is managed via the quantum kernel and 'warm_start' is not
        # supported, so both are hidden from the scikit-learn parameter API.
        names = GaussianProcessClassifier._get_param_names()
        names.remove("kernel")
        names.remove("warm_start")
        return names

    def get_params(self, deep: bool = True) -> dict:
        """Returns hyper-parameters and their values of the QGPC class.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = dict()
        for key in self._get_param_names():
            params[key] = getattr(self, key)
        params["quantum_kernel"] = self._quantum_kernel
        if deep:
            params.update(self._quantum_kernel.get_params(deep=deep))
        return params

    def set_params(self, **params) -> "QGPC":
        """Sets value of the QGPC hyper-parameters.

        Args:
            params: Hyper-parameters and their values, e.g. ``num_qubits=2``.

        Returns:
            QGPC: the instance itself.
        """
        # BUGFIX: the method was annotated ``-> None`` although it returns
        # ``self`` (scikit-learn convention); annotation and docs corrected.
        valid_params = self.get_params(deep=True).keys()
        for key in params.keys():
            if key not in valid_params:
                raise ValueError(
                    f"Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}."
                )
        self_params = self.get_params(deep=False).keys() & params.keys()
        for key in self_params:
            try:
                setattr(self, key, params[key])
            except AttributeError:
                # Read-only property: fall back to the private attribute.
                setattr(self, "_" + key, params[key])
        quantum_kernel_params = self._quantum_kernel.get_params().keys() & params.keys()
        if quantum_kernel_params:
            self._quantum_kernel.set_params(
                **{key: params[key] for key in quantum_kernel_params}
            )
        return self

    @property
    def quantum_kernel(self) -> KernelMatrixBase:
        """Returns quantum kernel"""
        return self._quantum_kernel

    @quantum_kernel.setter
    def quantum_kernel(self, quantum_kernel: KernelMatrixBase):
        """Sets quantum kernel and refreshes the wrapped kernel callable"""
        self._quantum_kernel = quantum_kernel
        self.kernel = kernel_wrapper(quantum_kernel)
|
class QKRR(BaseEstimator, RegressorMixin):
    """Quantum Kernel Ridge Regression.

    Implements kernel ridge regression (KRR) [1] with a quantum kernel; this is
    not a scikit-learn wrapper. By setting ``quantum_kernel="precomputed"``,
    ``X`` is assumed to be a kernel matrix (train and test-train), which is
    useful when storing quantum kernel matrices from real backends as numpy
    arrays. Additional arguments can be set via ``**kwargs``.

    Args:
        quantum_kernel (Optional[Union[KernelMatrixBase, str]]): The quantum kernel matrix
            to be used in the KRR pipeline (either a fidelity quantum kernel (FQK)
            or projected quantum kernel (PQK) must be provided), or ``"precomputed"``.
        alpha (Union[float, np.ndarray], default=1.0e-6): Regularization strength;
            must be a positive float. Improves conditioning of the problem and
            assures solvability of the resulting linear system, cf. Ref. [2].
        **kwargs: Keyword arguments for the quantum kernel matrix, possible arguments
            can be obtained by calling ``get_params()``.

    Attributes:
        dual_coeff_ (np.ndarray): Weight vector in kernel space.
        k_train (np.ndarray): Training kernel matrix of shape (n_train, n_train),
            available after calling ``fit()``.
        k_testtrain (np.ndarray): Kernel matrix of shape (n_test, n_train),
            evaluated in the ``predict()`` step.

    References:
        [1] Kevin P. Murphy, "Machine Learning: A Probabilistic Perspective",
        The MIT Press, chapter 14.4.3.

        [2] https://en.wikipedia.org/wiki/Ridge_regression
    """

    def __init__(
        self,
        quantum_kernel: Optional[Union[KernelMatrixBase, str]] = None,
        alpha: Union[float, np.ndarray] = 1e-06,
        **kwargs,
    ) -> None:
        self._quantum_kernel = quantum_kernel
        self.alpha = alpha
        self.X_train = None
        self.k_testtrain = None
        self.k_train = None
        self.dual_coeff_ = None
        # Forward kwargs that match known hyper-parameters (e.g. num_qubits).
        update_params = self.get_params().keys() & kwargs.keys()
        if update_params:
            self.set_params(**{key: kwargs[key] for key in update_params})

    def fit(self, X: np.ndarray, y: np.ndarray):
        """Fit the Quantum Kernel Ridge regression model.

        The QKRR solution is obtained by solving the regularized linear system
        with scipy's Cholesky decomposition for numerical stability.

        Args:
            X (np.ndarray): Training data of shape (n_samples, n_features). If
                quantum_kernel == "precomputed" this is instead a precomputed
                training kernel matrix of shape (n_samples, n_samples).
            y (np.ndarray): Target values or labels of shape (n_samples,)

        Returns:
            self: Returns the instance itself.
        """
        self.X_train = X
        if isinstance(self._quantum_kernel, str):
            if self._quantum_kernel == "precomputed":
                self.k_train = X
            else:
                raise ValueError("Unknown quantum kernel: {}".format(self._quantum_kernel))
        elif isinstance(self._quantum_kernel, KernelMatrixBase):
            self.k_train = self._quantum_kernel.evaluate(x=self.X_train)
        else:
            raise ValueError(
                "Unknown type of quantum kernel: {}".format(type(self._quantum_kernel))
            )
        # Tikhonov-style regularization of the training kernel matrix.
        self.k_train = self.k_train + self.alpha * np.eye(self.k_train.shape[0])
        try:
            L = scipy.linalg.cholesky(self.k_train, lower=True)
            self.dual_coeff_ = scipy.linalg.cho_solve((L, True), y)
        except np.linalg.LinAlgError:
            # Deliberately best-effort: keep the original advisory behavior
            # instead of raising, so hyper-parameter scans do not abort.
            print("Increase regularization parameter alpha")
        return self

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Predict using the Quantum Kernel Ridge model.

        Args:
            X (np.ndarray): Samples of data of shape (n_samples, n_features) on which
                the QKRR model makes predictions. If quantum_kernel == "precomputed"
                this is instead a precomputed (test-train) kernel matrix of shape
                (n_samples, n_samples_fitted).

        Returns:
            np.ndarray: Predicted labels (at X) of shape (n_samples,)
        """
        if self.k_train is None:
            raise ValueError("The fit() method has to be called beforehand.")
        if isinstance(self._quantum_kernel, str):
            if self._quantum_kernel == "precomputed":
                self.k_testtrain = X
            else:
                # BUGFIX (consistency with fit()): an unknown kernel string
                # previously fell through silently, leaving k_testtrain = None
                # and producing an opaque failure in np.dot below.
                raise ValueError("Unknown quantum kernel: {}".format(self._quantum_kernel))
        elif isinstance(self._quantum_kernel, KernelMatrixBase):
            self.k_testtrain = self._quantum_kernel.evaluate(X, self.X_train)
        else:
            raise ValueError(
                "Unknown type of quantum kernel: {}".format(type(self._quantum_kernel))
            )
        prediction = np.dot(self.k_testtrain, self.dual_coeff_)
        return prediction

    def get_params(self, deep: bool = True) -> dict:
        """Returns hyperparameters and their values of the QKRR method.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyperparameters and values.
        """
        params = {"quantum_kernel": self._quantum_kernel, "alpha": self.alpha}
        if deep and isinstance(self._quantum_kernel, KernelMatrixBase):
            params.update(self._quantum_kernel.get_params(deep=deep))
        return params

    def set_params(self, **params) -> "QKRR":
        """Sets value of the QKRR hyperparameters.

        Args:
            params: Hyperparameters and their values, e.g. ``num_qubits=2``.

        Returns:
            QKRR: the instance itself.
        """
        # BUGFIX: the method was annotated ``-> None`` although it returns
        # ``self`` (scikit-learn convention); annotation and docs corrected.
        valid_params = self.get_params()
        for key in params.keys():
            if key not in valid_params:
                raise ValueError(
                    f"Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}."
                )
        self_params = self.get_params(deep=False).keys() & params.keys()
        for key in self_params:
            try:
                setattr(self, key, params[key])
            except AttributeError:
                # Read-only property: fall back to the private attribute.
                setattr(self, "_" + key, params[key])
        if isinstance(self._quantum_kernel, KernelMatrixBase):
            quantum_kernel_params = self._quantum_kernel.get_params().keys() & params.keys()
            if quantum_kernel_params:
                self._quantum_kernel.set_params(
                    **{key: params[key] for key in quantum_kernel_params}
                )
        return self
|
class QSVC(SVC):
'\n Quantum Support Vector Classification\n\n This class is a wrapper of :class:`sklearn.svm.SVC`. It uses a quantum kernel matrix\n to replace the kernel matrix in the :class:`sklearn.svm.SVC` class.\n The parameters of the parent class can be adjusted via ``**kwargs``. See the documentation\n there for additional information about the standard SVC parameters.\n The scikit-learn SVC has kernel specific arguments that are omitted here because they do not\n apply to the quantum kernels. These are\n\n - `kernel`\n - `gamma`\n - `degree`\n - `coef0`\n\n Args:\n quantum_kernel (Union[KernelMatrixBase, str]): The quantum kernel matrix to be used in the SVC. Either\n a fidelity quantum kernel (FQK) or projected quantum kernel (PQK) must be provided. By\n setting quantum_kernel="precomputed", X is assumed to be a kernel matrix\n (train and test-train). This is particularly useful when storing quantum kernel\n matrices from real backends to numpy arrays.\n **kwargs: Possible arguments can be\n obtained by calling ``get_params()``. Notable examples are parameters of the\n :class:`sklearn.svm.SVC` class such as the regularization parameters ``C``\n (float, default=1.0). Additionally, properties of the underlying encoding circuit can be\n adjusted via kwargs such as the number of qubits (``num_qubits``), or (if supported)\n the number of layers (``num_layers``).\n\n See Also\n --------\n squlearn.kernel.ml.QSVR : Quantum Support Vector Regression\n\n **Example**\n\n .. 
code-block::\n\n import numpy as np\n\n from sklearn.datasets import make_moons\n from sklearn.model_selection import train_test_split\n\n from squlearn import Executor\n from squlearn.encoding_circuit import HubregtsenEncodingCircuit\n from squlearn.kernel.ml.qsvc import QSVC\n from squlearn.kernel.matrix import ProjectedQuantumKernel\n\n encoding_circuit = HubregtsenEncodingCircuit(num_qubits=2, num_features=2, num_layers=2)\n kernel = ProjectedQuantumKernel(\n encoding_circuit,\n executor=Executor("statevector_simulator"),\n initial_parameters=np.random.rand(encoding_circuit.num_parameters)\n )\n\n X, y = make_moons(n_samples=100, noise=0.3, random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n qsvc = QSVC(quantum_kernel=kernel)\n qsvc.fit(X_train, y_train)\n print(f"The score on the test set is {qsvc.score(X_test, y_test)}")\n\n Methods:\n --------\n '
def __init__(self, quantum_kernel: Optional[Union[KernelMatrixBase, str]] = None, **kwargs) -> None:
    """Initialize the QSVC wrapper.

    If a quantum kernel object is supplied, any keyword arguments that match
    one of its hyper-parameters are forwarded to the kernel and stripped from
    the kwargs handed to the scikit-learn SVC base class.

    Args:
        quantum_kernel: Quantum kernel matrix object (FQK or PQK), or the
            string ``"precomputed"`` in which case X is treated as a
            precomputed kernel matrix.
        **kwargs: Remaining hyper-parameters of :class:`sklearn.svm.SVC`.
    """
    self.quantum_kernel = quantum_kernel
    if not isinstance(self.quantum_kernel, KernelMatrixBase):
        # "precomputed" (or any non-kernel value): X is assumed to be a kernel matrix
        super().__init__(kernel="precomputed", **kwargs)
        return
    # Route kwargs that belong to the quantum kernel to the kernel itself
    shared_keys = self.quantum_kernel.get_params().keys() & kwargs.keys()
    if shared_keys:
        self.quantum_kernel.set_params(**{key: kwargs[key] for key in shared_keys})
        for key in shared_keys:
            kwargs.pop(key, None)
    super().__init__(kernel=self.quantum_kernel.evaluate, **kwargs)
@classmethod
def _get_param_names(cls):
    """Return SVC's parameter names minus the kernel-specific ones.

    The arguments ``kernel``, ``gamma``, ``degree`` and ``coef0`` do not
    apply to quantum kernels and are therefore stripped from the list.
    """
    kernel_specific = ("kernel", "gamma", "degree", "coef0")
    return [name for name in SVC._get_param_names() if name not in kernel_specific]
def get_params(self, deep: bool = True) -> dict:
    """
    Returns hyper-parameters and their values of the QSVC class.

    Args:
        deep (bool): If True, also the parameters for
            contained objects are returned (default=True).

    Return:
        Dictionary with hyper-parameters and values.
    """
    params = {name: getattr(self, name) for name in self._get_param_names()}
    params["quantum_kernel"] = self.quantum_kernel
    # Deep mode also exposes the quantum kernel's own hyper-parameters
    if deep and isinstance(self.quantum_kernel, KernelMatrixBase):
        params.update(self.quantum_kernel.get_params(deep=deep))
    return params
def set_params(self, **params) -> "QSVC":
    """
    Sets value of the QSVC hyper-parameters.

    Args:
        params: Hyper-parameters and their values, e.g. ``num_qubits=2``.

    Return:
        The QSVC instance itself (scikit-learn ``set_params`` convention).

    Raises:
        ValueError: If a key in ``params`` is not a valid hyper-parameter.
    """
    valid_params = self.get_params(deep=True).keys()
    for key in params.keys():
        if (key not in valid_params):
            raise ValueError(f'Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}.')
    # Hyper-parameters stored directly on this estimator
    self_params = (self.get_params(deep=False).keys() & params.keys())
    for key in self_params:
        try:
            setattr(self, key, params[key])
        except AttributeError:
            # Read-only properties fall back to their private attribute
            setattr(self, ('_' + key), params[key])
    # Remaining matching hyper-parameters belong to the quantum kernel
    if isinstance(self.quantum_kernel, KernelMatrixBase):
        quantum_kernel_params = (self.quantum_kernel.get_params().keys() & params.keys())
        if quantum_kernel_params:
            self.quantum_kernel.set_params(**{key: params[key] for key in quantum_kernel_params})
    return self
|
class QSVR(SVR):
    """Quantum Support Vector Regression.

    This class is a wrapper of :class:`sklearn.svm.SVR`. It uses a quantum kernel matrix
    to replace the kernel matrix in the :class:`sklearn.svm.SVR` class. The parameters of
    the parent class can be adjusted via ``**kwargs``.
    See the documentation there for additional information about the standard SVR parameters.
    The scikit-learn SVR has kernel specific arguments that are omitted here because they do
    not apply to the quantum kernels. These are

    - `kernel`
    - `gamma`
    - `degree`
    - `coef0`

    Args:
        quantum_kernel (Union[KernelMatrixBase, str]): The quantum kernel matrix to be used
            in the SVR. Either a fidelity quantum kernel (FQK) or projected quantum kernel
            (PQK) must be provided. By setting quantum_kernel="precomputed", X is assumed
            to be a kernel matrix (train and test-train). This is particularly useful when
            storing quantum kernel matrices from real backends to numpy arrays.
        **kwargs: Possible arguments can be obtained by calling ``get_params()``. Notable
            examples are parameters of the :class:`sklearn.svm.SVR` class such as the
            regularization parameter ``C`` (float, default=1.0) or ``epsilon``
            (float, default=0.1). Additionally, properties of the underlying encoding
            circuit can be adjusted via kwargs such as the number of qubits
            (``num_qubits``), or (if supported) the number of layers (``num_layers``).

    See Also
    --------
    squlearn.kernel.ml.QSVC : Quantum Support Vector Classification

    **Example**

    .. code-block::

        import numpy as np

        from sklearn.model_selection import train_test_split

        from squlearn import Executor
        from squlearn.encoding_circuit import HubregtsenEncodingCircuit
        from squlearn.kernel.ml.qsvr import QSVR
        from squlearn.kernel.matrix import ProjectedQuantumKernel

        encoding_circuit = HubregtsenEncodingCircuit(num_qubits=2, num_features=1, num_layers=2)
        kernel = ProjectedQuantumKernel(
            encoding_circuit,
            executor=Executor("statevector_simulator"),
            initial_parameters=np.random.rand(encoding_circuit.num_parameters))

        X = np.linspace(0, np.pi, 100)
        y = np.sin(X)
        X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

        qsvr = QSVR(quantum_kernel=kernel)
        qsvr.fit(X_train, y_train)
        print(f"The score on the test set is {qsvr.score(X_test, y_test)}")

    Methods:
    --------
    """

    def __init__(
        self, quantum_kernel: Optional[Union[KernelMatrixBase, str]] = None, **kwargs
    ) -> None:
        self.quantum_kernel = quantum_kernel
        if isinstance(self.quantum_kernel, KernelMatrixBase):
            # Forward kwargs that are hyper-parameters of the quantum kernel to the kernel
            quantum_kernel_update_params = (
                self.quantum_kernel.get_params().keys() & kwargs.keys()
            )
            if quantum_kernel_update_params:
                self.quantum_kernel.set_params(
                    **{key: kwargs[key] for key in quantum_kernel_update_params}
                )
                for key in quantum_kernel_update_params:
                    kwargs.pop(key, None)
            super().__init__(kernel=self.quantum_kernel.evaluate, **kwargs)
        else:
            # "precomputed": X is expected to already be a kernel matrix
            super().__init__(kernel="precomputed", **kwargs)

    @classmethod
    def _get_param_names(cls):
        # Strip the kernel-specific SVR arguments that do not apply to quantum kernels
        names = SVR._get_param_names()
        names.remove("kernel")
        names.remove("gamma")
        names.remove("degree")
        names.remove("coef0")
        return names

    def get_params(self, deep: bool = True) -> dict:
        """
        Returns hyper-parameters and their values of the QSVR class.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = dict()
        for key in self._get_param_names():
            params[key] = getattr(self, key)
        params["quantum_kernel"] = self.quantum_kernel
        if deep and isinstance(self.quantum_kernel, KernelMatrixBase):
            params.update(self.quantum_kernel.get_params(deep=deep))
        return params

    def set_params(self, **params) -> "QSVR":
        """
        Sets value of the QSVR hyper-parameters.

        Args:
            params: Hyper-parameters and their values, e.g. ``num_qubits=2``.

        Return:
            The QSVR instance itself (scikit-learn ``set_params`` convention).

        Raises:
            ValueError: If a key in ``params`` is not a valid hyper-parameter.
        """
        valid_params = self.get_params(deep=True).keys()
        for key in params.keys():
            if key not in valid_params:
                raise ValueError(
                    f'Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}.'
                )
        # Hyper-parameters stored directly on this estimator
        self_params = self.get_params(deep=False).keys() & params.keys()
        for key in self_params:
            try:
                setattr(self, key, params[key])
            except AttributeError:
                # Read-only properties fall back to their private attribute
                setattr(self, "_" + key, params[key])
        # Remaining matching hyper-parameters belong to the quantum kernel
        if isinstance(self.quantum_kernel, KernelMatrixBase):
            quantum_kernel_params = self.quantum_kernel.get_params().keys() & params.keys()
            if quantum_kernel_params:
                self.quantum_kernel.set_params(
                    **{key: params[key] for key in quantum_kernel_params}
                )
        return self
|
class KernelLossBase:
    """Abstract base class for quantum-kernel loss functions.

    Args:
        quantum_kernel (KernelMatrixBase): Quantum kernel object (either FQK or PQK)
            whose parameters the loss is evaluated for.
    """

    def __init__(self, quantum_kernel: KernelMatrixBase) -> None:
        self._quantum_kernel = quantum_kernel

    def compute(self):
        """Compute the loss value; must be implemented by subclasses."""
        raise NotImplementedError
|
class KernelOptimizerBase:
    """Abstract base class defining the kernel-optimizer interface.

    Args:
        loss (KernelLossBase): Loss function to be used for the kernel optimization.
        optimizer (OptimizerBase): Optimizer from squlearn.optimizers used for finding
            the minimum of the respective loss function.
        initial_parameters (Optional[Sequence[float]]): Initial guess for the encoding
            circuit's trainable parameters which are to be optimized.
    """

    def __init__(
        self,
        loss: KernelLossBase = None,
        optimizer: OptimizerBase = None,
        initial_parameters: Optional[Sequence[float]] = None,
    ) -> None:
        self._loss = loss
        self._optimizer = optimizer
        self._initial_parameters = initial_parameters

    def run_optimization(self, X: np.ndarray, y: np.ndarray = None):
        """Start the optimization; must be implemented by subclasses.

        Args:
            X (np.ndarray): Data set features.
            y (np.ndarray): Data set labels.
        """
        raise NotImplementedError
|
class KernelOptimizer(KernelOptimizerBase):
    """Quantum kernel optimizer.

    This class can be used to optimize the variational parameters of a quantum kernel.

    Args:
        loss (KernelLossBase): The loss function to be minimized.
        optimizer (OptimizerBase): The optimizer to be used.
        initial_parameters (Optional[Sequence[float]]): Initial parameters for the optimizer.

    **Example**

    .. code-block::

        from squlearn import Executor
        from squlearn.encoding_circuit import HubregtsenEncodingCircuit
        from squlearn.kernel.matrix import FidelityKernel
        from squlearn.optimizers import Adam
        from squlearn.kernel.optimization import NLL
        enc_circ = HubregtsenEncodingCircuit(num_qubits=num_qubits, num_features=num_features, num_layers=2)
        q_kernel = FidelityKernel(encoding_circuit=enc_circ, executor=Executor("statevector_simulator"))
        adam = Adam(options={"maxiter": 20, "lr": 0.1})
        nll_loss = NLL(quantum_kernel=q_kernel, sigma=noise_std**2)
        optimizer = KernelOptimizer(loss=nll_loss, optimizer=adam,
                                    initial_parameters=np.random.rand(enc_circ.num_parameters))
        opt_result = optimizer.run_optimization(X=X_train, y=Y_train)
        optimal_parameters = opt_result.x
        q_kernel.assign_parameters(optimal_parameters)

    Methods:
    ----------
    """

    def __init__(
        self,
        loss: KernelLossBase = None,
        optimizer: OptimizerBase = None,
        initial_parameters: Optional[Sequence[float]] = None,
    ) -> None:
        super().__init__(loss, optimizer, initial_parameters)
        self._quantum_kernel = loss._quantum_kernel
        self._opt_result = None
        # NOTE(review): attribute name appears to be a typo of "_optimizer_evals";
        # kept unchanged since external code may reference it.
        self._optimier_evals = None
        self._optimal_value = None
        self._optimal_point = None
        self._optimal_parameters = None
        # Default to the kernel's current parameters if no initial guess was given
        if self._initial_parameters is None:
            self._initial_parameters = self._quantum_kernel.parameters

    def run_optimization(self, X: np.ndarray, y: np.ndarray = None):
        """Run the optimization and return the result.

        Args:
            X (np.ndarray): The input data.
            y (np.ndarray): The labels.

        Returns:
            OptimizeResult: The optimization result.

        Raises:
            ValueError: If the quantum kernel has no trainable parameters.
        """
        if self._quantum_kernel.num_parameters == 0:
            raise ValueError(
                'Quantum kernel cannot be fit because there are no training parameters specified.'
            )
        # Bind the data so the optimizer only varies the kernel parameters
        objective = partial(self._loss.compute, data=X, labels=y)
        result = self._optimizer.minimize(fun=objective, x0=self._initial_parameters)
        self._optimal_value = result.fun
        self._optimal_point = result.x
        self._opt_result = result
        return self._opt_result
|
class NLL(KernelLossBase):
    """Negative log likelihood loss function.

    Computes the negative log likelihood for a given quantum kernel
    :math:`K_{θ}` with variational parameters :math:`θ`.
    The definition of the function is taken from Equation 5.8 Chapter 5.4 of Ref. [1]:

    .. math::

        L(θ) =
        -\\frac{1}{2} log(|K_{θ} + σI|)-\\frac{1}{2} y^{T}(K_{θ} + σI)^{-1}y-\\frac{n}{2} log(2π)

    Args:
        quantum_kernel (KernelMatrixBase): The quantum kernel to be used
            (either a fidelity quantum kernel (FQK)
            or projected quantum kernel (PQK) must be provided).
        sigma: (float), default=0.0: Hyperparameter for the regularization strength.

    References
    ----------
    [1]: `Carl E. Rasmussen and Christopher K.I. Williams,
        "Gaussian Processes for Machine Learning",
        MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_

    Methods:
    --------
    """

    def __init__(self, quantum_kernel: KernelMatrixBase, sigma=0.0):
        super().__init__(quantum_kernel)
        self._sigma = sigma

    def compute(
        self, parameter_values: Sequence[float], data: np.ndarray, labels: np.ndarray
    ) -> float:
        """Compute the negative log likelihood loss function.

        Args:
            parameter_values (Sequence[float]): Values for the variational
                quantum kernel parameters.
            data (np.ndarray): The training data to be used for the kernel matrix.
            labels (np.ndarray): The training labels.

        Returns:
            float: The negative log likelihood loss function.
        """
        self._quantum_kernel.assign_parameters(parameter_values)
        # Regularized kernel matrix: K + sigma * I
        gram = self._quantum_kernel.evaluate(data)
        gram = gram + self._sigma * np.eye(gram.shape[0])
        # Cholesky factorization K = L L^T; solve K alpha = y via two triangular solves
        chol = scipy.linalg.cholesky(gram, lower=True)
        tmp = scipy.linalg.solve_triangular(chol, labels, lower=True)
        alpha = scipy.linalg.solve_triangular(chol.T, tmp, lower=False)
        # 0.5*log|K| = sum(log(diag(L))) since |K| = prod(diag(L))^2
        neg_log_lh = (
            np.sum(np.log(np.diagonal(chol)))
            + (0.5 * labels.T) @ alpha
            + 0.5 * len(data) * np.log(2.0 * np.pi)
        )
        return neg_log_lh.reshape(-1)
|
class TargetAlignment(KernelLossBase):
    """Target alignment loss function.

    Computes the (negative) kernel-target alignment for a given quantum kernel
    :math:`K_{θ}` with variational parameters :math:`θ`.
    The definition of the function is taken from Equation (27,28) of [1]:

    .. math::

        TA(K_{θ}) =
        \\frac{\\sum_{i,j} K_{θ}(x_i, x_j) y_i y_j}
        {\\sqrt{\\sum_{i,j} K_{θ}(x_i, x_j)^2 \\sum_{i,j} y_i^2 y_j^2}}

    Args:
        quantum_kernel (KernelMatrixBase): The quantum kernel to be used
            (either a fidelity quantum kernel (FQK)
            or projected quantum kernel (PQK) must be provided).

    References
    -----------
    [1]: T. Hubregtsen et al.,
        "Training Quantum Embedding Kernels on Near-Term Quantum Computers",
        `arXiv:2105.02276v1 (2021) <https://arxiv.org/abs/2105.02276>`_.

    Methods:
    --------
    """

    def __init__(self, quantum_kernel: KernelMatrixBase):
        super().__init__(quantum_kernel)

    def compute(
        self,
        parameter_values: Sequence[float],
        data: np.ndarray,
        labels: np.ndarray,
        rescale_class_labels=True,
    ) -> float:
        """Compute the (negative) target alignment.

        Args:
            parameter_values (Sequence[float]): Values for the variational
                quantum kernel parameters.
            data (np.ndarray): The training data to be used for the kernel matrix.
            labels (np.ndarray): The training labels.
            rescale_class_labels (bool), default=True: Whether to rescale the
                class labels by their respective class sizes.

        Returns:
            float: The negative target alignment.
        """
        self._quantum_kernel.assign_parameters(parameter_values)
        gram = self._quantum_kernel.evaluate(data)
        if rescale_class_labels:
            # Balance the classes: divide each label by the size of its class
            n_plus = np.count_nonzero(np.array(labels) == 1)
            n_minus = len(labels) - n_plus
            rescaled = np.array([(y / n_plus) if y == 1 else (y / n_minus) for y in labels])
        else:
            rescaled = np.array(labels)
        target = np.outer(rescaled, rescaled)
        inner_product = np.sum(gram * target)
        norm = np.sqrt(np.sum(gram * gram) * np.sum(target * target))
        # Negated so that minimizing the loss maximizes the alignment
        return -(inner_product / norm)
|
class ObservableDerivatives:
    """Class for calculating derivatives of observables.

    The derivatives are calculated by automatic differentiation of the parameters in the
    expectation operator. Squaring of the operator is also implemented.
    The class can be applied either to a single operator or to a list of operators.
    Results are cached for faster evaluation (optional).

    Strings recognized by :meth:`get_derivative`:

    * ``"O"`` - expectation operator :math:`\\hat{O}`
    * ``"OO"`` - squared observable :math:`\\hat{O}^2`
    * ``"dop"`` or ``"Odop"`` - first-order derivative :math:`\\frac{d}{dp}\\hat{O}(p)`
    * ``"dopdop"`` or ``"Odopdop"`` - second-order derivative
      :math:`\\frac{d^2}{dp^2}\\hat{O}(p)`
    * ``"OOdop"`` - first-order derivative of the squared observable
    * ``"OOdopdop"`` - second-order derivative of the squared observable
    * ``"I"`` - identity operator with the same number of qubits as the observable

    Args:
        observable (Union[ObservableBase, list]): Expectation operator or list of
            observables from which the derivatives are obtained.
        optree_caching (bool): If True, the OpTree structures of the observable and
            its derivatives are cached.

    Attributes:
        parameter_vector (ParameterVector): Parameter vector used in the observable
        num_parameters (int): Total number of trainable parameters in the observable
        num_operators (int): Number of operators in case of multiple observables
    """

    def __init__(self, observable: Union[ObservableBase, list], optree_caching=True):
        self._observable = observable
        if isinstance(self._observable, ObservableBase):
            # Single observable with its own parameter vector
            self.multiple_output = False
            self._num_operators = 1
            self._parameter_vector = ParameterVector("p_op", observable.num_parameters)
            optree = OpTreeOperator(self._observable.get_operator(self._parameter_vector))
        else:
            # List of observables: one shared parameter vector, sliced per operator
            self.multiple_output = True
            self._num_operators = len(observable)
            try:
                n_oper = 0
                for op in self._observable:
                    n_oper = n_oper + op.num_parameters
                self._parameter_vector = ParameterVector("p_op", n_oper)
                observable_list = []
                ioff = 0
                for op in self._observable:
                    observable_list.append(
                        OpTreeOperator(op.get_operator(self._parameter_vector[ioff:]))
                    )
                    ioff = ioff + op.num_parameters
                optree = OpTreeList(observable_list)
            except Exception as exc:
                raise ValueError("Unknown structure of the Expectation operator!") from exc
        self._optree_start = optree
        self._optree_cache = {}
        self._optree_caching = optree_caching
        if self._optree_caching:
            self._optree_cache["O"] = optree

    def get_derivative(self, derivative: Union[str, tuple, list]) -> OpTreeElementBase:
        """Determine the derivative of the observable.

        Args:
            derivative (str or tuple): String or tuple of parameters for specifying the
                derivation. See :class:`ObservableDerivatives` for the recognized strings.

        Return:
            Differentiated observable in OpTree format.

        Raises:
            ValueError: If an unknown string command is supplied.
            TypeError: If ``derivative`` is neither a string, tuple nor list.
        """
        if isinstance(derivative, str):
            if derivative == "I":
                # BUGFIX: for a list of observables, self._observable has no num_qubits
                # attribute; use the first operator's qubit count instead.
                if self.multiple_output:
                    num_qubits = self._observable[0].num_qubits
                else:
                    num_qubits = self._observable.num_qubits
                measure_op = OpTreeOperator(SparsePauliOp("I" * num_qubits))
            elif derivative == "O":
                measure_op = self._optree_start
            elif derivative == "OO":
                measure_op = self.get_operator_squared()
            elif derivative in ("dop", "Odop"):
                measure_op = self._differentiation_from_tuple(
                    self._optree_start.copy(), (self._parameter_vector,), "O"
                )
            elif derivative in ("dopdop", "Odopdop"):
                measure_op = self._differentiation_from_tuple(
                    self._optree_start.copy(),
                    (self._parameter_vector, self._parameter_vector),
                    "O",
                )
            elif derivative == "OOdop":
                measure_op = self._differentiation_from_tuple(
                    self.get_operator_squared(), (self._parameter_vector,), "OO"
                )
            elif derivative == "OOdopdop":
                measure_op = self._differentiation_from_tuple(
                    self.get_operator_squared(),
                    (self._parameter_vector, self._parameter_vector),
                    "OO",
                )
            else:
                raise ValueError("Unknown string command:", derivative)
        elif isinstance(derivative, tuple):
            measure_op = self._differentiation_from_tuple(self._optree_start, derivative, "O")
        elif isinstance(derivative, list):
            measure_op = self._differentiation_from_tuple(self._optree_start, (derivative,), "O")
        else:
            raise TypeError("Input is neither string, list nor tuple, but:", type(derivative))
        measure_op.replace = False
        return measure_op

    def _differentiation_from_tuple(
        self, optree: OpTreeElementBase, diff_tuple: tuple, observable_label: str
    ) -> OpTreeElementBase:
        """Recursive routine for automatically differentiating the observable.

        Args:
            optree (OpTreeElementBase): OpTree structure of the observable.
            diff_tuple (tuple): Tuple containing ParameterVectors or ParameterExpressions.
            observable_label (str): String for labeling the observable.

        Return:
            The differentiated OpTree expression.
        """

        def helper_hash(diff):
            # Lists are unhashable -> convert recursively into tagged tuples
            if isinstance(diff, list):
                return ("list",) + tuple(helper_hash(d) for d in diff)
            elif isinstance(diff, tuple):
                return tuple(helper_hash(d) for d in diff)
            else:
                return diff

        if diff_tuple == ():
            return optree

        cache_key = (helper_hash(diff_tuple), observable_label)
        if self._optree_caching and cache_key in self._optree_cache:
            # BUGFIX: the lookup previously used the raw diff_tuple as key, which
            # mismatches the stored helper_hash key (and is unhashable for lists)
            return self._optree_cache[cache_key].copy()

        # Differentiate iteratively, innermost parameters first
        measure = operator_differentiation(
            self._differentiation_from_tuple(optree, diff_tuple[1:], observable_label),
            diff_tuple[0],
        )
        if self._optree_caching:
            self._optree_cache[cache_key] = measure
        return measure

    def get_operator_squared(self):
        """Returns the squared form of the observable OO=O^2"""
        if self._optree_caching and "OO" in self._optree_cache:
            return self._optree_cache["OO"].copy()

        def recursive_squaring(op):
            # Square the operator leaf-wise; sums/lists keep their structure
            if isinstance(op, OpTreeOperator):
                return OpTreeOperator(op.operator.power(2))
            elif isinstance(op, SparsePauliOp):
                # BUGFIX: SparsePauliOp has no .operator attribute; square it directly
                return op.power(2)
            elif isinstance(op, OpTreeSum):
                return OpTreeSum(
                    [recursive_squaring(child) for child in op.children],
                    op.factor,
                    op.operation,
                )
            elif isinstance(op, OpTreeList):
                return OpTreeList(
                    [recursive_squaring(child) for child in op.children],
                    op.factor,
                    op.operation,
                )
            else:
                raise ValueError("Unknown type in recursive_squaring:", type(op))

        O2 = OpTree.simplify(recursive_squaring(self._optree_start))
        if self._optree_caching:
            self._optree_cache["OO"] = O2
        return O2

    @property
    def parameter_vector(self):
        """Parameter vector of the observable"""
        return self._parameter_vector

    @property
    def num_parameters(self):
        """Total number of trainable parameters in the observable"""
        return len(self._parameter_vector)

    @property
    def num_operators(self):
        """Number operators in case of multiple observables"""
        return self._num_operators

    def assign_parameters(
        self, operator: OpTreeElementBase, parameters: np.ndarray
    ) -> OpTreeElementBase:
        """Assign parameters to a derivative that is obtained from this class.

        Args:
            operator (OperatorBase): Operator to which the parameters are assigned.
            parameters (np.ndarray): Parameter values that replace the parameters
                in the operator.

        Return:
            Operator with assigned parameters; an ``OpTreeList`` if several
            parameter sets were supplied.
        """
        param_op_inp, multi_param_op = adjust_parameters(parameters, len(self._parameter_vector))
        return_list = [
            OpTree.assign_parameters(operator, dict(zip(self._parameter_vector, p)))
            for p in param_op_inp
        ]
        if multi_param_op:
            return OpTreeList(return_list)
        return return_list[0]
|
def operator_differentiation(
    optree: OpTreeElementBase,
    parameters: Union[ParameterVector, list, ParameterExpression],
) -> OpTreeElementBase:
    """Differentiate a given observable w.r.t. its parameters.

    Args:
        optree (OpTreeElementBase): OpTree structure of the observable; can also be a
            list of observables.
        parameters (Union[ParameterVector, list, ParameterExpression]): Parameters that
            are used for the differentiation.

    Returns:
        The differentiated observable as an OpTree; ``None`` if no parameters are
        given, or an empty ``OpTreeList`` for an empty parameter collection.

    Raises:
        TypeError: If the supplied parameters stem from different parameter vectors.
    """
    # Nothing to differentiate. BUGFIX: use "is None" instead of "== None", which is
    # the correct identity test and avoids invoking a custom __eq__ on qiskit objects.
    if parameters is None or parameters == []:
        return None
    # A single parameter element is treated as a one-element list
    if isinstance(parameters, ParameterVectorElement):
        parameters = [parameters]
    if len(parameters) == 0:
        return OpTreeList([])
    # All parameters must come from the same parameter vector (same base name)
    params_name = parameters[0].name.split("[", 1)[0]
    for p in parameters:
        if p.name.split("[", 1)[0] != params_name:
            raise TypeError("Differentiable variables are not the same type.")
    return OpTree.simplify(OpTree.derivative.differentiate(optree, parameters))
|
class CustomObservable(ObservableBase):
    """Class for defining a custom observable.

    The operator is supplied as a string of Pauli operators, e.g.
    ``operator_string='ZI'`` for a two qubit operator with a Z operator on the
    second qubit. Note that the index of the qubits is reversed, i.e. the first
    qubit is the last character in the string, similar to the Qiskit
    computational state numbering.

    Multiple operators that are summed can be specified by a list of strings,
    e.g. ``operator_string=['ZZ', 'XX']``.

    Args:
        num_qubits (int): Number of qubits.
        operator_string (Union[str, list[str], tuple[str]]): String of operator to
            measure. Also list or tuples of strings are allowed for multiple operators.
        parameterized (bool): If True, the operator is parameterized.

    Attributes:
        num_qubits (int): Number of qubits.
        num_parameters (int): Number of trainable parameters in the custom operator.
        operator_string (Union[str, list[str], tuple[str]]): String of operator to measure.
        parameterized (bool): If True, the operator is parameterized.
    """

    def __init__(
        self,
        num_qubits: int,
        operator_string: Union[str, list[str], tuple[str]],
        parameterized: bool = False,
    ) -> None:
        super().__init__(num_qubits)
        self.operator_string = operator_string
        if isinstance(self.operator_string, str):
            # Normalize a single string to a one-element list
            self.operator_string = [self.operator_string]
        self.parameterized = parameterized
        for pauli_string in self.operator_string:
            if len(pauli_string) != self.num_qubits:
                raise ValueError(
                    "Supplied string has not the same size as the number of qubits, "
                    "please add missing identities as 'I'"
                )
            for character in pauli_string:
                if character not in ["I", "X", "Y", "Z"]:
                    raise ValueError("Only Pauli operators I, X, Y, Z are allowed.")

    @property
    def num_parameters(self):
        """Returns the number of trainable parameters in the custom operator"""
        return len(self.operator_string) if self.parameterized else 0

    def get_params(self, deep: bool = True) -> dict:
        """
        Returns hyper-parameters and their values of the custom operator.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params["operator_string"] = self.operator_string
        params["parameterized"] = self.parameterized
        return params

    def get_pauli(self, parameters: Union[ParameterVector, np.ndarray] = None) -> SparsePauliOp:
        """
        Function for generating the SparsePauliOp expression of the custom operator.

        Args:
            parameters (Union[ParameterVector, np.ndarray]): Parameters of the custom operator.

        Returns:
            SparsePauliOp expression of the specified custom operator.
        """
        operators = list(self.operator_string)
        if self.parameterized:
            # One coefficient per operator, cycling through the supplied parameters
            nparam = len(parameters)
            coefficients = [parameters[index % nparam] for index in range(len(operators))]
            return SparsePauliOp(operators, coefficients)
        return SparsePauliOp(operators)
|
class IsingHamiltonian(ObservableBase):
    """Implementation of Ising type Hamiltonians.

    **Equation for the full Ising Hamiltonian:**

    .. math::
        \\hat{H} = a\\hat{I} + \\sum_i b_i \\hat{Z}_i + \\sum_i c_i \\hat{X}_i +
        \\sum_{i>j} d_{ij} \\hat{Z}_i \\hat{Z}_j

    where :math:`a`, :math:`b_i`, :math:`c_i`, and :math:`d_{ij}` are trainable parameters.

    The options allow to switch terms on and off and to set the parameters
    additionally to be equal for the same kind of term.

    Example for creating an Ising Hamiltonian with no :math:`\\hat{Z}` term,
    :math:`\\hat{X}` term with equal parameters, and the :math:`\\hat{Z}\\hat{Z}`
    term with different parameters:

    .. jupyter-execute::

        from squlearn.observables import IsingHamiltonian
        ob = IsingHamiltonian(num_qubits=2, I='S', Z='N', X='S', ZZ='F')
        print(ob)

    The default Ising Hamiltonian reads:

    .. math::
        \\hat{H} = a\\hat{I} + \\sum_i b_i \\hat{Z}_i +
        \\sum_{i>j} d_{ij} \\hat{Z}_i \\hat{Z}_j

    Args:
        num_qubits (int): number of qubits
        I (str): parameter options for identity term. ``I='S'`` trainable parameter,
            ``I='N'`` for zero
        Z (str): parameter options for Z term. ``Z='S'`` same parameter in the sum
            (:math:`\\forall ~i:~ b_i=b`), ``Z='N'`` for zero, ``Z='F'`` all
            :math:`b_i` values are considered
        X (str): parameter options for X term. ``X='S'`` same parameter in the sum
            (:math:`\\forall~ i: ~c_i=c`), ``X='N'`` for zero, ``X='F'`` all
            :math:`c_i` values are considered
        ZZ (str): parameter options for ZZ term. ``ZZ='S'`` same parameter in the sum
            (:math:`\\forall~ i,j: ~d_{ij}=d`), ``ZZ='N'`` for zero, ``ZZ='F'`` all
            :math:`d_{ij}` values are considered

    Attributes:
        num_qubits (int): number of qubits
        num_parameters (int): number of trainable parameters in the Ising Hamiltonian
        I (str): parameter options for identity term
        Z (str): parameter options for Z term
        X (str): parameter options for X term
        ZZ (str): parameter options for ZZ term
    """

    def __init__(
        self, num_qubits: int, I: str = "S", Z: str = "F", X: str = "N", ZZ: str = "F"
    ) -> None:
        super().__init__(num_qubits)
        self.I = I
        self.Z = Z
        self.X = X
        self.ZZ = ZZ
        # BUGFIX: error messages previously lacked a space at the literal seam
        # ("are" + "supported" -> "aresupported")
        if self.I not in ["S", "N"]:
            raise ValueError(
                "Only the characters 'S' and 'N' are supported as characters for I"
            )
        # BUGFIX: the original condition checked self.ZZ twice and never validated self.X
        if (
            (self.Z not in ["F", "S", "N"])
            or (self.X not in ["F", "S", "N"])
            or (self.ZZ not in ["F", "S", "N"])
        ):
            raise ValueError(
                "Only the characters 'F','S','N' are supported as characters for Z, ZZ, and X"
            )

    @property
    def num_parameters(self):
        """Returns the number of free parameters in the observable"""
        num_parameters = 0
        if self.I == "S":
            num_parameters += 1
        if self.Z == "S":
            num_parameters += 1
        elif self.Z == "F":
            num_parameters += self.num_qubits
        if self.X == "S":
            num_parameters += 1
        elif self.X == "F":
            num_parameters += self.num_qubits
        if self.ZZ == "S":
            num_parameters += 1
        elif self.ZZ == "F":
            # One parameter per unordered qubit pair
            num_parameters += (self.num_qubits * (self.num_qubits - 1)) // 2
        return num_parameters

    def get_params(self, deep: bool = True) -> dict:
        """
        Returns hyper-parameters and their values of the Ising Hamiltonian operator.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params["I"] = self.I
        params["Z"] = self.Z
        params["X"] = self.X
        params["ZZ"] = self.ZZ
        return params

    def get_pauli(self, parameters: Union[ParameterVector, np.ndarray]) -> SparsePauliOp:
        """
        Function for generating the SparsePauliOp expression of the Ising Hamiltonian.

        Args:
            parameters (Union[ParameterVector, np.ndarray]): parameters of the Ising
                Hamiltonian.

        Returns:
            SparsePauliOp expression of the specified Ising Hamiltonian.

        Raises:
            ValueError: If no Pauli terms are present (all options set to 'N').
        """

        def gen_double_ising_string(i, j):
            # Pauli string with Z on qubits i and j (qubit 0 is the right-most character)
            H = "I" * self.num_qubits
            H = H[(i + 1):] + "Z" + H[:i]
            if i != j:
                H = H[: (self.num_qubits - j) - 1] + "Z" + H[(self.num_qubits - j):]
            return H

        def gen_single_ising_string(i, pauli):
            # Pauli string with the given operator on qubit i
            H = "I" * self.num_qubits
            H = H[(i + 1):] + pauli + H[:i]
            return H

        nparam = len(parameters)
        ioff = 0  # running index into the parameter vector (modulo nparam)
        op_list = []
        coeff_list = []
        if self.I == "S":
            op_list.append("I" * self.num_qubits)
            coeff_list.append(parameters[ioff % nparam])
            ioff += 1
        if self.Z == "S" or self.Z == "F":
            for i in range(self.num_qubits):
                op_list.append(gen_single_ising_string(i, "Z"))
                coeff_list.append(parameters[ioff % nparam])
                if self.Z == "F":
                    # 'F': an individual parameter for every qubit
                    ioff += 1
            if self.Z == "S":
                # 'S': one shared parameter for all qubits
                ioff += 1
        if self.X == "S" or self.X == "F":
            for i in range(self.num_qubits):
                op_list.append(gen_single_ising_string(i, "X"))
                coeff_list.append(parameters[ioff % nparam])
                if self.X == "F":
                    ioff += 1
            if self.X == "S":
                ioff += 1
        if self.ZZ == "S" or self.ZZ == "F":
            for i in range(self.num_qubits):
                for j in range(i):
                    op_list.append(gen_double_ising_string(i, j))
                    coeff_list.append(parameters[ioff % nparam])
                    if self.ZZ == "F":
                        ioff += 1
            if self.ZZ == "S":
                ioff += 1
        if len(op_list) == 0:
            raise ValueError("No Pauli terms available in the Ising Hamiltonian.")
        return SparsePauliOp(op_list, np.array(coeff_list))
|
class SinglePauli(ObservableBase):
    """Observable constructed from a single Pauli operator of a single Qubit.

    **Equation for Z Pauli operator:**

    .. math::

        \\hat{H} = \\hat{Z_i} \\qquad \\text{or} \\qquad \\hat{H} = \\theta\\hat{Z_i}~~~~
        \\text{ (parameterized)}

    Can be parameterized or not; the four Pauli operators
    :math:`\\hat{X},\\hat{Y},\\hat{Z}` and :math:`\\hat{I}` are supported.

    Args:
        num_qubits (int): Number of qubits.
        qubit (int): Qubit on which the Pauli operator acts.
        op_str (str): Pauli operator to measure. Must be one of ``'I'``, ``'X'``,
            ``'Y'``, ``'Z'`` (default: ``'Z'``).
        parameterized (bool): If True, the operator is parameterized (default: False).

    Attributes:
        num_qubits (int): Number of qubits.
        num_parameters (int): Number of trainable parameters in the single Pauli operator.
        qubit (int): Qubit on which the Pauli operator acts.
        op_str (str): Pauli operator to measure.
        parameterized (bool): If True, the operator is parameterized.
    """

    def __init__(
        self, num_qubits: int, qubit: int = 0, op_str: str = "Z", parameterized: bool = False
    ) -> None:
        super().__init__(num_qubits)
        self.qubit = qubit
        self.op_str = op_str
        self.parameterized = parameterized
        if self.op_str not in ["I", "X", "Y", "Z"]:
            raise ValueError("Specified operator not supported")

    @property
    def num_parameters(self):
        """The number of trainable parameters in the single Pauli operator"""
        return 1 if self.parameterized else 0

    def get_params(self, deep: bool = True) -> dict:
        """
        Returns hyper-parameters and their values of the Single Pauli operator.

        Args:
            deep (bool): If True, also the parameters for
                contained objects are returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params["qubit"] = self.qubit
        params["op_str"] = self.op_str
        params["parameterized"] = self.parameterized
        return params

    def get_pauli(self, parameters: Union[ParameterVector, np.ndarray]) -> SparsePauliOp:
        """
        Function for generating the SparsePauliOp expression of the single Pauli operator.

        Args:
            parameters (Union[ParameterVector, np.ndarray]): Parameters of the single
                Pauli operator.

        Return:
            SparsePauliOp expression of the specified single Pauli operator.

        Raises:
            ValueError: If the specified qubit index is out of range.
        """
        qubit = self.qubit
        if not 0 <= qubit < self.num_qubits:
            raise ValueError("Specified qubit out of range")
        # Qiskit ordering: qubit 0 is the right-most character of the Pauli string
        pauli_str = "I" * (self.num_qubits - qubit - 1) + self.op_str + "I" * qubit
        if self.parameterized:
            return SparsePauliOp([pauli_str], [parameters[0]])
        return SparsePauliOp([pauli_str])
|
class SingleProbability(ObservableBase):
    """Observable measuring the probability of state 0 or 1 of one qubit.

    Implemented as the projector :math:`0.5(I_i+Z_i)` for state 0 and
    :math:`0.5(I_i-Z_i)` for state 1; can optionally be parameterized.

    Args:
        num_qubits (int): Number of qubits.
        qubit (int): Qubit to measure the probability of.
        one_state (bool): If True, measure the probability of state 1,
            otherwise state 0 (default: False).
        parameterized (bool): If True, the operator is parameterized
            (default: False).

    Attributes:
        num_qubits (int): Number of qubits.
        num_parameters (int): Number of trainable parameters.
        qubit (int): Qubit to measure the probability of.
        one_state (bool): If True, measure state 1, otherwise state 0.
        parameterized (bool): If True, the operator is parameterized.
    """

    def __init__(
        self,
        num_qubits: int,
        qubit: int = 0,
        one_state: bool = False,
        parameterized: bool = False,
    ) -> None:
        super().__init__(num_qubits)
        self.qubit = qubit
        self.one_state = one_state
        self.parameterized = parameterized

    @property
    def num_parameters(self):
        """Number of trainable parameters in the single probability operator."""
        return 1 if self.parameterized else 0

    def get_params(self, deep: bool = True) -> dict:
        """Return hyper-parameters and their values of the operator.

        Args:
            deep (bool): If True, parameters of contained objects are also
                returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params.update(
            qubit=self.qubit, one_state=self.one_state, parameterized=self.parameterized
        )
        return params

    def get_pauli(
        self, parameters: Union[ParameterVector, np.ndarray] = None
    ) -> SparsePauliOp:
        """Generate the SparsePauliOp expression of the probability operator.

        Args:
            parameters (Union[ParameterVector, np.ndarray]): Parameters of the
                single probability operator.

        Return:
            SparsePauliOp expression of the specified operator.

        Raises:
            ValueError: If the configured qubit index is out of range.
        """
        pos = self.qubit
        if not 0 <= pos < self.num_qubits:
            raise ValueError("Specified qubit out of range")
        ident = "I" * self.num_qubits
        z_string = ident[pos + 1 :] + "Z" + ident[:pos]
        coeff = 0.5 * parameters[0] if self.parameterized else 0.5
        # |1><1| = 0.5*(I - Z), |0><0| = 0.5*(I + Z)
        z_coeff = -coeff if self.one_state else coeff
        return SparsePauliOp([ident, z_string], [coeff, z_coeff])
|
class SummedPaulis(ObservableBase):
    """Observable built from a sum of single Pauli operators.

    For the Z operator this realizes :math:`a I + \\sum_i b_i Z_i`. Multiple
    Pauli kinds can be requested via a tuple, e.g. ``op_str=("X", "Z")``, each
    contributing its own sum. With ``full_sum=False`` one shared parameter is
    used per Pauli kind instead of one per qubit.

    Args:
        num_qubits (int): Number of qubits.
        op_str (Union[str, tuple[str]]): Pauli operator(s) to measure; each
            must be one of ``"I"``, ``"X"``, ``"Y"``, ``"Z"`` (default: ``"Z"``).
        full_sum (bool): If False, only one parameter per Pauli kind is used
            (default: True).
        include_identity (bool): If True, an identity term is included in the
            sum (default: True).

    Attributes:
        num_qubits (int): Number of qubits.
        num_parameters (int): Number of trainable parameters.
        op_str (Union[str, tuple[str]]): Pauli operator(s) that are measured.
        full_sum (bool): If False, only one parameter per Pauli kind is used.
        include_identity (bool): If True, the identity term is included.
    """

    def __init__(
        self,
        num_qubits: int,
        op_str: Union[str, tuple[str]] = "Z",
        full_sum: bool = True,
        include_identity: bool = True,
    ) -> None:
        super().__init__(num_qubits)
        self.op_str = op_str
        self.full_sum = full_sum
        self.include_identity = include_identity
        # Iterating a plain string yields its characters, a tuple its entries.
        for symbol in self.op_str:
            if symbol not in ("I", "X", "Y", "Z"):
                raise ValueError("Only Pauli operators I, X, Y, Z are allowed.")

    @property
    def num_parameters(self):
        """Number of trainable parameters in the summed Pauli operator"""
        count = 1 if self.include_identity else 0
        per_kind = self.num_qubits if self.full_sum else 1
        return count + len(self.op_str) * per_kind

    def get_params(self, deep: bool = True) -> dict:
        """Return hyper-parameters and their values of the summed Pauli operator.

        Args:
            deep (bool): If True, parameters of contained objects are also
                returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params.update(
            op_str=self.op_str,
            full_sum=self.full_sum,
            include_identity=self.include_identity,
        )
        return params

    def get_pauli(self, parameters: Union[ParameterVector, np.ndarray]) -> SparsePauliOp:
        """Generate the SparsePauliOp expression of the summed Paulis operator.

        Args:
            parameters (Union[ParameterVector, np.ndarray]): Parameters of the
                summed Paulis operator.

        Return:
            SparsePauliOp expression of the specified summed Paulis operator.
        """

        def _single(qubit: int, pauli: str) -> str:
            # Little-endian Pauli string with `pauli` at position `qubit`.
            blank = "I" * self.num_qubits
            return blank[qubit + 1 :] + pauli + blank[:qubit]

        nparam = len(parameters)
        offset = 0
        op_list = []
        coeff_list = []
        if self.include_identity:
            op_list.append("I" * self.num_qubits)
            coeff_list.append(parameters[offset % nparam])
            offset += 1
        for pauli in self.op_str:
            for qubit in range(self.num_qubits):
                op_list.append(_single(qubit, pauli))
                coeff_list.append(parameters[offset % nparam])
                if self.full_sum:
                    offset += 1
            if not self.full_sum:
                offset += 1
        return SparsePauliOp(op_list, np.array(coeff_list))
|
class SummedProbabilities(ObservableBase):
    """Observable summing single-qubit probabilities of binary states.

    For a sum of 0-states this realizes
    :math:`a I + \\sum_i b_i (|0\\rangle\\langle 0|)_i`, with the projectors
    implemented as :math:`|0\\rangle\\langle 0| = 0.5(I+Z)` and
    :math:`|1\\rangle\\langle 1| = 0.5(I-Z)`.

    Args:
        num_qubits (int): Number of qubits.
        one_state (bool): If False the :math:`|0\\rangle\\langle 0|` state is
            measured, if True the :math:`|1\\rangle\\langle 1|` state
            (default: False).
        full_sum (bool): If False, the parameter is shared by all states
            (default: True).
        include_identity (bool): If True, an identity term with its own
            parameter is included in the sum (default: True).

    Attributes:
        num_qubits (int): Number of qubits.
        num_parameters (int): Number of trainable parameters.
        one_state (bool): Which binary state is measured.
        full_sum (bool): If False, the parameter is shared by all states.
        include_identity (bool): If True, the identity term is included.
    """

    def __init__(
        self,
        num_qubits: int,
        one_state=False,
        full_sum: bool = True,
        include_identity: bool = True,
    ) -> None:
        super().__init__(num_qubits)
        self.one_state = one_state
        self.full_sum = full_sum
        self.include_identity = include_identity

    @property
    def num_parameters(self):
        """The number of trainable parameters in the summed probabilities operator"""
        num_param = 1 if self.include_identity else 0
        num_param += self.num_qubits if self.full_sum else 1
        return num_param

    def get_params(self, deep: bool = True) -> dict:
        """Return hyper-parameters and their values of the operator.

        Args:
            deep (bool): If True, parameters of contained objects are also
                returned (default=True).

        Return:
            Dictionary with hyper-parameters and values.
        """
        params = super().get_params()
        params["one_state"] = self.one_state
        params["full_sum"] = self.full_sum
        params["include_identity"] = self.include_identity
        return params

    def get_pauli(
        self, parameters: Union[ParameterVector, np.ndarray] = None
    ) -> SparsePauliOp:
        """Generate the SparsePauliOp expression of the summed probabilities operator.

        Args:
            parameters (Union[ParameterVector, np.ndarray]): Parameters of the
                summed probabilities operator.

        Returns:
            SparsePauliOp expression of the specified operator.
        """
        nparam = len(parameters)
        op_list = []
        coeff_list = []
        # Bug fix: ioff was previously only assigned inside the
        # include_identity branch, so include_identity=False raised a
        # NameError in the loop below.
        ioff = 0
        if self.include_identity:
            op_list.append("I" * self.num_qubits)
            coeff_list.append(parameters[ioff % nparam])
            ioff += 1
        for i in range(self.num_qubits):
            I = "I" * self.num_qubits
            Z = I[(i + 1):] + "Z" + I[:i]
            op_list.append(I)
            coeff_list.append(parameters[ioff % nparam] * 0.5)
            op_list.append(Z)
            # |1><1| = 0.5*(I - Z), |0><0| = 0.5*(I + Z)
            sign = -0.5 if self.one_state else 0.5
            coeff_list.append(parameters[ioff % nparam] * sign)
            if self.full_sum:
                ioff += 1
        return SparsePauliOp(op_list, coeff_list)
|
class ApproxGradientBase:
    """Abstract base class for approximate gradient evaluators."""

    def gradient(self, x: np.ndarray) -> np.ndarray:
        """Calculate the approximated gradient at the given input.

        Args:
            x (np.ndarray): Input location at which the gradient is evaluated.

        Returns:
            Approximated gradient with the same dimension as x (np.ndarray).
        """
        raise NotImplementedError()

    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Alias for :meth:`gradient`."""
        return self.gradient(x)
|
class FiniteDiffGradient(ApproxGradientBase):
    """Finite-differences gradient evaluator.

    Supported formulas:

    * Forward: ``[f(x+eps)-f(x)]/eps``
    * Backwards: ``[f(x)-f(x-eps)]/eps``
    * Central (default): ``[f(x+eps)-f(x-eps)]/(2*eps)``
    * Five-point: ``[-f(x+2eps)+8f(x+eps)-8f(x-eps)+f(x-2eps)]/(12*eps)``

    Args:
        fun (callable): Callable function for the gradient evaluation.
        eps (float): Step for the finite differences.
        formula (str): Type of finite differences; one of ``'forward'``,
            ``'backwards'``, ``'central'``, ``'five-point'``.
    """

    _FORMULAS = ("central", "forward", "backwards", "five-point")

    def __init__(self, fun: callable, eps: float = 0.01, formula: str = "central") -> None:
        self.fun = fun
        self.eps = eps
        self.formula = formula
        if formula not in self._FORMULAS:
            raise ValueError("Wrong value of formula: " + formula)

    def gradient(self, x: np.ndarray) -> np.ndarray:
        """Calculate the approximated gradient at the given input.

        Args:
            x (np.ndarray): Input location at which the gradient is evaluated.

        Returns:
            Approximated gradient with the same dimension as x (np.ndarray).

        Raises:
            ValueError: If ``x`` is not one-dimensional or the formula is invalid.
        """
        if len(x.shape) != 1:
            raise ValueError("Unsupported shape of x!")
        if self.formula not in self._FORMULAS:
            raise ValueError("Wrong value of type: " + self.formula)
        dim = len(x)
        grad = np.zeros(dim)
        # One-sided formulas reuse a single evaluation at the base point.
        if self.formula in ("forward", "backwards"):
            f_base = self.fun(x)
        for k in range(dim):
            step = np.zeros(dim)
            step[k] = self.eps
            if self.formula == "forward":
                grad[k] = (self.fun(x + step) - f_base) / self.eps
            elif self.formula == "backwards":
                grad[k] = (f_base - self.fun(x - step)) / self.eps
            elif self.formula == "central":
                grad[k] = (self.fun(x + step) - self.fun(x - step)) / (2.0 * self.eps)
            else:  # five-point
                grad[k] = (
                    -1.0 * self.fun(x + 2.0 * step)
                    + 8.0 * self.fun(x + 1.0 * step)
                    - 8.0 * self.fun(x - 1.0 * step)
                    + 1.0 * self.fun(x - 2.0 * step)
                ) / (12.0 * self.eps)
        return grad
|
class StochasticPerturbationGradient(ApproxGradientBase):
    """Stochastic perturbation gradient estimator (as used in SPSA).

    Estimates ``g_i = [f(x+eps*r) - f(x-eps*r)] / (2*eps*r_i)`` with a random
    vector ``r``.

    Args:
        fun (callable): Callable function for the gradient evaluation.
        eps (float): Step for the difference.
        seed (int): Seed for the random vector generation.
    """

    def __init__(self, fun: callable, eps: float = 0.1, seed: int = 0) -> None:
        self.fun = fun
        self.eps = eps
        self.rng = np.random.default_rng(seed=seed)

    def set_eps(self, eps) -> None:
        """Set the eps value (often adjusted dynamically during optimization)."""
        self.eps = eps

    def gradient(self, x: np.ndarray) -> np.ndarray:
        """Calculate the approximated gradient at the given input.

        Args:
            x (np.ndarray): Input location at which the gradient is evaluated.

        Returns:
            Approximated gradient with the same dimension as x (np.ndarray).

        Raises:
            ValueError: If ``x`` is not one-dimensional.
        """
        if len(x.shape) != 1:
            raise ValueError("Unsupported shape of x!")
        direction = self.rng.random(len(x))
        shift = self.eps * direction
        delta = self.fun(x + shift) - self.fun(x - shift)
        return np.divide(delta, 2.0 * self.eps * direction)
|
def default_callback(*args):
    """No-op callback used when the caller does not supply one."""
    return None
|
class OptimizerResult:
    """Container holding the final result of an optimization run."""

    def __init__(self):
        # Optimal parameter vector (None until an optimization has run).
        self.x = None
        # Number of iterations performed.
        self.nit = 0
        # Final value of the objective function.
        self.fun = 0.0
|
class OptimizerBase(abc.ABC):
    """Abstract base class for QNN optimizers."""

    def minimize(
        self, fun: callable, x0: np.ndarray, grad: callable = None, bounds=None
    ) -> OptimizerResult:
        """Minimize the given function.

        Args:
            fun (callable): Function to minimize.
            x0 (numpy.ndarray): Initial guess.
            grad (callable): Gradient of the function to minimize.
            bounds (sequence): Bounds for the parameters.

        Returns:
            Result of the optimization in :class:`OptimizerResult` format.
        """
        raise NotImplementedError()

    def set_callback(self, callback):
        """Register the callback function used by the optimizer."""
        self.callback = callback
|
class IterativeMixin:
    """Mixin providing an iteration counter for iteration-based optimizers."""

    def __init__(self):
        # Number of completed iterations.
        self.iteration = 0
|
class StepwiseMixin(IterativeMixin):
    """Mixin for optimizers that can execute individual update steps."""

    def step(self, **kwargs):
        """Perform one update step; must be implemented by subclasses."""
        raise NotImplementedError()
|
class SGDMixin(StepwiseMixin, abc.ABC):
    """Mixin for stochastic gradient descent based optimizers."""

    def step(self, **kwargs):
        """Perform one SGD update step.

        Args:
            x: Current value.
            grad: Precomputed gradient.

        Returns:
            Updated x.

        Raises:
            TypeError: If ``x`` or ``grad`` is not supplied.
        """
        if "x" not in kwargs:
            raise TypeError("x argument is missing in step function.")
        if "grad" not in kwargs:
            raise TypeError("grad argument is missing in step function.")
        current = kwargs["x"]
        updated = current + self._get_update(kwargs["grad"])
        self.iteration += 1
        self._update_lr()
        return updated

    def reset(self):
        """Reset the object to its initial state.

        This function takes no parameters.

        Returns:
            None: This function does not return anything.
        """
        pass

    @abc.abstractmethod
    def _get_update(self, grad: np.ndarray) -> np.ndarray:
        """Return the parameter update for a given gradient."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _update_lr(self) -> None:
        """Update the learning rate."""
        raise NotImplementedError()
|
class WrappedOptimizerBase(OptimizerBase, IterativeMixin):
    """Base class for wrapped optimizers.

    Overrides :meth:`set_callback` so that every callback invocation also
    increments the iteration counter.
    """

    def set_callback(self, callback):
        """Install *callback* wrapped so each call bumps the iteration counter."""

        def _counting_callback(*args):
            # Count the iteration before delegating to the user callback.
            self.iteration += 1
            callback(*args)

        super().set_callback(_counting_callback)
|
class SLSQP(WrappedOptimizerBase):
    """Wrapper class for scipy's SLSQP implementation.

    Args:
        options (dict): Options for the SLSQP optimizer.
            The options are the same as for :meth:`scipy.optimize.minimize`.
            A ``'tol'`` entry is extracted and passed separately to scipy.
    """

    def __init__(self, options: dict = None, callback=default_callback):
        super().__init__()
        # Work on a copy so the caller's options dict is not mutated by the
        # pop below (the original implementation removed 'tol' in place).
        options = dict(options) if options is not None else {}
        self.tol = options.pop("tol", 1e-06)
        self.options = options
        self.set_callback(callback)

    def minimize(
        self, fun: callable, x0: np.ndarray, grad: callable = None, bounds=None
    ) -> OptimizerResult:
        """Minimize a function with the SLSQP optimizer (wrapped from scipy).

        Args:
            fun (callable): Function to minimize.
            x0 (numpy.ndarray): Initial guess.
            grad (callable): Gradient of the function to minimize.
            bounds (sequence): Bounds for the parameters.

        Returns:
            Result of the optimization in :class:`OptimizerResult` format.
        """
        scipy_result = minimize(
            fun,
            jac=grad,
            x0=x0,
            method="SLSQP",
            options=self.options,
            bounds=bounds,
            tol=self.tol,
            callback=self.callback,
        )
        result = OptimizerResult()
        result.x = scipy_result.x
        result.nit = scipy_result.nit
        result.fun = scipy_result.fun
        return result
|
class LBFGSB(WrappedOptimizerBase):
    """Wrapper class for scipy's L-BFGS-B implementation.

    Args:
        options (dict): Options for the L-BFGS-B optimizer.
            The options are the same as for :meth:`scipy.optimize.minimize`.
            A ``'tol'`` entry is extracted and passed separately to scipy.
    """

    def __init__(self, options: dict = None, callback=default_callback):
        super().__init__()
        # Work on a copy so the caller's options dict is not mutated by the
        # pop below (the original implementation removed 'tol' in place).
        options = dict(options) if options is not None else {}
        self.tol = options.pop("tol", 1e-06)
        self.options = options
        self.set_callback(callback)

    def minimize(
        self, fun: callable, x0: np.ndarray, grad: callable = None, bounds=None
    ) -> OptimizerResult:
        """Minimize a function with the L-BFGS-B optimizer (wrapped from scipy).

        Args:
            fun (callable): Function to minimize.
            x0 (numpy.ndarray): Initial guess.
            grad (callable): Gradient of the function to minimize.
            bounds (sequence): Bounds for the parameters.

        Returns:
            Result of the optimization in :class:`OptimizerResult` format.
        """
        scipy_result = minimize(
            fun,
            jac=grad,
            x0=x0,
            method="L-BFGS-B",
            options=self.options,
            bounds=bounds,
            tol=self.tol,
            callback=self.callback,
        )
        result = OptimizerResult()
        result.x = scipy_result.x
        result.nit = scipy_result.nit
        result.fun = scipy_result.fun
        return result
|
class SPSA(WrappedOptimizerBase):
    """Wrapper class for Qiskit's SPSA implementation.

    Args:
        options (dict): Options for the SPSA optimizer.
            The options are the same as for :meth:`qiskit.algorithms.optimizers.SPSA`.
    """

    def __init__(self, options: dict = None, callback=default_callback):
        super().__init__()
        if options is None:
            options = {}
        self.options = options
        self.maxiter = options.get("maxiter", 100)
        self.blocking = options.get("blocking", False)
        self.allowed_increase = options.get("allowed_increase", None)
        self.trust_region = options.get("trust_region", False)
        self.learning_rate = options.get("learning_rate", None)
        self.perturbation = options.get("perturbation", None)
        self.last_avg = options.get("last_avg", 1)
        self.resamplings = options.get("resamplings", 1)
        self.perturbation_dims = options.get("perturbation_dims", None)
        self.second_order = options.get("second_order", False)
        self.regularization = options.get("regularization", None)
        self.hessian_delay = options.get("hessian_delay", 0)
        self.lse_solver = options.get("lse_solver", None)
        self.initial_hessian = options.get("initial_hessian", None)
        # Bug fix: install the iteration-counting wrapper from
        # WrappedOptimizerBase. The original code called set_callback() first
        # and then overwrote self.callback with the raw callback, which
        # discarded the wrapper and broke the iteration counter.
        self.set_callback(callback)

    def minimize(
        self, fun: callable, x0: np.ndarray, grad: callable = None, bounds=None
    ) -> OptimizerResult:
        """Minimize a function using Qiskit's SPSA optimizer.

        Args:
            fun (callable): Function to minimize.
            x0 (numpy.ndarray): Initial guess.
            grad (callable): Gradient of the function to minimize.
            bounds (sequence): Bounds for the parameters.

        Returns:
            Result of the optimization in :class:`OptimizerResult` format.
        """
        spsa = qiskit_optimizers.SPSA(
            maxiter=self.maxiter,
            blocking=self.blocking,
            allowed_increase=self.allowed_increase,
            trust_region=self.trust_region,
            learning_rate=self.learning_rate,
            perturbation=self.perturbation,
            last_avg=self.last_avg,
            resamplings=self.resamplings,
            perturbation_dims=self.perturbation_dims,
            second_order=self.second_order,
            regularization=self.regularization,
            hessian_delay=self.hessian_delay,
            lse_solver=self.lse_solver,
            initial_hessian=self.initial_hessian,
            callback=self.callback,
        )
        result_qiskit = spsa.minimize(fun=fun, x0=x0, jac=grad, bounds=bounds)
        result = OptimizerResult()
        result.x = result_qiskit.x
        result.nit = result_qiskit.nit
        result.fun = result_qiskit.fun
        return result
|
def calc_var_dg(qnn, x, param_op, n_sample=100, p_lim=None, p_val=None, p_index=None, verbose=1, seed=0):
    """Calculate the variance and the mean absolute value of the gradient of a QNN.

    Args:
        qnn: QNN object from which the gradient statistics are computed.
        x: Single value or array of the x values of the QNN.
        param_op: Values of the cost operator.
        n_sample (int): Number of samples for the variance computation (default: 100).
        p_lim (tuple): Limits of the uniform parameter sampling (default: (-pi, pi)).
        p_val: Optional array containing the parameters for sampling; if None,
            the parameters are chosen randomly (default).
        p_index: Optional array of parameter indices used for the derivative;
            if None (default), the indices are chosen randomly.
        verbose (int): Verbosity of the method (currently unused here).
        seed (int): Random seed for the random operations.

    Returns:
        Tuple of (variance of the gradient values,
        mean of the absolute gradient values).
    """
    # Temporarily seed the global NumPy RNG and restore the previous state
    # afterwards so this function does not perturb outside random sequences.
    seed_backup = np.random.get_state()
    np.random.seed(seed)
    if p_lim is None:
        p_lim = (-np.pi, np.pi)
    if p_val is not None:
        (p_val, multi) = adjust_parameters(p_val, qnn.num_parameters)
        if not isinstance(p_val, np.ndarray):
            p = np.array(p_val)
        else:
            p = p_val
        if p.shape[1] != qnn.num_parameters:
            raise ValueError("Wrong size of the input p_val")
    else:
        p = np.random.uniform(low=p_lim[0], high=p_lim[1], size=(n_sample, qnn.num_parameters))
    if p_index is None:
        p_index = np.random.randint(0, qnn.num_parameters, size=n_sample)
    np.random.set_state(seed_backup)
    grad_val = []
    # Loop variable renamed from ``iter`` to avoid shadowing the builtin.
    for isample in range(p.shape[0]):
        grad_val.append(
            qnn.evaluate_diff_tuple(
                (qnn.parameters[p_index[isample]],), x, p[isample], param_op
            )
        )
    np_array = np.array(grad_val).flatten()
    return (np.var(np_array), np.mean(np.abs(np_array)))
|
def get_barren_slope(pqc_func, cost_op_func, x, QI, num_qubits, layer_fac=5, n_sample=100):
    """Calculate the gradient variance for different numbers of qubits and fit
    a linear regression to log(variance).

    Args:
        pqc_func: Function returning the initialized pqc class for a given
            number of qubits and layers: ``pqc_func(num_qubits, num_layers)``.
        cost_op_func: Function returning the initialized cost_op class for a
            given number of qubits: ``cost_op_func(num_qubits)``.
        x: Single value or array of the x values.
        QI: Quantum Instance for evaluating the quantum circuits.
        num_qubits: Either a list of the considered qubit counts or an integer
            that triggers a list of [2, ..., num_qubits] qubits.
        layer_fac (int): Number of layers is num_qubits * layer_fac (default: 5).
        n_sample (int): Number of samples for the variance computation (default: 100).

    Returns:
        Model containing the linear regression
        (based on :class:`sklearn.linear_model.LinearRegression`)
        and a numpy array with the variance values.
    """
    if isinstance(num_qubits, int):
        num_qubits_ = np.arange(2, num_qubits + 1)
    else:
        num_qubits_ = num_qubits
    var = []
    mean = []
    for iqubits in num_qubits_:
        number_of_layers = layer_fac * iqubits
        pqc_ = pqc_func(iqubits, number_of_layers)
        cost_op_ = cost_op_func(iqubits)
        qnn = QNN(pqc_, cost_op_, QI)
        # Bug fix: calc_var_dg returns (variance, mean); the original
        # unpacking order was swapped, storing the variance as the mean
        # and vice versa.
        (var_val, mean_val) = calc_var_dg(
            qnn,
            x=x,
            param_op=np.ones(cost_op_.get_number_of_parameters()),
            n_sample=n_sample,
        )
        var.append(var_val)
        mean.append(mean_val)
    model = LinearRegression()
    model.fit(np.array(num_qubits_).reshape(-1, 1), np.log(np.array(var)))
    model.x_val = np.array(num_qubits_).reshape(-1, 1)
    model.y_val = np.log(np.array(var))
    return (model, np.array(var))
|
def get_barren_slop_from_model(model):
    """Return the slope of the linear regression of the barren plateau fit.

    Args:
        model: Linear regression model created by :func:`get_barren_slope`.

    Returns:
        Slope (first coefficient) of the linear regression.
    """
    slope = model.coef_[0]
    return slope
|
def get_barren_plot_from_model(model, plt):
    """Plot the regression output and the measured values semilogarithmically.

    Args:
        model: Linear regression model created by :func:`get_barren_slope`.
        plt: matplotlib.pyplot object.

    Returns:
        matplotlib handles of the two plots.
    """
    # The regression was fitted on log(variance); undo the log for plotting.
    regression_curve = np.exp(model.predict(model.x_val))
    measured_curve = np.exp(model.y_val)
    return [
        plt.semilogy(model.x_val, regression_curve),
        plt.semilogy(model.x_val, measured_curve),
    ]
|
def get_barren_layer(pqc_func, cost_op_func, x, QI, num_qubits, num_layers, n_sample=100):
    """Calculate the gradient variance for different numbers of qubits and layers.

    Can be used to create a plot visualizing the plateauing for specific
    numbers of layers.

    Args:
        pqc_func: Function returning the initialized pqc class for a given
            number of qubits and layers: ``pqc_func(num_qubits, num_layers)``.
        cost_op_func: Function returning the initialized cost_op class for a
            given number of qubits: ``cost_op_func(num_qubits)``.
        x: Single value or array of the x values.
        QI: Quantum Instance for evaluating the quantum circuits.
        num_qubits: Either a list of the considered qubit counts or an integer
            that triggers a list of [2, ..., num_qubits] qubits.
        num_layers: Either a list of the considered layer counts or an integer
            that triggers a list of [1, 6, 11, ...] up to num_layers.
        n_sample (int): Number of samples for the variance computation (default: 100).

    Returns:
        Two dictionaries keyed by qubit count with the variance and the mean
        absolute value for the given layers.
    """
    if isinstance(num_qubits, int):
        num_qubits_ = np.arange(2, num_qubits + 1)
    else:
        num_qubits_ = num_qubits
    if isinstance(num_layers, int):
        num_layers_ = np.arange(1, num_layers + 1, 5)
    else:
        num_layers_ = num_layers
    var = {}
    mean = {}
    for iqubits in num_qubits_:
        qubit_var = []
        qubit_mean = []
        for ilayers in num_layers_:
            number_of_layers = ilayers
            pqc_ = pqc_func(iqubits, number_of_layers)
            cost_op_ = cost_op_func(iqubits)
            qnn = QNN(pqc_, cost_op_, QI)
            # Bug fix: calc_var_dg returns (variance, mean); the original
            # unpacking order was swapped.
            (var_val, mean_val) = calc_var_dg(
                qnn,
                x=x,
                param_op=np.ones(cost_op_.get_number_of_parameters()),
                n_sample=n_sample,
            )
            qubit_var.append(var_val)
            qubit_mean.append(mean_val)
        var[iqubits] = qubit_var
        mean[iqubits] = qubit_mean
    return (var, mean)
|
def get_barren_layer_plot(var, num_layers, plt):
    """Create a barren plateau visualization with the number of layers on the
    X-axis as a semilogarithmic plot of the gradient variance
    (see :func:`get_barren_layer`).

    Args:
        var: Dictionary mapping qubit counts to the variances per layer.
        num_layers: Either a list of the considered layer counts or an integer
            that triggers a list of [1, 6, 11, ...] up to num_layers.
        plt: matplotlib.pyplot object.

    Returns:
        Dictionary of handles of the different curves, keyed by qubit count.
    """
    if isinstance(num_layers, int):
        layers = np.arange(1, num_layers + 1, 5)
    else:
        layers = num_layers
    return {qubits: plt.semilogy(layers, variances) for qubits, variances in var.items()}
|
class BaseQNN(BaseEstimator, ABC):
'Base Class for Quantum Neural Networks.\n\n Args:\n encoding_circuit : Parameterized quantum circuit in encoding circuit format\n operator : Operator that are used in the expectation value of the QNN. Can be a list for\n multiple outputs.\n executor : Executor instance\n optimizer : Optimizer instance\n param_ini : Initialization values of the parameters of the PQC\n param_op_ini : Initialization values of the cost operator\n batch_size : Number of data points in each batch, for SGDMixin optimizers\n epochs : Number of epochs of SGD to perform, for SGDMixin optimizers\n shuffle : If True, data points get shuffled before each epoch (default: False),\n for SGDMixin optimizers\n opt_param_op : If True, operators parameters get optimized\n variance : Variance factor\n parameter_seed : Seed for the random number generator for the parameter initialization\n caching : If True, the results of the QNN are cached.\n pretrained : Set to true if the supplied parameters are already trained.\n callback (Union[Callable, str, None], default=None): A callback for the optimization loop.\n Can be either a Callable, "pbar" (which uses a :class:`tqdm.tqdm` process bar) or None.\n If None, the optimizers (default) callback will be used.\n '
    def __init__(self, encoding_circuit: EncodingCircuitBase, operator: Union[(ObservableBase, list[ObservableBase])], executor: Executor, loss: LossBase, optimizer: OptimizerBase, param_ini: Union[(np.ndarray, None)]=None, param_op_ini: Union[(np.ndarray, None)]=None, batch_size: int=None, epochs: int=None, shuffle: bool=None, opt_param_op: bool=True, variance: Union[(float, Callable)]=None, shot_control: ShotControlBase=None, parameter_seed: Union[(int, None)]=0, caching: bool=True, pretrained: bool=False, callback: Union[(Callable, str, None)]=None, **kwargs) -> None:
        """Initialize the QNN base class: circuit/operator parameters, the low-level QNN and callbacks."""
        super().__init__()
        self.encoding_circuit = encoding_circuit
        self.operator = operator
        self.loss = loss
        self.optimizer = optimizer
        self.variance = variance
        self.shot_control = shot_control
        self.parameter_seed = parameter_seed
        # Circuit parameters: generate seeded random values unless supplied.
        # A pretrained model must provide explicit initial parameters.
        if (param_ini is None):
            self.param_ini = encoding_circuit.generate_initial_parameters(seed=parameter_seed)
            if pretrained:
                raise ValueError('If pretrained is True, param_ini must be provided!')
        else:
            self.param_ini = param_ini
        self._param = self.param_ini.copy()
        # Operator parameters: for a list of operators, each operator gets its
        # own seed (parameter_seed + i + 1) and the results are concatenated.
        if (param_op_ini is None):
            if pretrained:
                raise ValueError('If pretrained is True, param_op_ini must be provided!')
            if isinstance(operator, list):
                self.param_op_ini = np.concatenate([operator.generate_initial_parameters(seed=((parameter_seed + i) + 1)) for (i, operator) in enumerate(operator)])
            else:
                self.param_op_ini = operator.generate_initial_parameters(seed=(parameter_seed + 1))
        else:
            self.param_op_ini = param_op_ini
        self._param_op = self.param_op_ini.copy()
        # batch_size/epochs/shuffle only have an effect for SGD-type optimizers.
        if ((not isinstance(optimizer, SGDMixin)) and any(((param is not None) for param in [batch_size, epochs, shuffle]))):
            warn(f'{optimizer.__class__.__name__} is not of type SGDMixin, thus batch_size, epochs and shuffle will be ignored.')
        self.batch_size = batch_size
        self.epochs = epochs
        self.shuffle = shuffle
        self.opt_param_op = opt_param_op
        self.caching = caching
        self.pretrained = pretrained
        self.executor = executor
        self._qnn = QNN(self.encoding_circuit, self.operator, executor, result_caching=self.caching)
        # NOTE(review): shot_control was already assigned above; this second
        # assignment looks redundant — confirm before removing.
        self.shot_control = shot_control
        if (self.shot_control is not None):
            self.shot_control.set_executor(self.executor)
        # Callback handling: a callable is passed through to the optimizer;
        # the string 'pbar' installs a progress-bar callback; anything else
        # (other strings / non-callables) is rejected.
        self.callback = callback
        if self.callback:
            if callable(self.callback):
                self.optimizer.set_callback(self.callback)
            elif (self.callback == 'pbar'):
                # _pbar is only created here; the progress bar itself is
                # presumably instantiated later during fitting.
                self._pbar = None
                if (isinstance(self.optimizer, SGDMixin) and self.batch_size):
                    self._total_iterations = self.epochs
                else:
                    self._total_iterations = self.optimizer.options.get('maxiter', 100)
                def pbar_callback(*args):
                    # Advance the progress bar by one optimizer iteration.
                    self._pbar.update(1)
                self.optimizer.set_callback(pbar_callback)
            elif isinstance(self.callback, str):
                raise ValueError(f'Unknown callback string value {self.callback}')
            else:
                raise TypeError(f'Unknown callback type {type(self.callback)}')
        # Apply any recognized hyper-parameters passed via **kwargs.
        update_params = (self.get_params().keys() & kwargs.keys())
        if update_params:
            self.set_params(**{key: kwargs[key] for key in update_params})
        self._is_fitted = self.pretrained
def __getstate__(self):
state = self.__dict__.copy()
del state['_pbar']
return state
def __setstate__(self, state) -> None:
state.update({'_pbar': None})
return super().__setstate__(state)
    @property
    def param(self) -> np.ndarray:
        'Parameters of the PQC.'
        # Current (possibly trained) circuit parameters; reset by fit().
        return self._param
    @property
    def param_op(self) -> np.ndarray:
        'Parameters of the cost operator.'
        # Current (possibly trained) observable parameters; reset by fit().
        return self._param_op
    @property
    def num_parameters(self) -> int:
        'Number of parameters of the PQC.'
        # Delegated to the low-level QNN wrapper.
        return self._qnn.num_parameters
    @property
    def num_parameters_observable(self) -> int:
        'Number of parameters of the observable.'
        # Delegated to the low-level QNN wrapper.
        return self._qnn.num_parameters_observable
def fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray=None) -> None:
'Fit a new model to data.\n\n This method will reinitialize the models parameters and fit it to the provided data.\n\n Args:\n X: Input data\n y: Labels\n weights: Weights for each data point\n '
self._param = self.param_ini.copy()
self._param_op = self.param_op_ini.copy()
self._is_fitted = False
self._fit(X, y, weights)
def get_params(self, deep: bool=True) -> dict:
'\n Returns a dictionary of parameters for the current object.\n\n Parameters:\n deep: If True, includes the parameters from the base class.\n\n Returns:\n dict: A dictionary of parameters for the current object.\n '
params = super().get_params(deep=False)
if deep:
params.update(self._qnn.get_params(deep=True))
return params
    def set_params(self: BaseQNN, **params) -> BaseQNN:
        """
        Sets the hyper-parameters of the BaseQNN.

        Args:
            params: Hyper-parameters of the BaseQNN.

        Returns:
            updated BaseQNN

        Raises:
            ValueError: If a key is not a valid hyper-parameter name.
        """
        # Validate every key against the full (deep) parameter dictionary.
        valid_params = self.get_params().keys()
        for key in params.keys():
            if (key not in valid_params):
                raise ValueError(f'Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}.')
        # Keys that live directly on this estimator are set as attributes.
        self_params = (params.keys() & self.get_params(deep=False).keys())
        for key in self_params:
            setattr(self, key, params[key])
        # Keys belonging to the wrapped low-level QNN (circuit/operator structure).
        qnn_params = (params.keys() & self._qnn.get_params(deep=True).keys())
        if qnn_params:
            self._qnn.set_params(**{key: params[key] for key in qnn_params})
            # Structural changes may alter parameter counts: re-draw initial
            # circuit parameters if they no longer match.
            if (self.encoding_circuit.num_parameters != len(self.param_ini)):
                self.param_ini = self.encoding_circuit.generate_initial_parameters(seed=self.parameter_seed)
            # Same check for the observable parameters (single or list of operators).
            if isinstance(self.operator, list):
                num_op_parameters = sum((operator.num_parameters for operator in self.operator))
                if (num_op_parameters != len(self.param_op_ini)):
                    self.param_op_ini = np.concatenate([operator.generate_initial_parameters(seed=self.parameter_seed) for operator in self.operator])
            elif (self.operator.num_parameters != len(self.param_op_ini)):
                self.param_op_ini = self.operator.generate_initial_parameters(seed=self.parameter_seed)
            # Stateful optimizers must forget accumulated state after a structural change.
            if isinstance(self.optimizer, SGDMixin):
                self.optimizer.reset()
        self._is_fitted = False
        return self
    @abstractmethod
    def _fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray=None) -> None:
        """Internal fit function.

        Subclasses implement the actual optimization loop here; the public
        :meth:`fit` resets the parameters and then delegates to this method.

        Args:
            X: Input data
            y: Labels
            weights: Optional weight for each data point
        """
        raise NotImplementedError()
|
class LossBase(abc.ABC):
    """Base class implementation for loss functions.

    Subclasses implement :meth:`value` and :meth:`gradient` (and optionally
    :meth:`variance`). Loss functions can be combined with ``+``, ``-``, ``*``
    and ``/``; numbers and callables are wrapped into a :class:`ConstantLoss`.
    """

    # Human-readable operator names used in error messages of the arithmetic dunders.
    _OP_NAMES = {'+': 'addition', '-': 'subtraction', '*': 'multiplication', '/': 'division'}

    def __init__(self):
        # True if the observable's trainable parameters are optimized as well.
        self._opt_param_op = True

    def set_opt_param_op(self, opt_param_op: bool=True):
        """Sets the `opt_param_op` flag.

        Args:
            opt_param_op (bool): True, if operator has trainable parameters
        """
        self._opt_param_op = opt_param_op

    @property
    def loss_variance_available(self) -> bool:
        """Returns True if the loss function has a variance function."""
        return False

    @property
    @abc.abstractmethod
    def loss_args_tuple(self) -> tuple:
        """Returns evaluation tuple for loss calculation."""
        raise NotImplementedError()

    @property
    def variance_args_tuple(self) -> tuple:
        """Returns evaluation tuple for variance calculation."""
        raise NotImplementedError()

    @property
    @abc.abstractmethod
    def gradient_args_tuple(self) -> tuple:
        """Returns evaluation tuple for loss gradient calculation."""
        raise NotImplementedError()

    @abc.abstractmethod
    def value(self, value_dict: dict, **kwargs) -> float:
        """Calculates and returns the loss value."""
        raise NotImplementedError()

    def variance(self, value_dict: dict, **kwargs) -> float:
        """Calculates and returns the variance of the loss value."""
        raise NotImplementedError()

    @abc.abstractmethod
    def gradient(self, value_dict: dict, **kwargs) -> Union[(np.ndarray, tuple[(np.ndarray, np.ndarray)])]:
        """Calculates and returns the gradient of the loss."""
        raise NotImplementedError()

    def _compose(self, other, composition: str, reflected: bool=False):
        """Combine this loss with ``other`` into a :class:`_ComposedLoss`.

        Args:
            other: Second operand (LossBase instance, number, or callable).
            composition (str): One of "+", "-", "*", "/".
            reflected (bool): True for reflected operators (e.g. ``__radd__``),
                placing ``other`` on the left-hand side.

        Returns:
            _ComposedLoss: The composed loss function.

        Raises:
            ValueError: If ``other`` cannot be interpreted as a loss function.
        """
        if isinstance(other, LossBase):
            wrapped = other
        elif isinstance(other, (float, int)) or callable(other):
            wrapped = ConstantLoss(other)
        else:
            # Bugfix: the original message said "addition" for every operator.
            raise ValueError(f'Only the {self._OP_NAMES[composition]} with another loss function is allowed!')
        if reflected:
            return _ComposedLoss(wrapped, self, composition)
        return _ComposedLoss(self, wrapped, composition)

    def __add__(self, x):
        """Adds two loss functions."""
        return self._compose(x, '+')

    def __radd__(self, x):
        """Adds two loss functions (reflected operands)."""
        return self._compose(x, '+', reflected=True)

    def __mul__(self, x):
        """Multiplies two loss functions."""
        return self._compose(x, '*')

    def __rmul__(self, x):
        """Multiplies two loss functions (reflected operands)."""
        return self._compose(x, '*', reflected=True)

    def __sub__(self, x):
        """Subtracts two loss functions."""
        return self._compose(x, '-')

    def __rsub__(self, x):
        """Subtracts two loss functions (reflected operands)."""
        return self._compose(x, '-', reflected=True)

    def __truediv__(self, x):
        """Divides two loss functions."""
        return self._compose(x, '/')

    def __rtruediv__(self, x):
        """Divides two loss functions (reflected operands)."""
        return self._compose(x, '/', reflected=True)
|
class _ComposedLoss(LossBase):
    """Special class for composed loss functions

    Class for addition, multiplication, subtraction, and division of loss functions.

    Args:
        l1 (LossBase): First loss function
        l2 (LossBase): Second loss function
        composition (str): Composition of the loss functions ("+", "-", "*", "/")
    """

    def __init__(self, l1: LossBase, l2: LossBase, composition: str='+'):
        super().__init__()
        self._l1 = l1
        self._l2 = l2
        self._composition = composition
        # If either part optimizes operator parameters, the composition does too;
        # both parts are then synchronized to that common flag.
        self._opt_param_op = (self._l1._opt_param_op or self._l2._opt_param_op)
        self._l1.set_opt_param_op(self._opt_param_op)
        self._l2.set_opt_param_op(self._opt_param_op)

    def set_opt_param_op(self, opt_param_op: bool=True):
        """Sets the `opt_param_op` flag and propagates it to both parts.

        Args:
            opt_param_op (bool): True, if operator has trainable parameters
        """
        self._opt_param_op = opt_param_op
        self._l1.set_opt_param_op(opt_param_op)
        self._l2.set_opt_param_op(opt_param_op)

    @property
    def loss_variance_available(self) -> bool:
        """True if both parts provide a variance; never for '*' or '/'."""
        if (self._composition in ('*', '/')):
            return False
        else:
            return (self._l1.loss_variance_available and self._l2.loss_variance_available)

    @property
    def loss_args_tuple(self) -> tuple:
        """Returns evaluation tuple for composed loss calculation."""
        # Union of both parts' argument tuples, without duplicates.
        return tuple(set((self._l1.loss_args_tuple + self._l2.loss_args_tuple)))

    @property
    def variance_args_tuple(self) -> tuple:
        """Returns evaluation tuple for composed variance calculation.

        Raises:
            ValueError: For '*' and '/' compositions (variance unsupported).
        """
        if (self._composition in ('*', '/')):
            raise ValueError('Variance not available for composition: ', self._composition)
        else:
            return tuple(set((self._l1.variance_args_tuple + self._l2.variance_args_tuple)))

    @property
    def gradient_args_tuple(self) -> tuple:
        """Returns evaluation tuple for composed gradient calculation."""
        return tuple(set((self._l1.gradient_args_tuple + self._l2.gradient_args_tuple)))

    def value(self, value_dict: dict, **kwargs) -> float:
        """Calculates and returns the composed loss value.

        Args:
            value_dict (dict): Dictionary with values for the evaluation of the loss function

        Returns:
            float: Composed loss value
        """
        value_l1 = self._l1.value(value_dict, **kwargs)
        value_l2 = self._l2.value(value_dict, **kwargs)
        if (self._composition == '*'):
            return (value_l1 * value_l2)
        elif (self._composition == '/'):
            return (value_l1 / value_l2)
        elif (self._composition == '+'):
            return (value_l1 + value_l2)
        elif (self._composition == '-'):
            return (value_l1 - value_l2)
        else:
            raise ValueError('Unknown composition: ', self._composition)

    def variance(self, value_dict: dict, **kwargs) -> float:
        """Calculates and returns the composed variance value.

        Args:
            value_dict (dict): Dictionary with values for the evaluation of the loss function

        Returns:
            float: Composed variance value
        """
        if (self._composition in ('*', '/')):
            raise ValueError('Variance not available for composition: ', self._composition)
        var_l1 = self._l1.variance(value_dict, **kwargs)
        var_l2 = self._l2.variance(value_dict, **kwargs)
        if (self._composition == '+'):
            return (var_l1 + var_l2)
        elif (self._composition == '-'):
            # Variances also add for a difference (independence assumed).
            return (var_l1 + var_l2)
        else:
            raise ValueError('Unknown composition: ', self._composition)

    def gradient(self, value_dict: dict, **kwargs) -> Union[(np.ndarray, tuple[(np.ndarray, np.ndarray)])]:
        """Calculates and returns the gradient of the composed loss.

        Uses the product rule for '*' and the quotient rule for '/'; both
        require the plain loss values of the two parts in addition to their
        gradients.

        Args:
            value_dict (dict): Dictionary with values for the evaluation of the
                loss function gradient

        Returns:
            Union[np.ndarray, tuple[np.ndarray, np.ndarray]]: Gradient of the composed
            loss function
        """
        grad_l1 = self._l1.gradient(value_dict, **kwargs)
        grad_l2 = self._l2.gradient(value_dict, **kwargs)
        if (self._composition in ('*', '/')):
            value_l1 = self._l1.value(value_dict, **kwargs)
            value_l2 = self._l2.value(value_dict, **kwargs)
        # Both parts must return the same structure: either a tuple of
        # (circuit, operator) gradients or a single gradient array.
        if (isinstance(grad_l1, tuple) and isinstance(grad_l2, tuple)):
            if (self._composition == '*'):
                # Product rule: (l1*l2)' = l1'*l2 + l2'*l1 (element-wise per gradient).
                return tuple([np.add((grad_l1[i] * value_l2), (grad_l2[i] * value_l1)) for i in range(len(grad_l1))])
            elif (self._composition == '/'):
                # Quotient rule: (l1/l2)' = l1'/l2 - (l1/l2)*l2'/l2 ... here folded as
                # l1'/l2 - (l1/l2)*l2' with the second 1/l2 factor absorbed below.
                return tuple([np.subtract((grad_l1[i] / value_l2), ((value_l1 / value_l2) * grad_l2[i])) for i in range(len(grad_l1))])
            elif (self._composition == '+'):
                return tuple([np.add(grad_l1[i], grad_l2[i]) for i in range(len(grad_l1))])
            elif (self._composition == '-'):
                return tuple([np.subtract(grad_l1[i], grad_l2[i]) for i in range(len(grad_l1))])
            else:
                raise ValueError('Unknown composition: ', self._composition)
        elif ((not isinstance(grad_l1, tuple)) and (not isinstance(grad_l2, tuple))):
            if (self._composition == '*'):
                return np.add((grad_l1 * value_l2), (grad_l2 * value_l1))
            elif (self._composition == '/'):
                return np.subtract((grad_l1 / value_l2), ((value_l1 / value_l2) * grad_l2))
            elif (self._composition == '+'):
                return np.add(grad_l1, grad_l2)
            elif (self._composition == '-'):
                return np.subtract(grad_l1, grad_l2)
            else:
                raise ValueError('Unknown composition: ', self._composition)
        else:
            raise ValueError('Gradient output structure types do not match!')
|
class ConstantLoss(LossBase):
    """Class for constant or independent loss functions.

    Args:
        value (Union[int, float, Callable[[int],float]]): Constant value or function depending
            on the iterations returning a constant value.
    """

    def __init__(self, value: Union[(int, float, Callable[([int], float)])]=0.0):
        super().__init__()
        # Keep callables as-is; coerce plain numbers to float once, up front.
        self._value = value if callable(value) else float(value)

    @property
    def loss_variance_available(self) -> bool:
        """Returns True if the loss function has a variance function."""
        return True

    @property
    def loss_args_tuple(self) -> tuple:
        """Returns empty evaluation tuple for loss calculation."""
        return tuple()

    @property
    def variance_args_tuple(self) -> tuple:
        """Returns empty evaluation tuple for variance calculation."""
        return tuple()

    @property
    def gradient_args_tuple(self) -> tuple:
        """Returns empty evaluation tuple for gradient calculation."""
        return tuple()

    def value(self, value_dict: dict, **kwargs) -> float:
        """Returns constant or iteration dependent loss value

        Args:
            value_dict (dict): Contains calculated values of the model
            iteration (int): iteration number, if value is a callable function
        """
        if not callable(self._value):
            return self._value
        if ('iteration' not in kwargs):
            raise AttributeError('If value is callable, iteration is required.')
        return self._value(kwargs['iteration'])

    def variance(self, value_dict: dict, **kwargs) -> float:
        """Returns zero variance of the constant loss function."""
        return 0.0

    def gradient(self, value_dict: dict, **kwargs) -> Union[(np.ndarray, tuple[(np.ndarray, np.ndarray)])]:
        """Returns zero gradient value

        Args:
            value_dict (dict): Contains calculated values of the model
        """
        # A constant does not depend on any parameter, so all gradients vanish.
        grad_param = np.zeros(value_dict['param'].shape)
        if not self._opt_param_op:
            return grad_param
        grad_op = np.zeros(value_dict['param_op'].shape)
        return (grad_param, grad_op)
|
class SquaredLoss(LossBase):
    """Squared loss for regression."""

    @property
    def loss_variance_available(self) -> bool:
        """Returns True since the squared loss function has a variance function."""
        return True

    @property
    def loss_args_tuple(self) -> tuple:
        """Returns evaluation tuple for the squared loss calculation."""
        return ('f',)

    @property
    def variance_args_tuple(self) -> tuple:
        """Returns evaluation tuple for the squared loss variance calculation."""
        return ('f', 'var')

    @property
    def gradient_args_tuple(self) -> tuple:
        """Returns evaluation tuple for the squared loss gradient calculation."""
        return ('f', 'dfdp', 'dfdop') if self._opt_param_op else ('f', 'dfdp')

    @staticmethod
    def _ground_truth_and_weights(kwargs: dict):
        """Extracts ground truth and per-sample weights from keyword arguments.

        Missing or ``None`` weights default to a vector of ones.

        Raises:
            AttributeError: If ``ground_truth`` is missing.
        """
        if ('ground_truth' not in kwargs):
            raise AttributeError('SquaredLoss requires ground_truth.')
        ground_truth = kwargs['ground_truth']
        weights = kwargs.get('weights')
        if weights is None:
            weights = np.ones_like(ground_truth)
        return ground_truth, weights

    @staticmethod
    def _contract(weighted_diff, derivative, multiple_output: bool) -> np.ndarray:
        """Contracts weighted residuals with a derivative tensor (factor 2 included)."""
        if (derivative.shape[0] == 0):
            return np.array([])
        subscripts = 'ij,ijk->k' if multiple_output else 'j,jk->k'
        return 2.0 * np.einsum(subscripts, weighted_diff, derivative)

    def value(self, value_dict: dict, **kwargs) -> float:
        """Calculates the squared loss.

        Computes :math:`\\sum_i w_i (f(x_i) - f_{ref}(x_i))^2`.

        Args:
            value_dict (dict): Contains calculated values of the model
            ground_truth (np.ndarray): The true values
            weights (np.ndarray): Weight for each data point, if None all data points count the same

        Returns:
            Loss value
        """
        ground_truth, weights = self._ground_truth_and_weights(kwargs)
        residual = value_dict['f'] - ground_truth
        return np.sum(weights * np.square(residual))

    def variance(self, value_dict: dict, **kwargs) -> float:
        """Calculates the approximated variance of the squared loss.

        Computes :math:`4 \\sum_i w_i (f(x_i) - f_{ref}(x_i))^2 \\sigma_f^2(x_i)`.
        NOTE(review): weights enter linearly here; first-order error propagation
        of the weighted loss would suggest :math:`w_i^2` — confirm intended.

        Args:
            value_dict (dict): Contains calculated values of the model
            ground_truth (np.ndarray): The true values
            weights (np.ndarray): Weight for each data point, if None all data points count the same

        Returns:
            Loss value
        """
        ground_truth, weights = self._ground_truth_and_weights(kwargs)
        residual = value_dict['f'] - ground_truth
        weighted_sq = weights * np.square(residual)
        return np.sum(4 * weighted_sq * value_dict['var'])

    def gradient(self, value_dict: dict, **kwargs) -> Union[(np.ndarray, tuple[(np.ndarray, np.ndarray)])]:
        """Returns the gradient of the squared loss.

        Computes :math:`2 \\sum_i w_i (f(x_i) - f_{ref}(x_i)) \\partial f(x_i)/\\partial p_j`.

        Args:
            value_dict (dict): Contains calculated values of the model
            ground_truth (np.ndarray): The true values
            weights (np.ndarray): Weight for each data point, if None all data points count the same
            multiple_output (bool): True if the QNN has multiple outputs

        Returns:
            Gradient values
        """
        ground_truth, weights = self._ground_truth_and_weights(kwargs)
        multiple_output = bool(kwargs.get('multiple_output'))
        weighted_diff = (value_dict['f'] - ground_truth) * weights
        d_p = self._contract(weighted_diff, value_dict['dfdp'], multiple_output)
        if not self._opt_param_op:
            return d_p
        d_op = self._contract(weighted_diff, value_dict['dfdop'], multiple_output)
        return (d_p, d_op)
|
class VarianceLoss(LossBase):
    """Variance loss for regression.

    Args:
        alpha (float, Callable[[int], float]): Weight value :math:`\\alpha`
    """

    def __init__(self, alpha: Union[(float, Callable[([int], float)])]=0.005):
        super().__init__()
        self._alpha = alpha

    @property
    def loss_variance_available(self) -> bool:
        """Returns True since we neglect the variance of the variance."""
        return True

    @property
    def loss_args_tuple(self) -> tuple:
        """Returns evaluation tuple for loss calculation."""
        return ('var',)

    @property
    def variance_args_tuple(self) -> tuple:
        """Returns evaluation tuple for variance calculation."""
        return tuple()

    @property
    def gradient_args_tuple(self) -> tuple:
        """Returns evaluation tuple for loss gradient calculation."""
        if self._opt_param_op:
            return ('var', 'dvardp', 'dvardop')
        return ('var', 'dvardp')

    def _alpha_value(self, kwargs: dict) -> float:
        """Resolves alpha, evaluating it at the current iteration if callable.

        Raises:
            AttributeError: If alpha is callable and no ``iteration`` is given.
        """
        if callable(self._alpha):
            if ('iteration' not in kwargs):
                raise AttributeError('If alpha is callable, iteration is required.')
            return self._alpha(kwargs['iteration'])
        return self._alpha

    def value(self, value_dict: dict, **kwargs) -> float:
        """Returns the weighted variance loss.

        Computes :math:`L_{Var} = \\alpha \\sum_i Var_i`.

        Args:
            value_dict (dict): Contains calculated values of the model
            iteration (int): iteration number, if alpha is a callable function

        Returns:
            Loss value
        """
        alpha = self._alpha_value(kwargs)
        return (alpha * np.sum(value_dict['var']))

    def variance(self, value_dict: dict, **kwargs) -> float:
        """Returns 0 since we neglect the variance of the variance."""
        return 0.0

    def gradient(self, value_dict: dict, **kwargs) -> Union[(np.ndarray, tuple[(np.ndarray, np.ndarray)])]:
        """Returns the gradient of the variance loss.

        Args:
            value_dict (dict): Contains calculated values of the model
            iteration (int): iteration number, if alpha is a callable function
            multiple_output (bool): True if the QNN has multiple outputs

        Returns:
            Gradient values
        """
        alpha = self._alpha_value(kwargs)
        multiple_output = (('multiple_output' in kwargs) and kwargs['multiple_output'])
        # Bugfix: the empty-parameter checks previously read value_dict['dfdp'] /
        # value_dict['dfdop'], which are not requested by gradient_args_tuple and
        # may therefore be absent; check the variance derivatives themselves.
        if (value_dict['dvardp'].shape[0] == 0):
            d_p = np.array([])
        elif multiple_output:
            d_p = (alpha * np.sum(value_dict['dvardp'], axis=(0, 1)))
        else:
            d_p = (alpha * np.sum(value_dict['dvardp'], axis=0))
        if (not self._opt_param_op):
            return d_p
        if (value_dict['dvardop'].shape[0] == 0):
            d_op = np.array([])
        elif multiple_output:
            d_op = (alpha * np.sum(value_dict['dvardop'], axis=(0, 1)))
        else:
            d_op = (alpha * np.sum(value_dict['dvardop'], axis=0))
        return (d_p, d_op)
|
class ParameterRegularizationLoss(LossBase):
    """Loss for parameter regularization.

    Possible implementations:

    * ``"L1"``: :math:`L=\\alpha \\sum_i \\left|p_i\\right|`
    * ``"L2"``: :math:`L=\\alpha \\sum_i p_i^2`

    Args:
        alpha (float, Callable[[int], float]): Weight value :math:`\\alpha`
        mode (str): Type of regularization, either 'L1' or 'L2' (default: 'L2').
        parameter_list (list): List of parameters to regularize, None: all (default: None).
        parameter_operator_list (list): List of operator parameters to regularize, None: all
            (default: []).
    """

    def __init__(self, alpha: Union[(float, Callable[([int], float)])]=0.005, mode: str='L2', parameter_list: Union[(list, None)]=None, parameter_operator_list: Union[(list, None)]=[]):
        # NOTE: the mutable default [] is never mutated; it means "regularize no
        # operator parameters", while None means "regularize all of them".
        super().__init__()
        self._alpha = alpha
        self._mode = mode
        if (self._mode not in ['L1', 'L2']):
            raise ValueError("Type must be 'L1' or 'L2'!")
        self._parameter_list = parameter_list
        self._parameter_operator_list = parameter_operator_list

    @property
    def loss_variance_available(self) -> bool:
        """Returns True since variance is zero (and available)."""
        return True

    @property
    def loss_args_tuple(self) -> tuple:
        """Returns evaluation tuple for loss calculation."""
        return tuple()

    @property
    def variance_args_tuple(self) -> tuple:
        """Returns evaluation tuple for variance calculation."""
        return tuple()

    @property
    def gradient_args_tuple(self) -> tuple:
        """Returns evaluation tuple for loss gradient calculation."""
        return tuple()

    def _alpha_value(self, kwargs: dict) -> float:
        """Resolves alpha, evaluating it at the current iteration if callable.

        Raises:
            AttributeError: If alpha is callable and no ``iteration`` is given.
        """
        if callable(self._alpha):
            if ('iteration' not in kwargs):
                raise AttributeError('If alpha is callable, iteration is required.')
            return self._alpha(kwargs['iteration'])
        return self._alpha

    def _penalty(self, values: np.ndarray, indices: Union[(list, None)]) -> float:
        """L1/L2 penalty of the selected entries of ``values`` (all if indices is None)."""
        vec = values if (indices is None) else values[indices]
        if (self._mode == 'L1'):
            return np.sum(np.abs(vec))
        elif (self._mode == 'L2'):
            return np.sum(np.square(vec))
        raise ValueError('Type must be L1 or L2!')

    def _penalty_gradient(self, values: np.ndarray, indices: Union[(list, None)], alpha: float) -> np.ndarray:
        """Gradient of ``alpha`` times the L1/L2 penalty w.r.t. ``values``.

        Entries outside ``indices`` have zero gradient.
        """
        vec = values if (indices is None) else values[indices]
        if (self._mode == 'L1'):
            contribution = (alpha * np.sign(vec))
        elif (self._mode == 'L2'):
            contribution = ((alpha * 2.0) * vec)
        else:
            raise ValueError('Type must be L1 or L2!')
        if (indices is None):
            return contribution
        grad = np.zeros_like(values)
        grad[indices] = contribution
        return grad

    def value(self, value_dict: dict, **kwargs) -> float:
        """Returns the parameter regularization loss.

        Computes the L1 or L2 penalty of the selected circuit parameters and,
        if ``opt_param_op`` is set, of the selected operator parameters,
        scaled by :math:`\\alpha`.

        Args:
            value_dict (dict): Contains calculated values of the model
            iteration (int): iteration number, if alpha is a callable function

        Returns:
            Loss value
        """
        alpha = self._alpha_value(kwargs)
        loss = self._penalty(value_dict['param'], self._parameter_list)
        if self._opt_param_op:
            # Bugfix: the operator branch previously tested `_parameter_list`
            # instead of `_parameter_operator_list` when deciding whether to
            # regularize all operator parameters (inconsistent with gradient()).
            loss += self._penalty(value_dict['param_op'], self._parameter_operator_list)
        return (alpha * loss)

    def variance(self, value_dict: dict, **kwargs) -> float:
        """Returns 0 since the variance is equal to zero."""
        return 0.0

    def gradient(self, value_dict: dict, **kwargs) -> Union[(np.ndarray, tuple[(np.ndarray, np.ndarray)])]:
        """Returns the gradient of the parameter regularization loss.

        Args:
            value_dict (dict): Contains calculated values of the model
            iteration (int): iteration number, if alpha is a callable function

        Returns:
            Gradient values
        """
        alpha = self._alpha_value(kwargs)
        d_p = self._penalty_gradient(value_dict['param'], self._parameter_list, alpha)
        if (not self._opt_param_op):
            return d_p
        d_op = self._penalty_gradient(value_dict['param_op'], self._parameter_operator_list, alpha)
        return (d_p, d_op)
|
class Expec():
    """Data structure that holds the set-up of derivative of the expectation value.

    Args:
        wave_function (Union[str, tuple, ParameterVectorElement]): Describes the wave function or
            its derivative. If tuple or ParameterVectorElement the differentiation with respect to
            the parameters in the tuple or with respect to the ParameterVectorElement is considered
        observable (str): String for the expectation value observable (``"O"``, ``"OO"``,
            ``"dop"``, ``"dopdop"``, ``"var"``).
        label (str): Label that is used for displaying or in the value dict of the QNN class.
    """

    def __init__(self, wave_function: Union[(str, tuple, ParameterVectorElement)], observable: str, label: str=''):
        self.wave_function = wave_function
        self.operator = observable
        self.label = label

    def __var_to_str(self, val: Union[(str, tuple, ParameterExpression, ParameterVector)]) -> str:
        """Converter for variables to string.

        Args:
            val (Union[str, tuple, ParameterExpression, ParameterVector]): Input that is converted
                to string

        Returns:
            String that contains the converted val variable
        """
        if isinstance(val, ParameterExpression):
            out_str = str(val.name)
        elif isinstance(val, ParameterVector):
            out_str = str(val.name)
        elif isinstance(val, tuple):
            # Tuples are rendered recursively with a trailing comma per element.
            out_str = '('
            for x in val:
                out_str += (self.__var_to_str(x) + ',')
            out_str += ')'
        elif isinstance(val, str):
            out_str = val
        else:
            out_str = str(val)
        return out_str

    def __repr__(self) -> str:
        """Build-in string conversion for Expec class."""
        return self.__str__()

    def __str__(self) -> str:
        """Build-in string conversion for Expec class."""
        return (((((('Expec(' + self.__var_to_str(self.wave_function)) + ',') + self.__var_to_str(self.operator)) + ',') + self.__var_to_str(self.label)) + ')')

    def __len__(self) -> int:
        """Build-in length of Expec class (return 1)."""
        return 1

    def __eq__(self, other) -> bool:
        """Build-in comparison of two Expec class objects (the label is ignored)."""
        return (isinstance(other, self.__class__) and (self.wave_function == other.wave_function) and (self.operator == other.operator))

    def __hash__(self) -> int:
        """Build-in hash function for Expec class (consistent with __eq__)."""
        return hash((self.wave_function, self.operator))

    @classmethod
    def from_string(cls, val: str):
        """Converts an input string to the Expec data structure.

        Args:
            val (str): String that defines the expectation value derivative

        Returns:
            Associated Expec object

        Raises:
            ValueError: If the string is unknown or the derivative is not implemented.
            TypeError: If ``val`` is not a string.
        """
        if isinstance(val, str):
            if (val == 'f'):
                return cls('I', 'O', 'f')
            elif (val == 'dfdx'):
                return cls('dx', 'O', 'dfdx')
            elif (val == 'dfdxdx'):
                return cls('dxdx', 'O', 'dfdxdx')
            elif (val == 'laplace'):
                return cls('laplace', 'O', 'laplace')
            elif (val == 'laplace_dp'):
                return cls('laplace_dp', 'O', 'laplace_dp')
            elif (val == 'laplace_dop'):
                return cls('laplace', 'dop', 'laplace_dop')
            elif (val == 'dfdp'):
                return cls('dp', 'O', 'dfdp')
            elif (val == 'dfdpdp'):
                return cls('dpdp', 'O', 'dfdpdp')
            elif (val == 'dfdopdp'):
                # Bugfix: label was 'dfdop', which collided with the plain
                # 'dfdop' entry when results are stored by label.
                return cls('dp', 'dop', 'dfdopdp')
            elif (val == 'dfdpdop'):
                raise ValueError('Not implemented, please use dfdopdp instead and transpose!')
            elif (val == 'dfdop'):
                return cls('I', 'dop', 'dfdop')
            elif (val == 'dfdopdop'):
                # Bugfix: label was 'dfdop' (copy-paste); now matches the input string.
                return cls('I', 'dopdop', 'dfdopdop')
            elif (val == 'dfdpdx'):
                return cls('dpdx', 'O', 'dfdpdx')
            elif (val == 'dfdopdx'):
                return cls('dx', 'dop', 'dfdopdx')
            elif (val == 'dfdopdxdx'):
                return cls('dxdx', 'dop', 'dfdopdxdx')
            elif (val == 'fcc'):
                return cls('I', 'OO', 'fcc')
            elif (val == 'dfccdx'):
                return cls('dx', 'OO', 'dfccdx')
            elif (val == 'dfccdxdx'):
                return cls('dxdx', 'OO', 'dfccdxdx')
            elif (val == 'dfccdp'):
                return cls('dp', 'OO', 'dfccdp')
            elif (val == 'dfccdpdp'):
                return cls('dpdp', 'OO', 'dfccdpdp')
            elif (val == 'dfccdopdx'):
                # Bugfix: label was 'dfccdpdp' (copy-paste); now matches the input string.
                return cls('dx', 'OOdop', 'dfccdopdx')
            elif (val == 'dfccdop'):
                return cls('I', 'OOdop', 'dfccdop')
            elif (val == 'dfccdopdop'):
                return cls('I', 'OOdopdop', 'dfccdopdop')
            elif (val in ('var', 'varf')):
                return cls('I', 'var', val)
            elif (val in ('dvardx', 'dvarfdx')):
                return cls('dx', 'var', val)
            elif (val in ('dvardp', 'dvarfdp')):
                return cls('dp', 'var', val)
            elif (val in ('dvardop', 'dvarfdop')):
                return cls('I', 'dvardop', val)
            elif (val == 'fischer'):
                # Note: 'fischer' spelling is part of the public string API; kept as-is.
                return cls('I', 'fischer', val)
            else:
                raise ValueError('Unknown input string:', val)
        else:
            raise TypeError('String expected, found type:', type(val))

    @classmethod
    def from_tuple(cls, val: tuple, operator: str='O'):
        """Creates an Expec object from an input tuple

        Args:
            val (tuple): Tuple for the differentiation of the wave function.
            operator (str): String for the operator, default='O'.

        Returns
            Associated Expec object
        """
        return cls(val, operator, val)

    @classmethod
    def from_parameter(cls, val: ParameterVectorElement, operator: str='O'):
        """Creates an Expec object from an inputted parameter

        Args:
            val (ParameterVectorElement): Parameter that is used in the differentiation.
            operator (str): String for the operator, default='O'.

        Returns
            Associated Expec object
        """
        return cls((val,), operator, (val,))

    @classmethod
    def from_variable(cls, val):
        """Creates an Expec object from an inputted value

        Args:
            val (Union[Expec,str,tuple,ParameterVectorElement]): value that defines the derivative

        Returns
            Associated Expec object
        """
        if isinstance(val, Expec):
            return val
        elif isinstance(val, str):
            return cls.from_string(val)
        elif isinstance(val, tuple):
            return cls.from_tuple(val)
        elif isinstance(val, ParameterVectorElement):
            return cls.from_parameter(val)
        else:
            raise TypeError('Unsupported type:', type(val))
|
class QNN():
    'A class for working with QNNs and its derivatives\n\n    Args:\n        pqc (EncodingCircuitBase) : parameterized quantum circuit in encoding circuit format\n        operator (Union[ObservableBase,list]): Operator that is used in the expectation\n            value of the QNN. Can be a list for multiple outputs.\n        executor (Executor) : Executor that is used for the evaluation of the QNN\n        optree_caching : Caching of the optree expressions (default = True recommended)\n        result_caching : Caching of the result for each `x`, `param`, `param_op` combination\n            (default = True)\n    '
    def __init__(self, pqc: EncodingCircuitBase, operator: Union[(ObservableBase, list)], executor: Executor, optree_caching=True, result_caching=True) -> None:
        self._executor = executor
        # NOTE(review): attribute name is misspelled ("inital"); kept as-is for compatibility.
        self._inital_shots = self._executor.get_shots()
        self._optree_caching = optree_caching
        self._result_caching = result_caching
        # Transpile the encoding circuit for the executor's backend up front.
        self.pqc = TranspiledEncodingCircuit(pqc, self._executor.backend)
        self.operator = operator
        # Exactly one of estimator/sampler is set; the other stays None and
        # evaluate() branches on which one is available.
        if (self._executor.optree_executor == 'estimator'):
            self._estimator = self._executor.get_estimator()
            self._sampler = None
        else:
            self._sampler = self._executor.get_sampler()
            self._estimator = None
        # NOTE(review): method name is misspelled ("initilize"); kept as-is for compatibility.
        self._initilize_derivative()
    def get_params(self, deep: bool=True) -> dict:
        'Returns the dictionary of the hyper-parameters of the QNN.\n\n        In case of multiple outputs, the hyper-parameters of the operator are prefixed\n        with ``op0__``, ``op1__``, etc.\n\n        '
        params = dict(num_qubits=self.num_qubits)
        if deep:
            params.update(self.pqc.get_params())
            if isinstance(self.operator, list):
                # Multiple outputs: prefix each operator's parameters with "op<i>__".
                for (i, oper) in enumerate(self.operator):
                    oper_dict = oper.get_params()
                    for (key, value) in oper_dict.items():
                        # num_qubits is already reported at the top level.
                        if (key != 'num_qubits'):
                            params[((('op' + str(i)) + '__') + key)] = value
            else:
                params.update(self.operator.get_params())
        return params
    def set_params(self, **params) -> None:
        'Sets the hyper-parameters of the QNN\n\n        In case of multiple outputs, the hyper-parameters of the operator are prefixed\n        with ``op0__``, ``op1__``, etc.\n\n        Args:\n            params: Hyper-parameters that are adjusted, e.g. ``num_qubits=4``\n\n        '
        valid_params = self.get_params()
        # Reject unknown keys before applying anything.
        for (key, value) in params.items():
            if (key not in valid_params):
                raise ValueError(f'Invalid parameter {key!r}. Valid parameters are {sorted(valid_params)!r}.')
        # Forward the subset of parameters that belong to the PQC.
        dict_pqc = {}
        for (key, value) in params.items():
            if (key in self.pqc.get_params()):
                dict_pqc[key] = value
        if (len(dict_pqc) > 0):
            self.pqc.set_params(**dict_pqc)
        if isinstance(self.operator, list):
            # Multiple outputs: strip the "op<i>__" prefix and forward per operator.
            for (i, oper) in enumerate(self.operator):
                dict_operator = {}
                for (key, value) in params.items():
                    if (key == 'num_qubits'):
                        dict_operator[key] = value
                    elif key.startswith((('op' + str(i)) + '__')):
                        dict_operator[key.split('__', 1)[1]] = value
                if (len(dict_operator) > 0):
                    oper.set_params(**dict_operator)
        else:
            dict_operator = {}
            for (key, value) in params.items():
                if (key in self.operator.get_params()):
                    dict_operator[key] = value
            if (len(dict_operator) > 0):
                self.operator.set_params(**dict_operator)
        # Derivative structures depend on the (possibly changed) hyper-parameters.
        self._initilize_derivative()
    def _initilize_derivative(self):
        'Initializes the derivative classes'
        num_qubits_operator = 0
        if isinstance(self.operator, list):
            for i in range(len(self.operator)):
                # Map each operator onto the physical qubits of the transpiled PQC.
                self.operator[i].set_map(self.pqc.qubit_map, self.pqc.num_physical_qubits)
                num_qubits_operator = max(num_qubits_operator, self.operator[i].num_qubits)
        else:
            self.operator.set_map(self.pqc.qubit_map, self.pqc.num_physical_qubits)
            num_qubits_operator = self.operator.num_qubits
        self.operator_derivatives = ObservableDerivatives(self.operator, self._optree_caching)
        self.pqc_derivatives = EncodingCircuitDerivatives(self.pqc, self._optree_caching)
        # PQC and observable must act on the same number of (virtual) qubits.
        if (self.pqc.num_virtual_qubits != num_qubits_operator):
            raise ValueError('Number of Qubits are not the same!')
        else:
            self._num_qubits = self.pqc.num_virtual_qubits
        if (self._executor.optree_executor == 'sampler'):
            # Sampler measures in the Z basis only, so X/Y terms require Pauli
            # splitting (slower); warn the user about the Estimator alternative.
            # NOTE(review): uses print() rather than the warnings module.
            operator_string = str(self.operator)
            if (('X' in operator_string) or ('Y' in operator_string)):
                self._split_paulis = True
                print(('The observable includes X and Y gates, consider switching' + ' to the Estimator primitive for a faster performance!'))
            else:
                self._split_paulis = False
        else:
            self._split_paulis = False
        # Cache of evaluate() results keyed by (x, param, param_op) tuples.
        self.result_container = {}
    def set_shots(self, num_shots: int) -> None:
        'Sets the number shots for the next evaluations.\n\n        Args:\n            num_shots (int): Number of shots that are set\n        '
        self._executor.set_shots(num_shots)
    def get_shots(self) -> int:
        'Getter for the number of shots.\n\n        Returns:\n            Returns the number of shots that are used for the current evaluation.'
        return self._executor.get_shots()
    def reset_shots(self) -> None:
        'Function for resetting the number of shots to the initial ones'
        self._executor.reset_shots()
    @property
    def num_qubits(self) -> int:
        'Return the number of qubits of the QNN'
        return self._num_qubits
    @property
    def num_features(self) -> int:
        'Return the dimension of the features of the PQC'
        return self.pqc_derivatives.num_features
    @property
    def num_parameters(self) -> int:
        'Return the number of trainable parameters of the PQC'
        return self.pqc_derivatives.num_parameters
    @property
    def num_operator(self) -> int:
        'Return the number outputs'
        return self.operator_derivatives.num_operators
    @property
    def num_parameters_observable(self) -> int:
        'Return the number of trainable parameters of the expectation value operator'
        return self.operator_derivatives.num_parameters
    @property
    def multiple_output(self) -> bool:
        'Return true if multiple outputs are used'
        return self.operator_derivatives.multiple_output
    @property
    def parameters(self):
        'Return the parameter vector of the PQC.'
        return self.pqc_derivatives.parameter_vector
    @property
    def features(self):
        'Return the feature vector of the PQC.'
        return self.pqc_derivatives.feature_vector
    @property
    def parameters_operator(self):
        'Return the parameter vector of the cost operator.'
        return self.operator_derivatives.parameter_vector
    def get_optree_from_string(self, input_string: str):
        'Return the OpTree expression of the given PQC\n\n        Args:\n            input_string (str): String from which the OpTree is obtained.\n\n        Returns:\n            OpTree structure created from the string.\n        '
        return self.get_optree_from_expec(Expec.from_string(input_string))
    def get_optree_from_expec(self, input_expec: Expec):
        'Returns the OpTree expression for the given :class:`Expec` object.\n\n        Args:\n            input_expec (Expec): :class:`Expec` object from which the OpTree is obtained\n\n        Returns:\n            OpTree structure created from the :class:`Expec` object.\n        '
        return OpTree.gen_expectation_tree(self.pqc_derivatives.get_derivative(input_expec.wave_function), self.operator_derivatives.get_derivative(input_expec.operator))
    def evaluate_diff_tuple(self, diff_tuple, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        'Evaluate the given tuple of derivatives of the PQC.\n\n        Args:\n            diff_tuple: Tuple with parameters used in the differentiation\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Differentiated values of the QNN\n        '
        return self.evaluate((diff_tuple,), x, param, param_op)[diff_tuple]
    def evaluate_from_string(self, input_string: str, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        'Evaluate the given PQC from an input string\n\n        Args:\n            input_string (str): Input string that determines the evaluated value(s)\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Values from the QNN defined by the string\n        '
        return self.evaluate(input_string, x, param, param_op)[input_string]
    def evaluate_f(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        'Evaluates the QNN\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Values from the QNN\n        '
        return self.evaluate_from_string('f', x, param, param_op)
    def evaluate_dfdx(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        'Evaluates derivatives of the QNN with respect to `x`.\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated derivatives of the the QNN with respect to `x`\n        '
        return self.evaluate_from_string('dfdx', x, param, param_op)
    def evaluate_dfdxdx(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        'Evaluates second order derivatives of the QNN with respect to `x`.\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated second order derivatives of the the QNN with respect to `x`\n        '
        return self.evaluate_from_string('dfdxdx', x, param, param_op)
    def evaluate_laplace(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        'Evaluates Laplacian of the QNN for `x`.\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated Laplacian of the the QNN for `x`\n        '
        return self.evaluate_from_string('laplace', x, param, param_op)
    def evaluate_laplace_dp(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        "\n        Evaluates the derivative of the Laplacian with respect to the PQC's parameters.\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated derivative of the Laplacian with respect to the PQC's parameters\n        "
        return self.evaluate_from_string('laplace_dp', x, param, param_op)
    def evaluate_laplace_dop(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        "Evaluates the derivative of the Laplacian with respect to the operator's parameters.\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated derivative of the Laplacian with respect to the operator's parameters\n        "
        return self.evaluate_from_string('laplace_dop', x, param, param_op)
    def evaluate_dfdp(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        "Evaluates the derivative of the QNN with respect to the PQC's parameters.\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated derivative of the the QNN with respect to the PQC's parameters.\n        "
        return self.evaluate_from_string('dfdp', x, param, param_op)
    def evaluate_dfdop(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        "Evaluates the derivative of the QNN with respect to the operator's parameters.\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated derivative of the the QNN with respect to the operator's parameters.\n        "
        return self.evaluate_from_string('dfdop', x, param, param_op)
    def evaluate_dfdpdx(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        "Evaluates the derivative of the QNN with respect to the PQC's parameters and `x`.\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated derivative of the QNN with respect to the PQC's parameters and `x`\n        "
        return self.evaluate_from_string('dfdpdx', x, param, param_op)
    def evaluate_dfdopdx(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        "Evaluates the derivative of the QNN with respect to the operator's parameters and `x`.\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated derivative of the QNN with respect to the operator's parameters and `x`\n        "
        return self.evaluate_from_string('dfdopdx', x, param, param_op)
    def evaluate_variance(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        'Evaluates the variance (<OO>-<O>^2) of the QNN\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated variance of the the QNN\n        '
        return self.evaluate_from_string('var', x, param, param_op)
    def evaluate_var(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> Union[(float, np.ndarray)]:
        'Evaluates the variance (<OO>-<O>^2) of the QNN\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n            param_op (Union[float,np.ndarray]): Parameter values of the operator\n\n        Returns:\n            Evaluated variance of the the QNN\n        '
        # Alias for evaluate_variance.
        return self.evaluate_variance(x, param, param_op)
    def evaluate_probabilities(self, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)]):
        'Evaluate the probabilities of the encoding circuit / PQC.\n\n        The function only works with the QuantumInstance executer.\n\n        Args:\n            x (Union[float,np.ndarray]): Input data values\n            param (Union[float,np.ndarray]): Parameter values of the PQC\n\n        Returns:\n            List of probabilities stored in the SparseVectorStateFn format.\n            (dictionary can be obtained by `.to_dict_fn()` or `to_dict_fn().primitive`)\n        '
        # 'I' yields the undifferentiated circuit.
        optree = self.pqc_derivatives.get_derivative('I')
        dictionary = dict(zip(self.parameters, param))
        dictionary.update(zip(self.features, x))
        OpTree.assign_parameters(optree, dictionary, inplace=True)
        if isinstance(optree, QuantumCircuit):
            circuit = optree
        elif isinstance(optree, OpTreeCircuit):
            circuit = optree.circuit
        else:
            # NOTE(review): typo "Unsported" is a runtime string; left unchanged here.
            raise TypeError('Unsported optree type:', type(optree))
        # The sampler needs classical bits; add measurements if none exist yet.
        if (circuit.num_clbits == 0):
            circuit.measure_all()
        sampler = self._executor.get_sampler()
        result = sampler.run(circuit).result()
        return result.quasi_dists[0].binary_probabilities()
    def evaluate(self, values, x: Union[(float, np.ndarray)], param: Union[(float, np.ndarray)], param_op: Union[(float, np.ndarray)]) -> dict:
        'General function for evaluating the output of derivatives of the QNN.\n\n        Evaluation works for given combination of\n        input features `x` and parameters `param` and `param_op`.\n        The function includes caching of results\n\n        If `x`, `param`, and/or `param_op` are given as a nested list\n        (for example multiple sets of parameters),\n        the values are returned in a nested list.\n\n        Args:\n            values : list of what values and derivatives of the QNN are evaluated.\n                Multiple inputs have to be a tuple.\n            x (np.ndarray): Values of the input feature data.\n            param (np.ndarray): Parameter values of the PQC parameters\n            param_op (np.ndarray): Parameter values of the operator parameters\n\n\n        Results:\n            Returns a dictionary with the computed values.\n            The keys of the dictionary are given by the entries in the values tuple\n\n        '
        def generate_real_todo_dic(values, value_dict):
            'Converts the input values into a sorted dictionary\n            of Expec items'
            def add_to_real_todo_dic(item: Expec, real_todo_dic, value_dict):
                # Only schedule items that are not already cached; group by wave function
                # so all operators sharing a circuit derivative are evaluated together.
                if (item not in value_dict):
                    if (item.wave_function in real_todo_dic):
                        if (item not in real_todo_dic[item.wave_function]):
                            real_todo_dic[item.wave_function].append(item)
                    else:
                        real_todo_dic[item.wave_function] = [item]
                return real_todo_dic
            # A non-iterable `values` raises TypeError; treat it as a single entry.
            try:
                expec_list = [Expec.from_variable(i) for i in values]
            except TypeError:
                expec_list = [Expec.from_variable(values)]
            real_todo_dic = {}
            for i in expec_list:
                # Variance-type requests are composite: schedule their ingredient
                # expectation values; the combination happens after evaluation.
                if ((i.operator == 'var') and (i.wave_function == 'I')):
                    real_todo_dic = add_to_real_todo_dic(Expec('I', 'OO'), real_todo_dic, value_dict)
                    real_todo_dic = add_to_real_todo_dic(Expec('I', 'O'), real_todo_dic, value_dict)
                elif ((i.operator == 'var') and (i.wave_function == 'dx')):
                    real_todo_dic = add_to_real_todo_dic(Expec('dx', 'OO'), real_todo_dic, value_dict)
                    real_todo_dic = add_to_real_todo_dic(Expec('I', 'O'), real_todo_dic, value_dict)
                    real_todo_dic = add_to_real_todo_dic(Expec('dx', 'O'), real_todo_dic, value_dict)
                elif ((i.operator == 'var') and (i.wave_function == 'dp')):
                    real_todo_dic = add_to_real_todo_dic(Expec('dp', 'OO'), real_todo_dic, value_dict)
                    real_todo_dic = add_to_real_todo_dic(Expec('I', 'O'), real_todo_dic, value_dict)
                    real_todo_dic = add_to_real_todo_dic(Expec('dp', 'O'), real_todo_dic, value_dict)
                elif ((i.operator == 'dvardop') and (i.wave_function == 'I')):
                    real_todo_dic = add_to_real_todo_dic(Expec('I', 'OOdop'), real_todo_dic, value_dict)
                    real_todo_dic = add_to_real_todo_dic(Expec('I', 'O'), real_todo_dic, value_dict)
                    real_todo_dic = add_to_real_todo_dic(Expec('I', 'dop'), real_todo_dic, value_dict)
                else:
                    real_todo_dic = add_to_real_todo_dic(i, real_todo_dic, value_dict)
            return real_todo_dic
        def to_tuple(x):
            'helper function for converting data into hashable tuples'
            def flatten(container):
                # Recursively yield scalars from arbitrarily nested sequences.
                for i in container:
                    if isinstance(i, (list, tuple, np.ndarray)):
                        for j in flatten(i):
                            (yield j)
                    else:
                        (yield i)
            if isinstance(x, float):
                return tuple([x])
            elif (len(np.shape(x)) == 1):
                return tuple(list(x))
            else:
                return tuple(flatten(x))
        # Normalize the inputs; the multi_* flags record whether the caller
        # supplied multiple feature/parameter sets (affects output reshaping).
        (x_inp, multi_x) = adjust_features(x, self.num_features)
        (param_inp, multi_param) = adjust_parameters(param, self.num_parameters)
        (param_op_inp, multi_param_op) = adjust_parameters(param_op, self.num_parameters_observable)
        # One assignment dictionary per (x, param) combination.
        dict_encoding_circuit = []
        for x_inp_ in x_inp:
            dd = dict(zip(self.pqc_derivatives.feature_vector, x_inp_))
            for param_inp_ in param_inp:
                ddd = dd.copy()
                ddd.update(zip(self.pqc_derivatives.parameter_vector, param_inp_))
                dict_encoding_circuit.append(ddd)
        dict_operator = [dict(zip(self.operator_derivatives.parameter_vector, p)) for p in param_op_inp]
        if (not isinstance(values, tuple)):
            values = (values,)
        # Sort requested values by string representation for a deterministic order.
        indices = np.argsort([str(t) for t in values])
        values = tuple([values[i] for i in indices])
        if (self._result_caching == True):
            # Reuse previously computed results for the same (x, param, param_op).
            caching_tuple = (to_tuple(x), to_tuple(param), to_tuple(param_op))
            value_dict = self.result_container.get(caching_tuple, {})
        else:
            value_dict = {}
        real_todo_dic = generate_real_todo_dic(values, value_dict)
        for (key, op_list) in real_todo_dic.items():
            # All operator derivatives sharing wave-function derivative `key`
            # are evaluated in one primitive call.
            operators = OpTreeList([self.operator_derivatives.get_derivative(expec_.operator) for expec_ in op_list])
            pqc_optree = self.pqc_derivatives.get_derivative(key)
            num_nested = OpTree.get_num_nested_lists(pqc_optree)
            if (self._sampler is not None):
                val = OpTree.evaluate.evaluate_with_sampler(pqc_optree, operators, dict_encoding_circuit, dict_operator, self._sampler)
            elif (self._estimator is not None):
                val = OpTree.evaluate.evaluate_with_estimator(pqc_optree, operators, dict_encoding_circuit, dict_operator, self._estimator)
            else:
                raise ValueError('No execution is set!')
            set_empty = False
            if (val.shape[0] == 0):
                set_empty = True
            if (set_empty is False):
                # Reorder axes of the raw result — presumably so that the operator
                # index comes first, followed by x/param, param_op, output and
                # derivative axes. TODO(review): confirm against the OpTree
                # evaluate output layout.
                ilist = list(range(len(val.shape)))
                swapp_list = (([ilist[(2 + num_nested)]] + [ilist[0]]) + [ilist[1]])
                length = (3 + num_nested)
                if self.multiple_output:
                    length += 1
                    swapp_list = (swapp_list + [ilist[(- 1)]])
                if (len(ilist) > length):
                    if self.multiple_output:
                        swapp_list = (swapp_list + ilist[(3 + num_nested):(- 1)])
                    else:
                        swapp_list = (swapp_list + ilist[(3 + num_nested):])
                if (num_nested > 0):
                    swapp_list = (swapp_list + ilist[2:(2 + num_nested)])
                val = np.transpose(val, axes=swapp_list)
            ioff = 0
            for (iexpec, expec_) in enumerate(op_list):
                if set_empty:
                    value_dict[expec_] = np.array([])
                else:
                    # NOTE(review): isinstance(..., object) is always True, so the
                    # else-branch below is unreachable; likely meant a dtype check.
                    if isinstance(val[iexpec], object):
                        val_final = np.array(val[iexpec].tolist(), dtype=float)
                    else:
                        val_final = val[iexpec]
                    # Build the final output shape from the multi_* flags so that
                    # singleton x/param/param_op axes are squeezed away.
                    reshape_list = []
                    shape = val_final.shape
                    if multi_x:
                        reshape_list.append(len(x))
                    if multi_param:
                        reshape_list.append(len(param))
                    if multi_param_op:
                        reshape_list.append(shape[1])
                    if self.multiple_output:
                        reshape_list.append(shape[2])
                    if self.multiple_output:
                        if (len(shape) > 3):
                            reshape_list += list(shape[3:])
                    elif (len(shape) > 2):
                        reshape_list += list(shape[2:])
                    if (len(reshape_list) == 0):
                        value_dict[expec_] = val_final.reshape((- 1))[0]
                    else:
                        value_dict[expec_] = val_final.reshape(reshape_list)
                # NOTE(review): ioff is incremented but never read.
                ioff = (ioff + 1)
        # Post-process composite requests (variances and their derivatives)
        # from the already-computed expectation values.
        for todo in values:
            todo_expec = Expec.from_variable(todo)
            if ((todo_expec.operator == 'var') and (todo_expec.wave_function == 'I')):
                # var = <OO> - <O>^2
                value_dict[todo_expec] = (value_dict[Expec('I', 'OO')] - np.square(value_dict[Expec('I', 'O')]))
            elif ((todo_expec.operator == 'var') and (todo_expec.wave_function == 'dx')):
                # d(var)/dx = d<OO>/dx - 2 * d<O>/dx * <O>
                if (self.num_features == 1):
                    value_dict[todo_expec] = (value_dict[Expec('dx', 'OO')] - (2.0 * np.multiply(value_dict[Expec('dx', 'O')], value_dict[Expec('I', 'O')])))
                else:
                    value_dict[todo_expec] = np.zeros(value_dict[Expec('dx', 'OO')].shape)
                    for i in range(value_dict[Expec('dx', 'OO')].shape[(- 1)]):
                        value_dict[todo_expec][(..., i)] = (value_dict[Expec('dx', 'OO')][(..., i)] - (2.0 * np.multiply(value_dict[Expec('dx', 'O')][(..., i)], value_dict[Expec('I', 'O')])))
            elif ((todo_expec.operator == 'var') and (todo_expec.wave_function == 'dp')):
                # d(var)/dp, component-wise over the parameter axis.
                value_dict[todo_expec] = np.zeros(value_dict[Expec('dp', 'OO')].shape)
                for i in range(value_dict[Expec('dp', 'OO')].shape[(- 1)]):
                    value_dict[todo_expec][(..., i)] = (value_dict[Expec('dp', 'OO')][(..., i)] - (2.0 * np.multiply(value_dict[Expec('dp', 'O')][(..., i)], value_dict[Expec('I', 'O')])))
            elif ((todo_expec.operator == 'dvardop') and (todo_expec.wave_function == 'I')):
                # d(var)/d(param_op), component-wise over the operator-parameter axis.
                value_dict[todo_expec] = np.zeros(value_dict[Expec('I', 'OOdop')].shape)
                for i in range(value_dict[Expec('I', 'OOdop')].shape[(- 1)]):
                    value_dict[todo_expec][(..., i)] = (value_dict[Expec('I', 'OOdop')][(..., i)] - (2.0 * np.multiply(value_dict[Expec('I', 'dop')][(..., i)], value_dict[Expec('I', 'O')])))
            # Make the result reachable under the caller's original key and label.
            value_dict[todo] = value_dict[todo_expec]
            if (isinstance(todo, Expec) and (todo.label != '')):
                value_dict[todo.label] = value_dict[todo_expec]
        # Store the inputs alongside the results for later inspection.
        value_dict['x'] = x
        value_dict['param'] = param
        value_dict['param_op'] = param_op
        if self._result_caching:
            self.result_container[caching_tuple] = value_dict
        return value_dict
|
class QNNClassifier(BaseQNN, ClassifierMixin):
    'Quantum Neural Network for Classification.\n\n    This class implements a quantum neural network (QNN) for classification with a scikit-learn\n    interface. A parameterized quantum circuit and a possibly parameterized operator are used\n    as a ML model. They are trained according to a specified loss using the specified optimizer.\n    Mini-batch training is possible.\n\n    Args:\n        encoding_circuit (EncodingCircuitBase): The parameterized quantum circuit (PQC) part of the QNN.\n            For a list of encoding circuits, check this list of implemented :ref:`encoding_circuits`.\n        operator (Union[ObservableBase, list[ObservableBase]]): The operator that\n            is used in the expectation value of the QNN. Can be a list for multiple outputs. For a\n            list of operators, check this list of implemented :ref:`operators`.\n        executor (Executor): Executor instance.\n        loss (LossBase): The loss function to be optimized. Can also be combination of multiple\n            loss functions.\n        optimizer (OptimizerBase): The optimizer instance that is used to minimize the loss\n            function.\n        param_ini (np.ndarray, default=None): Initial values of the parameters of the PQC.\n        param_op_ini (np.ndarray, default=None): Initial values of the parameters of the operator.\n        batch_size (int, default=None): Number of data points in each batch in mini-batch training.\n            Will only be used if optimizer is of type SGDMixin.\n        epochs (int, default=None): Number of epochs of SGD to perform. Will only be used if\n            optimizer is of type SGDMixin.\n        shuffle (bool, default=None): If True, data points get shuffled before each epoch. Will\n            only be used if optimizer is of type SGDMixin.\n        opt_param_op (bool, default=True): If True, the operators parameters get optimized.\n        variance (Union[float, Callable], default=None): The variance factor to be used. If it is\n            None, the variance regularization will not be used. Else this determines the strength\n            of the variance regularization.\n        parameter_seed (Union[int, None], default=0): Seed for the random number generator for the\n            parameter initialization, if `param_ini` or `param_op_ini` is ``None``.\n        caching (bool, default=True): If True, the results of the QNN are cached.\n        pretrained (bool, default=False): Set to true if the supplied parameters are already\n            trained.\n        callback (Union[Callable, str, None], default=None): A callback for the optimization loop.\n        Can be either a Callable, "pbar" (which uses a :class:`tqdm.tqdm` process bar) or None.\n            If None, the optimizers (default) callback will be used.\n\n    See Also\n    --------\n        squlearn.qnn.QNNRegressor : Quantum Neural Network for Regression.\n\n    Methods:\n    --------\n    '
    def __init__(self, encoding_circuit: EncodingCircuitBase, operator: Union[(ObservableBase, list[ObservableBase])], executor: Executor, loss: LossBase, optimizer: OptimizerBase, param_ini: np.ndarray=None, param_op_ini: np.ndarray=None, batch_size: int=None, epochs: int=None, shuffle: bool=None, opt_param_op: bool=True, variance: Union[(float, Callable)]=None, shot_control: ShotControlBase=None, parameter_seed: Union[(int, None)]=0, caching: bool=True, pretrained: bool=False, callback: Union[(Callable, str, None)]='pbar', **kwargs) -> None:
        # All common QNN setup is delegated to BaseQNN; only the label binarizer
        # (created lazily on first fit) is classifier-specific state.
        super().__init__(encoding_circuit, operator, executor, loss, optimizer, param_ini, param_op_ini, batch_size, epochs, shuffle, opt_param_op, variance, shot_control, parameter_seed=parameter_seed, caching=caching, pretrained=pretrained, callback=callback, **kwargs)
        self._label_binarizer = None
    def predict(self, X: np.ndarray) -> np.ndarray:
        'Predict using the QNN.\n\n        Args:\n            X : The input data.\n\n        Returns:\n            np.ndarray : The predicted values.\n        '
        if ((not self._is_fitted) and (not self.pretrained)):
            raise RuntimeError('The model is not fitted.')
        if (self.shot_control is not None):
            self.shot_control.reset_shots()
        pred = self._qnn.evaluate_f(X, self._param, self._param_op)
        # Map the raw QNN output back to the original class labels.
        return self._label_binarizer.inverse_transform(pred)
    def predict_proba(self, X: np.ndarray) -> np.ndarray:
        'Return probabilities using the QNN.\n\n        Args:\n            X : The input data.\n\n        Returns:\n            np.ndarray : The probabilities\n        '
        if (self.shot_control is not None):
            # NOTE(review): predict() calls reset_shots() here, but this method
            # calls reset() — verify which is the intended ShotControlBase API.
            self.shot_control.reset()
        pred = self._qnn.evaluate_f(X, self._param, self._param_op)
        # Binary case: single output column -> stack [P(class0), P(class1)].
        if (pred.ndim == 1):
            return np.vstack([(1 - pred), pred]).T
        return pred
    def partial_fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray=None) -> None:
        "Fit a model to data.\n\n        This method will update the models parameters to fit the provided data.\n        It won't reinitialize the models parameters.\n\n        Args:\n            X: Input data\n            y: Labels\n            weights: Weights for each data point\n        "
        # First call: learn the label encoding from the provided labels.
        if (not self._is_fitted):
            self._label_binarizer = LabelBinarizer()
            self._label_binarizer.fit(y)
        if (len(y.shape) == 1):
            y = self._label_binarizer.transform(y).ravel()
        else:
            y = self._label_binarizer.transform(y)
        loss = self.loss
        # Optionally add variance regularization to the loss.
        if (self.variance is not None):
            loss = (loss + VarianceLoss(alpha=self.variance))
        # SGD-type optimizers with a batch size use mini-batch training;
        # everything else goes through the full-batch train() routine.
        if (isinstance(self.optimizer, SGDMixin) and self.batch_size):
            if self.opt_param_op:
                (self._param, self._param_op) = train_mini_batch(self._qnn, X, y, self._param, self._param_op, loss=loss, optimizer=self.optimizer, shot_control=self.shot_control, batch_size=self.batch_size, epochs=self.epochs, shuffle=self.shuffle, weights=weights, opt_param_op=True)
            else:
                self._param = train_mini_batch(self._qnn, X, y, self._param, self._param_op, loss=loss, optimizer=self.optimizer, shot_control=self.shot_control, batch_size=self.batch_size, epochs=self.epochs, shuffle=self.shuffle, weights=weights, opt_param_op=False)
        elif self.opt_param_op:
            (self._param, self._param_op) = train(self._qnn, X, y, self._param, self._param_op, loss, self.optimizer, self.shot_control, weights, True)
        else:
            self._param = train(self._qnn, X, y, self._param, self._param_op, loss, self.optimizer, self.shot_control, weights, False)
        self._is_fitted = True
    def _fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray=None) -> None:
        'Internal fit function.'
        # Optionally set up the tqdm progress bar before delegating to partial_fit.
        if (self.callback == 'pbar'):
            self._pbar = tqdm(total=self._total_iterations, desc='fit', file=sys.stdout)
        self.partial_fit(X, y, weights)
|
class QNNRegressor(BaseQNN, RegressorMixin):
'Quantum Neural Network for Regression.\n\n This class implements a quantum neural network (QNN) for regression with a scikit-learn interface.\n A parameterized quantum circuit and a possibly parameterized operator are used as a ML model.\n They are trained according to a specified loss using the specified optimizer. Mini-batch\n training is possible.\n\n Args:\n encoding_circuit (EncodingCircuitBase): The parameterized quantum circuit (PQC) part of the QNN.\n For a list of encoding circuits, check this list of implemented :ref:`encoding_circuits`.\n operator (Union[ObservableBase, list[ObservableBase]]): The operator that\n is used in the expectation value of the QNN. Can be a list for multiple outputs. For a\n list of operators, check this list of implemented :ref:`operators`.\n executor (Executor): Executor instance.\n loss (LossBase): The loss function to be optimized. Can also be combination of multiple\n loss functions.\n optimizer (OptimizerBase): The optimizer instance that is used to minimize the loss\n function.\n param_ini (np.ndarray, default=None): Initial values of the parameters of the PQC.\n param_op_ini (np.ndarray, default=None): Initial values of the parameters of the operator.\n batch_size (int, default=None): Number of data points in each batch in mini-batch training.\n Will only be used if optimizer is of type SGDMixin.\n epochs (int, default=None): Number of epochs of SGD to perform. Will only be used if\n optimizer is of type SGDMixin.\n shuffle (bool, default=None): If True, data points get shuffled before each epoch. Will\n only be used if optimizer is of type SGDMixin.\n opt_param_op (bool, default=True): If True, the operators parameters get optimized.\n variance (Union[float, Callable], default=None): The variance factor to be used. If it is\n None, the variance regularization will not be used. 
Else this determines the strength\n of the variance regularization.\n parameter_seed (Union[int, None], default=0): Seed for the random number generator for the\n parameter initialization, if `param_ini` or `param_op_ini` is ``None``.\n caching (bool, default=True): If True, the results of the QNN are cached.\n pretrained (bool, default=False): Set to true if the supplied parameters are already\n trained.\n callback (Union[Callable, str, None], default=None): A callback for the optimization loop.\n Can be either a Callable, "pbar" (which uses a :class:`tqdm.tqdm` process bar) or None.\n If None, the optimizers (default) callback will be used.\n\n See Also\n --------\n squlearn.qnn.QNNClassifier : Quantum Neural Network for Classification.\n\n **Example**\n\n .. code-block::\n\n import numpy as np\n from squlearn import Executor\n from squlearn.encoding_circuit import ChebyshevRx\n from squlearn.observables import IsingHamiltonian\n from squlearn.qnn import QNNRegressor, SquaredLoss\n from squlearn.optimizers import SLSQP\n from sklearn.model_selection import train_test_split\n\n X, y = np.arange(0.1, 0.9, 0.01), np.log(np.arange(0.1, 0.9, 0.01))\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.33, random_state=42\n )\n reg = QNNRegressor(\n ChebyshevRx(4, 1, 2),\n IsingHamiltonian(4, I="S", Z="S", ZZ="S"),\n Executor("statevector_simulator"),\n SquaredLoss(),\n SLSQP(),\n np.random.rand(16),\n np.random.rand(5)\n )\n reg.fit(X_train, y_train)\n y_pred = reg.predict(X_test[:5])\n\n Methods:\n --------\n\n '
def __init__(
    self,
    encoding_circuit: EncodingCircuitBase,
    operator: Union[ObservableBase, list[ObservableBase]],
    executor: Executor,
    loss: LossBase,
    optimizer: OptimizerBase,
    param_ini: np.ndarray = None,
    param_op_ini: np.ndarray = None,
    batch_size: int = None,
    epochs: int = None,
    shuffle: bool = None,
    opt_param_op: bool = True,
    variance: Union[float, Callable] = None,
    shot_control: ShotControlBase = None,
    parameter_seed: Union[int, None] = 0,
    caching: bool = True,
    pretrained: bool = False,
    callback: Union[Callable, str, None] = "pbar",
    **kwargs,
) -> None:
    """Initialize the regressor by delegating every argument to the shared QNN base class."""
    super().__init__(
        encoding_circuit,
        operator,
        executor,
        loss,
        optimizer,
        param_ini,
        param_op_ini,
        batch_size,
        epochs,
        shuffle,
        opt_param_op,
        variance,
        shot_control,
        parameter_seed=parameter_seed,
        caching=caching,
        pretrained=pretrained,
        callback=callback,
        **kwargs,
    )
def predict(self, X: np.ndarray) -> np.ndarray:
    """Predict using the QNN.

    Args:
        X: The input data.

    Returns:
        np.ndarray: The predicted values.
    """
    # Warn (but do not fail) when the model is neither fitted nor pretrained.
    ready = self._is_fitted or self.pretrained
    if not ready:
        warn('The model is not fitted.')
    # Restore the executor's initial shot budget before evaluating.
    if self.shot_control is not None:
        self.shot_control.reset_shots()
    return self._qnn.evaluate_f(X, self._param, self._param_op)
def partial_fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray = None) -> None:
    """Fit a model to data.

    This method will update the models parameters to fit the provided data.
    It won't reinitialize the models parameters.

    Args:
        X: Input data
        y: Labels
        weights: Weights for each data point
    """
    # Optionally augment the loss with a variance regularization term.
    if self.variance is None:
        total_loss = self.loss
    else:
        total_loss = self.loss + VarianceLoss(alpha=self.variance)
    # Mini-batch training requires an SGD-type optimizer and a batch size.
    if isinstance(self.optimizer, SGDMixin) and self.batch_size:
        result = train_mini_batch(
            self._qnn,
            X,
            y,
            self._param,
            self._param_op,
            loss=total_loss,
            optimizer=self.optimizer,
            shot_control=self.shot_control,
            batch_size=self.batch_size,
            epochs=self.epochs,
            shuffle=self.shuffle,
            weights=weights,
            opt_param_op=self.opt_param_op,
        )
    else:
        result = train(
            self._qnn,
            X,
            y,
            self._param,
            self._param_op,
            total_loss,
            self.optimizer,
            self.shot_control,
            weights,
            self.opt_param_op,
        )
    # With opt_param_op the trainers return a (param, param_op) pair.
    if self.opt_param_op:
        self._param, self._param_op = result
    else:
        self._param = result
    self._is_fitted = True
def _fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray = None) -> None:
    """Internal fit function: set up the optional progress bar, then delegate."""
    # The 'pbar' callback uses a tqdm bar sized by the total optimizer iterations.
    if self.callback == 'pbar':
        self._pbar = tqdm(total=self._total_iterations, desc='fit', file=sys.stdout)
    self.partial_fit(X, y, weights)
|
def get_variance_fac(v: float, a: float, b: float, offset: int = 0):
    """
    Function for adjusting the variance regularization along the iterations.

    Based on the sigmoid function, see Ref. [1] for details:

    .. math::
        \\alpha_{a,b,v}(i) = (1-v)\\frac{\\exp(a(b-i))}{\\exp(a(b-i))+\\frac{1}{b}}+v

    Args:
        v (float): Minimal variance factor value
        a (float): Decay of the variance factor
        b (float): Length of the plateau in the beginning
        offset (int): Offset for the number of iterations (e.g. for restart) (default: 0).

    Returns:
        Returns function with iteration as input for adjusting the variance factor

    References:
        [1] D. A. Kreplin and M. Roth "Reduction of finite sampling noise in quantum neural
        networks". `arXiv:2306.01639 <https://arxiv.org/abs/2306.01639>`_ (2023).
    """

    def get_variance_fac_func(iteration: int):
        """Return the variance factor for the given iteration."""
        # Sigmoid-shaped decay from ~1 down to the floor value v.
        shifted = (b - offset) - iteration
        decay = np.exp(a * shifted)
        return (1 - v) * decay / (decay + 1 / b) + v

    return get_variance_fac_func
|
def get_lr_decay(lr_start: float, lr_end: float, iter_decay: float, iter_plateau: int = 0):
    """
    Function for running an Adam optimization with a decay in the learning rate.

    Can be inputted to the learning rate of the Adam optimization.

    Args:
        lr_start (float): start value of the learning rate
        lr_end (float): final value of the learning rate
        iter_decay (float): decay of the learning rate
        iter_plateau (int): length of the plateau of the start value (default: 0)

    Returns:
        Returns function with iteration as input for adjusting the learning rate
    """
    # Exponential rate chosen so the lr falls from lr_start to lr_end over iter_decay steps.
    rate = (np.log(lr_end) - np.log(lr_start)) / iter_decay

    def lr_decay(iteration: int):
        """Return the learning rate for a given iteration."""
        lr = lr_start * np.exp(rate * float(iteration - iter_plateau))
        # Clamp to the [lr_end, lr_start] range (also covers the plateau region).
        return np.clip(lr, lr_end, lr_start)

    return lr_decay
|
class ShotControlBase():
    """Base class for controlling the number of shots during the optimization."""

    def __init__(self) -> None:
        # The executor is attached later via set_executor().
        self._executor = None
        self._initial_shots = None

    def _require_executor(self) -> None:
        """Raise if no executor has been attached yet."""
        if self._executor is None:
            raise ValueError('Executor not set, call set_executor() first')

    def set_executor(self, executor: Executor) -> None:
        """Function for setting the executor that is used for the shot control.

        Args:
            executor (Executor): Executor instance
        """
        self._executor = executor
        # Remember the executor's shot number so reset_shots() can restore it.
        self._initial_shots = executor.shots

    @property
    def executor(self) -> Executor:
        """Executor that is used for the shot control."""
        return self._executor

    @property
    def shots(self) -> int:
        """Current number of shots."""
        self._require_executor()
        return self._executor.shots

    def reset_shots(self) -> None:
        """Reset the shots to the initial value."""
        self._require_executor()
        if self._initial_shots is not None:
            self._executor.set_shots(self._initial_shots)

    def set_shots_for_loss(self, **kwargs):
        """Function for setting the shots for the loss function evaluation.

        Default behavior resets the shots to the initial value.

        Args:
            kwargs: Keyword arguments for the loss function evaluation
        """
        self.reset_shots()

    def set_shots_for_grad(self, **kwargs):
        """Function for setting the shots for the gradient evaluation.

        Default behavior resets the shots to the initial value.

        Args:
            kwargs: Keyword arguments for the gradient evaluation
        """
        self._require_executor()
        self.reset_shots()
|
class ShotsFromRSTD(ShotControlBase):
    """Shot control for setting the shots of the gradient evaluation after the RSTD of the loss.

    The number of shots in the gradient evaluation is set to:

    .. math::
        N_\\text{shots} = \\frac{\\sigma_L^2}{L^2 \\beta^2},

    where :math:`\\sigma_L` is the standard deviation of the loss, :math:`L` is the loss value
    and :math:`\\beta` is the bound for the relative standard deviation (RSTD) of the loss.
    See Ref. [1] for details.

    Args:
        rstd_bound (float): Bound for the RSTD of the loss (default: 0.1)
        min_shots (int): Minimal number of shots (default: 100)
        max_shots (int): Maximal number of shots, is also used for function evaluation
            (default: 5000)

    References:
        [1] D. A. Kreplin and M. Roth "Reduction of finite sampling noise in quantum neural
        networks". `arXiv:2306.01639 <https://arxiv.org/abs/2306.01639>`_ (2023).
    """

    def __init__(self, rstd_bound: float=0.1, min_shots: int=100, max_shots: int=5000) -> None:
        super().__init__()
        self.rstd_bound = rstd_bound
        self.min_shots = min_shots
        self.max_shots = max_shots
        # Use the maximal shot number as the value restored by reset_shots().
        self._initial_shots = max_shots

    def set_executor(self, executor: Executor) -> None:
        """Function for setting the executor that is used for the shot control.

        Args:
            executor (Executor): Executor instance
        """
        # Unlike the base class, do not overwrite _initial_shots here: it stays
        # pinned to max_shots (set in __init__).
        self._executor = executor

    def set_shots_for_loss(self, **kwargs):
        """Function for setting the shots for the loss function evaluation.

        Sets the shots to the maximal value.

        Args:
            kwargs: Keyword arguments for the loss function evaluation
        """
        self._executor.set_shots(self.max_shots)

    def set_shots_for_grad(self, **kwargs):
        """Function for setting the shots for the gradient evaluation.

        Sets the shots to the value that is determined by the RSTD of the loss.

        Args:
            kwargs: Keyword arguments; must contain ``value`` (current loss value)
                and ``variance`` (current loss variance).

        Raises:
            AttributeError: If ``value`` or ``variance`` is missing in ``kwargs``.
            ValueError: If no executor has been set.
        """
        # Bug fix: the previous error messages ("Value requires ground_truth." /
        # "Variance requires variance.") did not describe the actual problem.
        if ('value' not in kwargs):
            raise AttributeError("set_shots_for_grad requires the keyword argument 'value'.")
        if ('variance' not in kwargs):
            raise AttributeError("set_shots_for_grad requires the keyword argument 'variance'.")
        if (self._executor is None):
            raise ValueError('Executor not set, call set_executor() first')
        value = kwargs['value']
        variance = kwargs['variance']
        # N_shots = sigma_L^2 / (L^2 * beta^2). Guard against a vanishing loss
        # value, which previously produced inf/nan and crashed int() below.
        with np.errstate(divide='ignore', invalid='ignore'):
            raw_shots = np.divide(variance, (np.square(value) * np.square(self.rstd_bound)))
        if np.isfinite(raw_shots):
            shots = int(raw_shots)
        else:
            shots = self.max_shots
        # Clamp into the configured [min_shots, max_shots] window.
        num_shots = min(max(shots, self.min_shots), self.max_shots)
        self._executor.set_shots(num_shots)
|
def train(qnn: QNN, input_values: Union[(list, np.ndarray)], ground_truth: Union[(list, np.ndarray)], param_ini: Union[(list, np.ndarray)], param_op_ini: Union[(list, np.ndarray)], loss: LossBase, optimizer: OptimizerBase, shot_control: ShotControlBase=None, weights: Union[(list, np.ndarray)]=None, opt_param_op: bool=True):
    """
    Function for training a given QNN.

    Args:
        qnn (QNN): QNN instance that is trained
        input_values (Union[list,np.ndarray]): List of input values, i.e. training data
        ground_truth (Union[list,np.ndarray]): List of ground truth values,
            e.g. labels of the training data
        param_ini (Union[list,np.ndarray]): Initial parameters of the encoding circuit
        param_op_ini (Union[list,np.ndarray]): Initial parameters of the observable
        loss (LossBase): Loss instance that is minimized
        optimizer (OptimizerBase): Optimizer instance that is used for the minimization
        shot_control (ShotControlBase): Shot control instance that is used for setting the
            shots for each optimization step (default: None)
        weights (Union[list,np.ndarray]): Weighting of the reference values. Has to be the
            same size as input and ground_truth (default: None)
        opt_param_op (bool): If True, observable parameters are optimized as well
            (default: True)

    Returns:
        Optimized parameters of the PQC, and, if opt_param_op=True,
        the optimized parameters of the observable

    Raises:
        TypeError: If the weights have an unknown format.
        ValueError: If the weights' shape does not match the ground truth's shape, or if
            the loss variance is unavailable while a ShotsFromRSTD shot control is used.
    """
    # Default to uniform weights; only numpy arrays (or None) are accepted.
    if isinstance(weights, np.ndarray):
        weights_values = weights
    elif (weights is None):
        weights_values = np.ones(ground_truth.shape)
    else:
        raise TypeError(f'Unknown weight format: {type(weights)}')
    loss.set_opt_param_op(opt_param_op)
    if (weights_values.shape != ground_truth.shape):
        raise ValueError(f"Shape {weights_values.shape} of weight values doesn't match shape {ground_truth.shape} of reference values")
    # Wrap scalar initial parameters into 1D numpy arrays.
    if (not isinstance(param_ini, np.ndarray)):
        param = np.array([param_ini])
    else:
        param = param_ini
    if (not isinstance(param_op_ini, np.ndarray)):
        param_op = np.array([param_op_ini])
    else:
        param_op = param_op_ini
    # The optimizer operates on a single flat vector; the observable parameters
    # are appended to the circuit parameters when they are optimized as well.
    val_ini = param
    if opt_param_op:
        val_ini = np.concatenate((val_ini, param_op), axis=None)
    iteration = 0

    def _fun(theta):
        # Loss function handed to the optimizer; theta is the flat parameter vector.
        nonlocal iteration
        nonlocal optimizer
        nonlocal param_op
        # Track the optimizer's iteration counter (used for iteration-dependent
        # losses, e.g. variance factors); None if the optimizer does not count.
        if isinstance(optimizer, IterativeMixin):
            iteration = optimizer.iteration
        else:
            iteration = None
        # Split the flat vector back into circuit and observable parameters.
        if opt_param_op:
            param_ = theta[:len(param_ini)]
            param_op_ = theta[len(param_ini):]
        else:
            param_ = theta
            param_op_ = param_op
        if (shot_control is not None):
            if isinstance(shot_control, ShotsFromRSTD):
                # Loss evaluations always use the maximal shot budget.
                shot_control.set_shots_for_loss()
        loss_values = qnn.evaluate(loss.loss_args_tuple, input_values, param_, param_op_)
        loss_value = loss.value(loss_values, ground_truth=ground_truth, weights=weights_values, iteration=iteration)
        return loss_value

    def _grad(theta):
        # Gradient function handed to the optimizer.
        nonlocal iteration
        nonlocal optimizer
        nonlocal param_op
        if isinstance(optimizer, IterativeMixin):
            iteration = optimizer.iteration
        else:
            iteration = None
        if opt_param_op:
            param_ = theta[:len(param_ini)]
            param_op_ = theta[len(param_ini):]
        else:
            param_ = theta
            param_op_ = param_op
        if (shot_control is not None):
            if isinstance(shot_control, ShotsFromRSTD):
                # The RSTD shot controller needs the current loss value and its
                # variance to choose the shot budget for the gradient.
                if loss.loss_variance_available:
                    loss_variance = loss.variance(qnn.evaluate(loss.variance_args_tuple, input_values, param_, param_op_), ground_truth=ground_truth, weights=weights_values, iteration=iteration)
                    loss_values = loss.value(qnn.evaluate(loss.loss_args_tuple, input_values, param_, param_op_), ground_truth=ground_truth, weights=weights_values, iteration=iteration)
                    shot_control.set_shots_for_grad(value=loss_values, variance=loss_variance)
                else:
                    raise ValueError('Loss variance necessary for ShotsFromRSTD shot control')
        grad_values = qnn.evaluate(loss.gradient_args_tuple, input_values, param_, param_op_)
        grad = np.concatenate(loss.gradient(grad_values, ground_truth=ground_truth, weights=weights_values, iteration=iteration, multiple_output=qnn.multiple_output, opt_param_op=opt_param_op), axis=None)
        return grad
    # Nothing to optimize: return empty parameter arrays immediately.
    if (len(val_ini) == 0):
        if opt_param_op:
            return (np.array([]), np.array([]))
        else:
            return np.array([])
    result = optimizer.minimize(_fun, val_ini, _grad, bounds=None)
    # Some optimizers return a result object; extract the parameter vector.
    if hasattr(result, 'x'):
        result = result.x
    if opt_param_op:
        param = result[:len(param_ini)]
        param_op = result[len(param_ini):]
        return (param, param_op)
    param = result
    return param
|
def train_mini_batch(qnn: QNN, input_values: Union[(list, np.ndarray)], ground_truth: Union[(list, np.ndarray)], param_ini: Union[(list, np.ndarray)], param_op_ini: Union[(list, np.ndarray)], loss: LossBase, optimizer: OptimizerBase, shot_control: ShotControlBase=None, weights: Union[(list, np.ndarray)]=None, opt_param_op: bool=True, epochs: int=10, batch_size: int=None, shuffle=False):
    """Minimize a loss function using mini-batch gradient descent.

    Args:
        qnn (QNN): QNN instance that is trained
        input_values (Union[list,np.ndarray]): List of input values, i.e. training data
        ground_truth (Union[list,np.ndarray]): List of ground truth values,
            e.g. labels of the training data
        param_ini (Union[list,np.ndarray]): Initial parameters of the encoding circuit
        param_op_ini (Union[list,np.ndarray]): Initial parameters of the observable
        loss (LossBase): Loss instance that is minimized
        optimizer (OptimizerBase): Optimizer instance that is used for the minimization
        shot_control (ShotControlBase): Shot control instance that is used for setting the
            shots for each optimization step (default: None)
        weights (Union[list,np.ndarray]): Weighting of the reference values. Has to be the
            same size as input and ground_truth (default: None)
        opt_param_op (bool): If True, observable parameters are optimized as well
            (default: True)
        epochs (int): Number of epochs of SGD to perform (default: 10)
        batch_size (int): Number of data points in each batch
            (default: min(100, number of samples))
        shuffle (bool): If True, data points get shuffled before each epoch (default: False)

    Returns:
        Optimized parameters of the PQC, and, if opt_param_op=True,
        the optimized parameters of the observable

    Raises:
        TypeError: If the optimizer is not an SGDMixin or the weights have an unknown format.
        ValueError: If the weights' shape does not match the ground truth's shape, or if
            the loss variance is unavailable while a ShotsFromRSTD shot control is used.
    """
    # Mini-batch training is only defined for stochastic gradient descent optimizers.
    if (not isinstance(optimizer, SGDMixin)):
        raise TypeError(f'Optimizer {optimizer.__class__.__name__} is not supported for mini-batch gradient descent.')
    # Default to uniform weights; only numpy arrays (or None) are accepted.
    if isinstance(weights, np.ndarray):
        weights_values = weights
    elif (weights is None):
        weights_values = np.ones(ground_truth.shape)
    else:
        raise TypeError(f'Unknown weight format: {type(weights)}')
    loss.set_opt_param_op(opt_param_op)
    if (weights_values.shape != ground_truth.shape):
        raise ValueError(f"Shape {weights_values.shape} of weight values doesn't match shape {ground_truth.shape} of reference values")
    n_samples = len(input_values)
    idcs = np.arange(n_samples)
    if (epochs is None):
        epochs = 10
    if (batch_size is None):
        batch_size = min(100, n_samples)
    # Wrap scalar initial parameters into 1D numpy arrays.
    if (not isinstance(param_ini, np.ndarray)):
        param = np.array([param_ini])
    else:
        param = param_ini
    if (not isinstance(param_op_ini, np.ndarray)):
        param_op = np.array([param_op_ini])
    else:
        param_op = param_op_ini
    # Early exit when there is nothing to optimize. Note that empty circuit
    # parameters combined with non-empty observable parameters (and
    # opt_param_op=True) still enter the training loop below.
    if (len(param_ini) == 0):
        if opt_param_op:
            if (len(param_op_ini) == 0):
                return (np.array([]), np.array([]))
        else:
            return np.array([])
    for epoch in range(epochs):
        accumulated_loss = 0.0
        if shuffle:
            # Reshuffle the sample indices at the start of every epoch.
            idcs = np.random.permutation(idcs)
        for batch_slice in gen_batches(n_samples, batch_size):
            if (shot_control is not None):
                if isinstance(shot_control, ShotsFromRSTD):
                    # Loss evaluations always use the maximal shot budget.
                    shot_control.set_shots_for_loss()
            loss_values = qnn.evaluate(loss.loss_args_tuple, input_values[idcs[batch_slice]], param, param_op)
            batch_loss = loss.value(loss_values, ground_truth=ground_truth[idcs[batch_slice]], weights=weights_values[idcs[batch_slice]], iteration=epoch)
            accumulated_loss += batch_loss
            if (shot_control is not None):
                if isinstance(shot_control, ShotsFromRSTD):
                    # The RSTD shot controller chooses the gradient shot budget
                    # from the batch loss and its variance.
                    if loss.loss_variance_available:
                        batch_loss_variance = loss.variance(qnn.evaluate(loss.variance_args_tuple, input_values[idcs[batch_slice]], param, param_op), ground_truth=ground_truth[idcs[batch_slice]], weights=weights_values[idcs[batch_slice]], iteration=epoch)
                        shot_control.set_shots_for_grad(value=batch_loss, variance=batch_loss_variance)
                    else:
                        raise ValueError('Loss variance necessary for ShotsFromRSTD shot control')
            diff_values = qnn.evaluate(loss.gradient_args_tuple, input_values[idcs[batch_slice]], param, param_op)
            grad = loss.gradient(diff_values, ground_truth=ground_truth[idcs[batch_slice]], weights=weights_values[idcs[batch_slice]], iteration=epoch, multiple_output=qnn.multiple_output, opt_param_op=opt_param_op)
            if opt_param_op:
                # Step on the concatenated (param, param_op) vector and split it back.
                updated_params = optimizer.step(x=np.concatenate((param, param_op), axis=None), grad=np.concatenate(grad, axis=None))
                param = updated_params[:len(param_ini)]
                param_op = updated_params[len(param_ini):]
            else:
                param = optimizer.step(x=param, grad=grad)
        if optimizer.callback:
            # Bug fix: previously the per-epoch callback was only invoked when the
            # observable parameters were optimized, so progress reporting (e.g.
            # the 'pbar' callback) never fired for opt_param_op=False.
            if opt_param_op:
                optimizer.callback(epoch, np.concatenate((param, param_op), axis=None), np.concatenate(grad, axis=None), (accumulated_loss / n_samples))
            else:
                optimizer.callback(epoch, param, np.concatenate(grad, axis=None), (accumulated_loss / n_samples))
    if opt_param_op:
        return (param, param_op)
    return param
|
def adjust_features(x: Union[(np.ndarray, float)], x_length: int) -> Tuple[(np.ndarray, bool)]:
    """Adjust the feature vector to the form [[]] if necessary.

    Args:
        x (np.ndarray): Input array.
        x_length (int): Dimension of the input array, e.g. feature dimension.

    Return:
        Adjusted feature array and a boolean flag for multiple inputs.
    """
    # For features, a 1D array of single-feature values counts as multiple
    # inputs (allow_single_array=False) — unlike parameters.
    return _adjust_input(x, x_length, allow_single_array=False)
|
def adjust_parameters(x: np.ndarray, x_length: int) -> Tuple[(np.ndarray, bool)]:
    """Adjust the parameter vector to the form [[]] if necessary.

    In contrast to feature vectors, one dimensional parameters are not considered
    as multiple inputs.

    Args:
        x (np.ndarray): Input array.
        x_length (int): Dimension of the input array, e.g. feature dimension.

    Return:
        Adjusted parameter array and a boolean flag for multiple inputs.
    """
    # allow_single_array=True: a single 1D parameter vector is one input.
    return _adjust_input(x, x_length, allow_single_array=True)
|
def _adjust_input(x: Union[(float, np.ndarray)], x_length: int, allow_single_array: bool) -> Tuple[(np.ndarray, bool)]:
    """Adjust the input to the canonical 2D form [[]] if necessary.

    If ``allow_single_array`` is True, a one dimensional array is not considered
    as multiple inputs.

    Args:
        x (np.ndarray): Input array.
        x_length (int): Dimension of the input array, e.g. feature dimension.
        allow_single_array (bool): If True, a one dimensional array is not
            considered as multiple outputs.

    Return:
        Adjusted input array and a boolean flag for multiple inputs.

    Raises:
        ValueError: If the input cannot be mapped onto the expected dimension.
    """
    shape = np.shape(x)
    # Bare scalar with one expected feature: wrap twice -> single 1-dim input.
    if shape == () and x_length == 1:
        return (np.array([[x]]), False)
    if len(shape) == 1:
        if x_length == 1:
            # Flat array of scalars: each entry becomes its own 1-dim input.
            wrapped = np.array([np.array([entry]) for entry in x])
            if allow_single_array:
                return (wrapped, shape[0] != 1)
            return (wrapped, True)
        if len(x) == x_length:
            # A single input of dimension x_length.
            return (np.array([x]), False)
    elif len(shape) == 2 and shape[1] == x_length:
        # Already in [[...]] form: multiple inputs.
        return (x, True)
    raise ValueError('Wrong format of an input variable.')
|
class Executor():
'\n A class for executing quantum jobs on IBM Quantum systems or simulators.\n\n The Executor class is the central component of sQUlearn, responsible for running quantum jobs.\n Both high- and low-level methods utilize the Executor class to execute jobs seamlessly.\n It automatically creates the necessary primitives when they are required in the sQUlearn\n sub-program. The Executor takes care about session handling, result caching, and automatic\n restarts of failed jobs.\n\n The Estimator can be initialized with various objects that specify the execution environment,\n as for example a Qiskit backend either from IBM Quantum or a Aer simulator.\n\n A detailed introduction to the Executor can be found in the\n :doc:`User Guide: The Executor Class </user_guide/executor>`\n\n Args:\n execution (Union[str, Backend, QiskitRuntimeService, Session, BaseEstimator, BaseSampler]): The execution environment, possible inputs are:\n\n * A string, that specifics the simulator\n backend (``"statevector_simulator"`` or ``"qasm_simulator"``)\n * A Qiskit backend, to run the jobs on IBM Quantum\n systems or simulators\n * A QiskitRuntimeService, to run the jobs on the Qiskit Runtime service\n In this case the backend has to be provided separately via ``backend=``\n * A Session, to run the jobs on the Qiskit Runtime service\n * A Estimator primitive (either simulator or Qiskit Runtime primitive)\n * A Sampler primitive (either simulator or Qiskit Runtime primitive)\n\n Default is the initialization with the :class:`StatevectorSimulator`.\n backend (Union[Backend, str, None]): The backend that is used for the execution.\n Only mandatory if a service is provided.\n options_estimator (Union[Options, Options, None]): The options for the created estimator\n primitives.\n options_sampler (Union[Options, Options, None]): The options for the created sampler\n primitives.\n log_file (str): The name of the log file, if empty, no log file is created.\n caching (Union[bool, None]): Whether to 
cache the results of the jobs.\n cache_dir (str): The directory where to cache the results of the jobs.\n max_session_time (str): The maximum time for a session, similar input as in Qiskit.\n max_jobs_retries (int): The maximum number of retries for a job\n until the execution is aborted.\n wait_restart (int): The time to wait before restarting a job in seconds.\n shots (Union[int, None]): The number of initial shots that is used for the execution.\n\n Attributes:\n -----------\n\n Attributes:\n execution (str): String of the execution environment.\n backend (Backend): The backend that is used in the Executor.\n session (Session): The session that is used in the Executor.\n service (QiskitRuntimeService): The service that is used in the Executor.\n estimator (BaseEstimator): The Qiskit estimator primitive that is used in the Executor.\n Different to :meth:`get_estimator`,\n which creates a new estimator object with overwritten methods\n that runs everything through the Executor with\n :meth:`estimator_run`.\n sampler (BaseSampler): The Qiskit sampler primitive that is used in the Executor.\n Different to :meth:`get_sampler`,\n which creates a new sampler object with overwritten methods\n that runs everything through the Executor with\n :meth:`estimator_run`.\n shots (int): The number of shots that is used in the Executor.\n\n See Also:\n * :doc:`User Guide: The Executor Class </user_guide/executor>`\n * `Qiskit Runtime <https://quantum-computing.ibm.com/lab/docs/iql/runtime>`_\n * `Qsikit Primitives <https://qiskit.org/documentation/apidoc/primitives.html>`_\n\n **Example: Different initializations of the Executor**\n\n .. 
code-block:: python\n\n from squlearn import Executor\n from qiskit_ibm_runtime import QiskitRuntimeService\n\n # Executor with a ideal simulator backend\n exec = Executor("statevector_simulator")\n\n # Executor with a shot-based simulator backend and 1000 shots\n exec = Executor("qasm_simulator")\n exec.set_shots(1000)\n\n # Executor with a IBM Quantum backend\n service = QiskitRuntimeService(channel="ibm_quantum", token="INSERT_YOUR_TOKEN_HERE")\n executor = Executor(service.get_backend(\'ibm_nairobi\'))\n\n # Executor with a IBM Quantum backend and caching and logging\n service = QiskitRuntimeService(channel="ibm_quantum", token="INSERT_YOUR_TOKEN_HERE")\n executor = Executor(service.get_backend(\'ibm_nairobi\'), caching=True,\n cache_dir=\'cache\', log_file="log.log")\n\n **Example: Get the Executor based primitives**\n\n .. jupyter-execute::\n\n from squlearn import Executor\n\n # Initialize the Executor\n executor = Executor("statevector_simulator")\n\n # Get the Executor based Estimator - can be used as a normal Qiskit Estimator\n estimator = executor.get_estimator()\n\n # Get the Executor based Sampler - can be used as a normal Qiskit Sampler\n sampler = executor.get_sampler()\n\n .. jupyter-execute::\n\n # Run a circuit with the Executor based Sampler\n from qiskit.circuit.random import random_circuit\n circuit = random_circuit(2, 2, seed=1, measure=True).decompose(reps=1)\n job = sampler.run(circuit)\n result = job.result()\n\n\n Methods:\n --------\n '
def __init__(self, execution: Union[(str, Backend, QiskitRuntimeService, Session, BaseEstimator, BaseSampler)]='statevector_simulator', backend: Union[(Backend, str, None)]=None, options_estimator: Union[(Options, qiskit_ibm_runtime_Options)]=None, options_sampler: Union[(Options, qiskit_ibm_runtime_Options)]=None, log_file: str='', caching: Union[(bool, None)]=None, cache_dir: str='_cache', max_session_time: str='8h', max_jobs_retries: int=10, wait_restart: int=1, shots: Union[(int, None)]=None, primitive_seed: Union[(int, None)]=None) -> None:
    """Initialize the Executor from one of several execution environments.

    ``execution`` may be a simulator name string, a Backend, a
    QiskitRuntimeService (then ``backend`` must be given), a Session, or an
    Estimator/Sampler primitive; backend, service, session, and primitives are
    derived from it accordingly.

    Raises:
        ValueError: For unknown simulator strings, backend types, execution
            types, or a QiskitRuntimeService without a backend.
        RuntimeError: If no backend can be derived from a given primitive.
    """
    # Default state; filled in below depending on the type of `execution`.
    self._backend = None
    self._session = None
    self._service = None
    self._estimator = None
    self._sampler = None
    self._remote = False
    self._session_active = False
    self._execution_origin = ''
    # Primitive options default to empty dicts when not provided.
    self._options_estimator = options_estimator
    if (self._options_estimator is None):
        self._options_estimator = {}
    self._options_sampler = options_sampler
    if (self._options_sampler is None):
        self._options_sampler = {}
    self._set_seed_for_primitive = primitive_seed
    self._log_file = log_file
    self._caching = caching
    self._max_session_time = max_session_time
    self._max_jobs_retries = max_jobs_retries
    self._wait_restart = wait_restart
    # Logging setup: a rotating file handler (~5 MiB per file) when a log file
    # is given; otherwise a plain logger without a file handler.
    if (self._log_file != ''):
        fh = handlers.RotatingFileHandler(self._log_file, maxBytes=(1048576 * 5), backupCount=100)
        log_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(log_format)
        self._logger = logging.getLogger('executor')
        self._logger.addHandler(fh)
        self._logger.setLevel(logging.INFO)
    else:
        self._logger = logging.getLogger('executor')
        self._logger.setLevel(logging.INFO)
    # Allow supplying the execution environment via the `backend` keyword only.
    if ((execution is None) and (backend is not None)):
        execution = backend
    if isinstance(execution, str):
        # Simulator selected by name.
        if (execution == 'statevector_simulator'):
            self._backend = Aer.get_backend(execution)
        elif (execution == 'qasm_simulator'):
            self._backend = Aer.get_backend(execution)
            # Default to the simulator backend's configured shot number.
            shots_backend = self._backend.options.shots
            if (shots is None):
                shots = shots_backend
        elif ('ibm' in execution):
            raise ValueError(('IBM backend are not supported by string input, since credentials are missing ' + execution))
        else:
            raise ValueError(('Unknown backend string: ' + execution))
        self._execution_origin = 'Simulator'
    elif isinstance(execution, Backend):
        # Plain backend; pick up an attached runtime service when available.
        if hasattr(execution, 'service'):
            self._service = execution.service
        self._backend = execution
        self._execution_origin = 'Backend'
        if (shots is None):
            shots = self._backend.options.shots
            # Statevector simulation is exact, so no shots are used.
            if ('statevector_simulator' in str(self._backend)):
                shots = None
    elif isinstance(execution, QiskitRuntimeService):
        self._service = execution
        # A runtime service needs an explicitly specified backend.
        if isinstance(backend, str):
            self._backend = self._service.get_backend(backend)
        elif isinstance(backend, Backend):
            self._backend = backend
        elif (backend is None):
            raise ValueError('Backend has to be specified for QiskitRuntimeService')
        else:
            raise ValueError(('Unknown backend type: ' + backend))
        if (shots is None):
            shots = self._backend.options.shots
            if ('statevector_simulator' in str(self._backend)):
                shots = None
        self._execution_origin = 'QiskitRuntimeService'
    elif isinstance(execution, Session):
        # Reuse an existing, already-active session.
        self._session = execution
        self._service = self._session.service
        self._backend = self._session.service.get_backend(self._session.backend())
        self._session_active = True
        self._execution_origin = 'Session'
        if (shots is None):
            shots = self._backend.options.shots
            if ('statevector_simulator' in str(self._backend)):
                shots = None
    elif isinstance(execution, BaseEstimator):
        # Derive backend/session/service from a given Estimator primitive.
        self._estimator = execution
        if isinstance(self._estimator, qiskit_primitives_Estimator):
            # Reference (statevector) estimator.
            self._backend = Aer.get_backend('statevector_simulator')
        elif isinstance(self._estimator, qiskit_primitives_BackendEstimator):
            self._backend = self._estimator._backend
            # Propagate shot numbers between estimator options and Executor.
            shots_estimator = self._estimator.options.get('shots', 0)
            if (shots_estimator == 0):
                if (shots is None):
                    shots = 1024
                self._estimator.set_options(shots=shots)
            else:
                shots = shots_estimator
        elif hasattr(self._estimator, 'session'):
            # Runtime estimator: adopt its session and service.
            self._session = self._estimator.session
            self._service = self._estimator.session.service
            self._backend = self._estimator.session.service.get_backend(self._estimator.session.backend())
            self._session_active = True
        else:
            raise RuntimeError('No backend found in the given Estimator Primitive!')
        # NOTE(review): `self._options_estimator` was defaulted to {} above, so this
        # `is None` branch can never trigger; presumably `if not self._options_estimator`
        # was intended — confirm before changing.
        if (self._options_estimator is None):
            self._options_estimator = self._estimator.options
        else:
            self._estimator.options.update_options(**self._options_estimator)
        self._execution_origin = 'Estimator'
    elif isinstance(execution, BaseSampler):
        # Derive backend/session/service from a given Sampler primitive
        # (mirrors the Estimator branch above).
        self._sampler = execution
        if isinstance(self._sampler, qiskit_primitives_Sampler):
            self._backend = Aer.get_backend('statevector_simulator')
        elif isinstance(self._sampler, qiskit_primitives_BackendSampler):
            self._backend = self._sampler._backend
            shots_sampler = self._sampler.options.get('shots', 0)
            if (shots_sampler == 0):
                if (shots is None):
                    shots = 1024
                self._sampler.set_options(shots=shots)
            else:
                shots = shots_sampler
        elif hasattr(self._sampler, 'session'):
            self._session = self._sampler.session
            self._service = self._sampler.session.service
            self._backend = self._sampler.session.service.get_backend(self._sampler.session.backend())
            self._session_active = True
        else:
            raise RuntimeError('No backend found in the given Sampler Primitive!')
        # NOTE(review): same dead `is None` check as in the Estimator branch above.
        if (self._options_sampler is None):
            self._options_sampler = self._sampler.options
        else:
            self._sampler.options.update_options(**self._options_sampler)
        self._execution_origin = 'Sampler'
    else:
        raise ValueError(('Unknown execution type: ' + str(type(execution))))
    # Remote execution is assumed for IBM backends; enables caching by default.
    if ('ibm' in str(self._backend)):
        self._remote = True
    else:
        self._remote = False
    self._shots = shots
    self.set_shots(shots)
    # (sic: attribute name keeps the historical typo "_inital_num_shots")
    self._inital_num_shots = self.get_shots()
    if (self._caching is None):
        self._caching = self._remote
    if self._caching:
        self._cache = ExecutorCache(self._logger, cache_dir)
    # NOTE(review): the f-prefix is redundant here; '{{}}' renders as '{}',
    # which .format() then fills in.
    self._logger.info(f'Executor initialized with backend: {{}}'.format(self._backend))
    self._logger.info(f'Executor initialized with service: {{}}'.format(self._service))
    if (self._session is not None):
        self._logger.info(f'Executor initialized with session: {{}}'.format(self._session.session_id))
    else:
        self._logger.info(f'Executor initialized with session: {{}}'.format(self._session))
    self._logger.info(f'Executor initialized with estimator: {{}}'.format(self._estimator))
    self._logger.info(f'Executor initialized with sampler: {{}}'.format(self._sampler))
    self._logger.info(f'Executor intial shots: {{}}'.format(self._inital_num_shots))
@property
def execution(self) -> str:
    """Returns a string of the execution that is used to initialize the executor class."""
    # One of: '', 'Simulator', 'Backend', 'QiskitRuntimeService', 'Session',
    # 'Estimator', 'Sampler' (set in __init__).
    return self._execution_origin
@property
def backend(self) -> Backend:
    """Returns the backend that is used in the executor (may be None)."""
    return self._backend
@property
def session(self) -> Session:
    """Returns the Qiskit Runtime session that is used in the executor (may be None)."""
    return self._session
@property
def service(self) -> QiskitRuntimeService:
    """Returns the Qiskit Runtime service that is used in the executor (may be None)."""
    return self._service
@property
def estimator(self) -> BaseEstimator:
    """Returns the estimator primitive that is used for the execution.

    This function creates estimators automatically and checks for an expired
    session, creating a new one if necessary.
    Note that the run function is the same as in the Qiskit primitives, and
    does not support caching and restarts.
    For this use :meth:`estimator_run` or :meth:`get_estimator`.

    The estimator that is created depends on the backend that is used for the
    execution.
    """
    if (self._estimator is not None):
        # Estimator already exists; recreate it only if its session expired.
        if ((self._session is not None) and (self._session_active is False)):
            self.create_session()
            self._estimator = qiskit_ibm_runtime_Estimator(session=self._session, options=self._options_estimator)
        estimator = self._estimator
    else:
        shots = self.get_shots()
        if (self._session is not None):
            # Session exists: revive it first if it expired.
            if (self._session_active is False):
                self.create_session()
                self._estimator = qiskit_ibm_runtime_Estimator(session=self._session, options=self._options_estimator)
        elif (self._service is not None):
            # Runtime service without a session: open a new session.
            self.create_session()
            self._estimator = qiskit_ibm_runtime_Estimator(session=self._session, options=self._options_estimator)
        elif ('statevector_simulator' in str(self._backend)):
            # Exact simulator: use the reference Estimator.
            self._estimator = qiskit_primitives_Estimator(options=self._options_estimator)
            self._estimator.set_options(shots=self._shots)
        else:
            # Generic backend: shot-based BackendEstimator.
            self._estimator = qiskit_primitives_BackendEstimator(backend=self._backend, options=self._options_estimator)
            if (shots is None):
                shots = 1024
        # Without user-supplied options, propagate the executor's shot number.
        if (not self._options_estimator):
            self.set_shots(shots)
        estimator = self._estimator
    return estimator
def clear_estimator_cache(self) -> None:
    """Empties the internal circuit/observable caches of a local estimator
    primitive to avoid memory overflow."""
    est = self._estimator
    if est is None:
        return
    if isinstance(est, (qiskit_primitives_Estimator, qiskit_primitives_BackendEstimator)):
        est._circuits = []
        est._observables = []
        est._parameters = []
        est._circuit_ids = {}
        est._observable_ids = {}
@property
def sampler(self) -> BaseSampler:
    """Returns the sampler primitive that is used for the execution.

    The sampler is created lazily on first access, and a
    qiskit-ibm-runtime sampler is re-created whenever its session has
    expired.

    Note that the run function of the returned primitive is the plain
    Qiskit one and does not support caching, session handling, etc.
    For this use :meth:`sampler_run` or :meth:`get_sampler`.

    The sampler that is created depends on the backend that is used for
    the execution.
    """
    if (self._sampler is not None):
        # Sampler already exists; only rebuild it if its session expired.
        if ((self._session is not None) and (self._session_active is False)):
            self.create_session()
            self._sampler = qiskit_ibm_runtime_Sampler(session=self._session, options=self._options_sampler)
        sampler = self._sampler
    else:
        shots = self.get_shots()  # remember shots so they survive primitive creation
        if (self._session is not None):
            # Session object available: refresh it if inactive, then reuse it.
            if (self._session_active is False):
                self.create_session()
            self._sampler = qiskit_ibm_runtime_Sampler(session=self._session, options=self._options_sampler)
        elif (self._service is not None):
            # Service without session: open a fresh session first.
            self.create_session()
            self._sampler = qiskit_ibm_runtime_Sampler(session=self._session, options=self._options_sampler)
        elif ('statevector_simulator' in str(self._backend)):
            # Exact local simulation -> reference primitive.
            self._sampler = qiskit_primitives_Sampler(options=self._options_sampler)
            self._sampler.set_options(shots=self._shots)
        else:
            # Generic backend -> wrap it in a BackendSampler.
            self._sampler = qiskit_primitives_BackendSampler(backend=self._backend, options=self._options_sampler)
            if (shots is None):
                shots = 1024  # BackendSampler needs a concrete shot number
        if (not self._options_sampler):
            self.set_shots(shots)
        sampler = self._sampler
    return sampler
def clear_sampler_cache(self) -> None:
    """Empties the internal circuit caches of a local sampler primitive
    to avoid memory overflow."""
    smp = self._sampler
    if smp is None:
        return
    if isinstance(smp, (qiskit_primitives_Sampler, qiskit_primitives_BackendSampler)):
        smp._circuits = []
        smp._parameters = []
        smp._circuit_ids = {}
        smp._qargs_list = []
def _primitive_run(self, run: callable, label: str, hash_value: Union[(str, None)]=None) -> Job:
    """Run function that allows restarting, session handling and caching.

    Parent implementation that is used for both, Estimator and Sampler.

    Args:
        run (callable): Run function of the primitive
        label (str): Label that is used for logging.
        hash_value (str,None): Hash value that is used for caching.

    Returns:
        A qiskit job containing the results of the run.
    """
    success = False
    critical_error = False
    critical_error_message = None
    # Outer retry loop: resubmit until success, abort on critical errors,
    # give up after self._max_jobs_retries attempts.
    for repeat in range(self._max_jobs_retries):
        try:
            job = None
            cached = False
            if ((hash_value is not None) and self._caching):
                # Serve the job from the on-disk cache if possible.
                job = self._cache.get_file(hash_value)
            if (job is None):
                job = run()
                self._logger.info(((f'Executor runs ' + label) + f' with job: {{}}'.format(job.job_id())))
            else:
                self._logger.info(f'Cached job found with hash value: {{}}'.format(hash_value))
                cached = True
        except IBMRuntimeError as e:
            # Error code 1217 signals an expired/closed session -> mark it
            # inactive and retry (a new session is created on next access).
            # NOTE(review): other IBMRuntimeErrors are swallowed here and fall
            # through with job=None — confirm this is intended.
            if ('"code":1217' in e.message):
                self._logger.info(((f'Executor failed to run ' + label) + f' because the session has been closed!'))
                self._session_active = False
                continue
        except QiskitError as e:
            critical_error = True
            critical_error_message = e
        except Exception as e:
            critical_error = True
            critical_error_message = e
            self._logger.info(((f'Executor failed to run ' + label) + f' because of unknown error!'))
            self._logger.info(f'Error message: {{}}'.format(e))
            self._logger.info(f'Traceback: {{}}'.format(traceback.print_exc()))
        if (job is None):
            if ('simulator' in str(self._backend)):
                # A simulator should never fail to accept a job -> critical.
                critical_error = True
                critical_error_message = RuntimeError('Failed to execute job on simulator!')
        else:
            # Poll the job until it reaches a final state; cached jobs are
            # treated as already DONE.
            if (not cached):
                status = JobStatus.QUEUED
                last_status = None
            else:
                status = JobStatus.DONE
            while (status not in JOB_FINAL_STATES):
                try:
                    status = job.status()
                    if (status != last_status):
                        self._logger.info(f'Job status: {{}}'.format(status))
                        last_status = status
                except Exception as e:
                    self._logger.info(f'Executor failed to get job status because of unknown error!')
                    self._logger.info(f'Error message: {{}}'.format(e))
                    self._logger.info(f'Traceback: {{}}'.format(traceback.print_exc()))
                    break
                if self._remote:
                    time.sleep(1)  # gentle polling interval for remote backends
                else:
                    time.sleep(0.01)
            if (status == JobStatus.ERROR):
                self._logger.info(f'Failed executation of the job!')
                try:
                    self._logger.info(f'Error message: {{}}'.format(job.error_message()))
                except Exception as e:
                    # error_message() unavailable: provoke the underlying
                    # exception via result() to capture it as critical.
                    try:
                        job.result()
                    except Exception as e2:
                        pass
                        critical_error = True
                        critical_error_message = e2
            elif (status == JobStatus.CANCELLED):
                self._logger.info(f'Job has been manually cancelled, and is resubmitted!')
                self._logger.info(f'To stop resubmitting the job, cancel the execution script first.')
            else:
                # Job finished -> fetch the result, retried up to 3 times.
                success = True
                result_success = False
                for retry_result in range(3):
                    try:
                        result = job.result()
                        result_success = True
                    except RuntimeJobFailureError as e:
                        self._logger.info(f'Executor unable to retriev job result!')
                        self._logger.info(f'Error message: {{}}'.format(e))
                    except Exception as e:
                        self._logger.info(f'Executor failed to get job result because of unknown error!')
                        self._logger.info(f'Error message: {{}}'.format(e))
                        self._logger.info(f'Traceback: {{}}'.format(traceback.print_exc()))
                    if result_success:
                        break
                    else:
                        self._logger.info(f'Retrying to get job result')
                        time.sleep(self._wait_restart)
        if (success and result_success):
            break
        elif (critical_error is False):
            self._logger.info(((f'Restarting ' + label) + f' run'))
            success = False
            result_success = False
        if critical_error:
            self._logger.info(f'Critical error detected; abort execution')
            raise critical_error_message
    if (success is not True):
        raise RuntimeError(f'Could not run job successfully after {{}} retries'.format(self._max_jobs_retries))
    if (self._caching and (not cached)):
        # Strip unpicklable members before storing the job in the cache.
        job_pickle = copy.copy(job)
        job_pickle._future = None
        job_pickle._function = None
        job_pickle._api_client = None
        job_pickle._service = None
        job_pickle._ws_client_future = None
        job_pickle._ws_client = None
        try:
            job_pickle._backend = str(job.backend())
        except QiskitError:
            job_pickle._backend = self.backend
        def result_():
            return result
        job_pickle.result = result_  # replay the already-fetched result on load
        self._cache.store_file(hash_value, job_pickle)
        self._logger.info(f'Stored job in cache with hash value: {{}}'.format(hash_value))
    return job
def estimator_run(self, circuits, observables, parameter_values=None, **kwargs: Any) -> Job:
    """
    Function similar to the Qiskit Estimator run function, but this one includes caching,
    automatic session handling, and restarts of failed jobs.

    Args:
        circuits: Quantum circuits to execute.
        observables: Observable to measure.
        parameter_values: Values for the parameters in circuits.
        kwargs (Any): Additional arguments that are passed to the estimator.

    Returns:
        A qiskit job containing the results of the run.
    """
    if isinstance(self.estimator, qiskit_primitives_BackendEstimator):
        # Backend simulation: seed is passed via the run kwargs.
        if (self._set_seed_for_primitive is not None):
            kwargs['seed_simulator'] = self._set_seed_for_primitive
            self._set_seed_for_primitive += 1  # fresh seed per run -> independent samples
    elif isinstance(self.estimator, qiskit_primitives_Estimator):
        # Reference primitive: seed is an option of the estimator itself.
        if (self._set_seed_for_primitive is not None):
            self.estimator.set_options(seed=self._set_seed_for_primitive)
            self._set_seed_for_primitive += 1
    def run():
        # Resolve self.estimator freshly on every call so retries after a
        # session restart use the newly created primitive.
        return self.estimator.run(circuits, observables, parameter_values, **kwargs)
    if self._caching:
        hash_value = self._cache.hash_variable(['estimator', circuits, observables, parameter_values, kwargs, self._options_estimator, self._backend])
    else:
        hash_value = None
    return self._primitive_run(run, 'estimator', hash_value)
def sampler_run(self, circuits, parameter_values=None, **kwargs: Any) -> Job:
    """
    Function similar to the Qiskit Sampler run function, but this one includes caching,
    automatic session handling, and restarts of failed jobs.

    Args:
        circuits: Quantum circuits to execute.
        parameter_values: Values for the parameters in circuits.
        kwargs (Any): Additional arguments that are passed to the sampler.

    Returns:
        A qiskit job containing the results of the run.
    """
    sampler = self.sampler  # triggers lazy creation of the primitive
    if (self._set_seed_for_primitive is not None):
        if isinstance(sampler, qiskit_primitives_BackendSampler):
            # Backend simulation: seed is passed via the run kwargs.
            kwargs['seed_simulator'] = self._set_seed_for_primitive
            self._set_seed_for_primitive += 1
        elif isinstance(sampler, qiskit_primitives_Sampler):
            # Reference primitive: seed is an option of the sampler itself.
            sampler.set_options(seed=self._set_seed_for_primitive)
            self._set_seed_for_primitive += 1

    def _wrapped_run():
        # Resolve self.sampler freshly on every (re)try so a restarted
        # session gets a newly created primitive.
        return self.sampler.run(circuits, parameter_values, **kwargs)

    hash_value = None
    if self._caching:
        hash_value = self._cache.hash_variable(['sampler', circuits, parameter_values, kwargs, self._options_sampler, self._backend])
    return self._primitive_run(_wrapped_run, 'sampler', hash_value)
def get_estimator(self):
    """
    Returns an Estimator primitive that routes every execution through this
    Executor, adding result caching, automatic session handling, and
    restarts of failed jobs on top of the Qiskit Estimator interface.
    """
    return ExecutorEstimator(executor=self, options=self._options_estimator)
def get_sampler(self):
    """
    Returns a Sampler primitive that routes every execution through this
    Executor, adding result caching, automatic session handling, and
    restarts of failed jobs on top of the Qiskit Sampler interface.
    """
    return ExecutorSampler(executor=self, options=self._options_sampler)
@property
def optree_executor(self) -> str:
    """Which primitive OpTree evaluations should use.

    Defaults to ``'estimator'``; only reports ``'sampler'`` when a sampler
    exists but no estimator has been created yet.
    """
    if (self._estimator is None) and (self._sampler is not None):
        return 'sampler'
    return 'estimator'
def backend_run(self, run_input, **options):
    """Forwards the given circuits directly to the backend's ``run``.

    Args:
        run_input: An object to run on the backend (typically a circuit).
        options: Additional arguments that are passed to the backend.

    Return:
        The Qiskit job object from the run.
    """
    return self.backend.run(run_input, **options)
def set_shots(self, num_shots: Union[(int, None)]) -> None:
    """Sets the number shots for the next evaluations.

    The value is stored internally and propagated to the backend and to
    every primitive that has already been created. ``None`` (exact
    evaluation) is translated to 0 for components that need an integer.

    Args:
        num_shots (int or None): Number of shots that are set
    """
    self._shots = num_shots
    if (num_shots is None):
        self._logger.info('Set shots to {}'.format(num_shots))
        num_shots = 0  # integer placeholder for "no shots"
    if (self._backend is not None):
        # Statevector simulation ignores shot counts entirely.
        if ('statevector_simulator' not in str(self._backend)):
            self._backend.options.shots = num_shots
    if (self._estimator is not None):
        if isinstance(self._estimator, qiskit_primitives_Estimator):
            # Reference primitive: shots=None means exact evaluation.
            if (num_shots == 0):
                self._estimator.set_options(shots=None)
            else:
                self._estimator.set_options(shots=num_shots)
            try:
                self._options_estimator['shots'] = num_shots
            except:  # options container may be None or not dict-like
                pass
        elif isinstance(self._estimator, qiskit_primitives_BackendEstimator):
            self._estimator.set_options(shots=num_shots)
            try:
                self._options_estimator['shots'] = num_shots
            except:
                pass
        elif isinstance(self._estimator, qiskit_ibm_runtime_Estimator):
            # Runtime primitive keeps shots in the nested 'execution' options.
            execution = self._estimator.options.get('execution')
            execution['shots'] = num_shots
            self._estimator.set_options(execution=execution)
            try:
                self._options_estimator['execution']['shots'] = num_shots
            except:
                pass
        else:
            raise RuntimeError('Unknown estimator type!')
    if (self._sampler is not None):
        if isinstance(self._sampler, qiskit_primitives_Sampler):
            # Reference primitive: shots=None means exact evaluation.
            if (num_shots == 0):
                self._sampler.set_options(shots=None)
            else:
                self._sampler.set_options(shots=num_shots)
            try:
                self._options_sampler['shots'] = num_shots
            except:
                pass
        elif isinstance(self._sampler, qiskit_primitives_BackendSampler):
            self._sampler.set_options(shots=num_shots)
            try:
                self._options_sampler['shots'] = num_shots
            except:
                pass
        elif isinstance(self._sampler, qiskit_ibm_runtime_Sampler):
            # Runtime primitive keeps shots in the nested 'execution' options.
            execution = self._sampler.options.get('execution')
            execution['shots'] = num_shots
            self._sampler.set_options(execution=execution)
            try:
                self._options_sampler['execution']['shots'] = num_shots
            except:
                pass
        else:
            raise RuntimeError('Unknown sampler type!')
def get_shots(self) -> int:
    """Getter for the number of shots.

    Returns:
        Returns the number of shots that are used for the current
        evaluation; ``None`` stands for exact (statevector) evaluation.
    """
    shots = self._shots
    if ((self._estimator is not None) or (self._sampler is not None)):
        # Read the shot number back from the primitives (authoritative).
        shots_estimator = 0
        shots_sampler = 0
        if (self._estimator is not None):
            if isinstance(self._estimator, qiskit_primitives_Estimator):
                shots_estimator = self._estimator.options.get('shots', 0)
            elif isinstance(self._estimator, qiskit_primitives_BackendEstimator):
                shots_estimator = self._estimator.options.get('shots', 0)
            elif isinstance(self._estimator, qiskit_ibm_runtime_Estimator):
                # Runtime primitive keeps shots in the nested 'execution' options.
                execution = self._estimator.options.get('execution')
                shots_estimator = execution['shots']
            else:
                raise RuntimeError('Unknown estimator type!')
        if (self._sampler is not None):
            if isinstance(self._sampler, qiskit_primitives_Sampler):
                shots_sampler = self._sampler.options.get('shots', 0)
            elif isinstance(self._sampler, qiskit_primitives_BackendSampler):
                shots_sampler = self._sampler.options.get('shots', 0)
            elif isinstance(self._sampler, qiskit_ibm_runtime_Sampler):
                execution = self._sampler.options.get('execution')
                shots_sampler = execution['shots']
            else:
                raise RuntimeError('Unknown sampler type!')
        if ((self._estimator is not None) and (self._sampler is not None)):
            # Both primitives must agree on the shot number.
            if (shots_estimator != shots_sampler):
                raise ValueError('The number of shots of the given Estimator and Sampler is not equal!')
        if (shots_estimator is None):
            shots_estimator = 0
        if (shots_sampler is None):
            shots_sampler = 0
        shots = max(shots_estimator, shots_sampler)
    elif (self._backend is not None):
        if ('statevector_simulator' not in str(self._backend)):
            shots = self._backend.options.shots
        else:
            return None  # exact simulation -> no shot count
    if (shots == 0):
        shots = None  # normalize: 0 means "no shots"
    self._shots = shots
    return shots
def reset_shots(self) -> None:
    """Restores the shot number to the value recorded at executor construction."""
    self.set_shots(self._inital_num_shots)
@property
def shots(self) -> int:
    """Current number of shots (``None`` for exact statevector evaluation)."""
    return self.get_shots()
def create_session(self):
    """Opens a new runtime session; called automatically when needed.

    Raises:
        RuntimeError: If no service is available to open a session with.
    """
    if self._service is None:
        raise RuntimeError('Session can not started because of missing service!')
    self._session = Session(self._service, backend=self._backend, max_time=self._max_session_time)
    self._session_active = True
    self._logger.info('Executor created a new session.')
def close_session(self):
    """Closes the current runtime session; called automatically.

    Raises:
        RuntimeError: If there is no open session to close.
    """
    if self._session is None:
        raise RuntimeError('No session found!')
    self._logger.info('Executor closed session: {}'.format(self._session.session_id))
    self._session.close()
    self._session = None
def __del__(self):
    """Terminate the session in case the executor is deleted."""
    if self._session is None:
        return
    try:
        self.close_session()
    except:
        # Never raise from a destructor (interpreter may be shutting down).
        pass
def set_options_estimator(self, **fields):
    """Updates option values on the estimator primitive and keeps the
    executor's stored options in sync.

    Args:
        **fields: The fields to update the options
    """
    self.estimator.set_options(**fields)
    self._options_estimator = self.estimator.options
def set_options_sampler(self, **fields):
    """Updates option values on the sampler primitive and keeps the
    executor's stored options in sync.

    Args:
        **fields: The fields to update the options
    """
    self.sampler.set_options(**fields)
    self._options_sampler = self.sampler.options
def set_primitive_options(self, **fields):
    """Updates option values on both the estimator and the sampler primitive.

    Args:
        **fields: The fields to update the options
    """
    self.set_options_estimator(**fields)
    self.set_options_sampler(**fields)
def reset_options_estimator(self, options: Union[(Options, qiskit_ibm_runtime_Options)]):
    """
    Overwrites the options of the estimator primitive.

    Args:
        options: Options for the estimator
    """
    self._options_estimator = options
    if isinstance(options, qiskit_ibm_runtime_Options):
        # Runtime options are stored as a plain dict on the primitive.
        self.estimator._options = asdict(options)
        return
    self.estimator._run_options = Options()
    self.estimator._run_options.update_options(**options)
def reset_options_sampler(self, options: Union[(Options, qiskit_ibm_runtime_Options)]):
    """
    Overwrites the options of the sampler primitive.

    Args:
        options: Options for the sampler
    """
    self._options_sampler = options
    if isinstance(options, qiskit_ibm_runtime_Options):
        # Runtime options are stored as a plain dict on the primitive.
        self.sampler._options = asdict(options)
        return
    self.sampler._run_options = Options()
    self.sampler._run_options.update_options(**options)
def reset_options(self, options: Union[(Options, qiskit_ibm_runtime_Options)]):
    """
    Overwrites the options of both the estimator and the sampler primitive.

    Args:
        options: Options for the sampler and estimator
    """
    for reset in (self.reset_options_estimator, self.reset_options_sampler):
        reset(options)
def set_seed_for_primitive(self, seed: int=0):
    """Sets the seed that is passed to simulator-based primitive runs.

    Args:
        seed (int): Initial seed value; the executor increments it after
            every seeded run so repeated runs draw independent samples.
    """
    self._set_seed_for_primitive = seed
|
class ExecutorEstimator(BaseEstimator):
    """
    Special Estimator Primitive that uses the Executor service.

    Useful for automatic restarting sessions and caching results.
    The object is created by the Executor method get_estimator()

    Args:
        executor (Executor): The executor service to use
        options: Options for the estimator

    """

    def __init__(self, executor: Executor, options=None):
        # Normalize Options / qiskit_ibm_runtime Options containers into a
        # plain dict, since BaseEstimator expects keyword-style options.
        if (isinstance(options, Options) or isinstance(options, qiskit_ibm_runtime_Options)):
            try:
                options_ini = copy.deepcopy(options).__dict__
            except:
                # Fallback for options objects without a __dict__.
                options_ini = asdict(copy.deepcopy(options))
        else:
            options_ini = options
        super().__init__(options=options_ini)
        self._executor = executor

    def _call(self, circuits, observables, parameter_values=None, **run_options) -> EstimatorResult:
        """Has to be passed through, otherwise python will complain about the abstract method.
        Input arguments are the same as in Qiskit's estimator.call()
        """
        return self._executor.estimator._call(circuits, observables, parameter_values, **run_options)

    def _run(self, circuits, observables, parameter_values, **run_options) -> Job:
        """Has to be passed through, otherwise python will complain about the abstract method.
        Input arguments are the same as in Qiskit's estimator.run().
        """
        return self._executor.estimator_run(circuits=circuits, observables=observables, parameter_values=parameter_values, **run_options)

    def run(self, circuits, observables, parameter_values=None, **run_options) -> Job:
        """
        Overwrites the estimator primitive run method, to evaluate expectation values.
        Uses the Executor class for automatic session handling.

        Input arguments are the same as in Qiskit's estimator.run()
        """
        return self._executor.estimator_run(circuits=circuits, observables=observables, parameter_values=parameter_values, **run_options)

    @property
    def circuits(self):
        """Quantum circuits that represents quantum states.

        Returns:
            The quantum circuits.
        """
        return tuple(self._executor.estimator.circuits)

    @property
    def observables(self):
        """Observables to be estimated.

        Returns:
            The observables.
        """
        return tuple(self._executor.estimator.observables)

    @property
    def parameters(self):
        """Parameters of the quantum circuits.

        Returns:
            Parameters, where ``parameters[i][j]`` is the j-th parameter of
            the i-th circuit.
        """
        return tuple(self._executor.estimator.parameters)

    @property
    def options(self) -> Options:
        """Return options values for the estimator.

        Returns:
            options
        """
        return self._executor.estimator.options

    def clear_cache(self):
        # Clears the internal result caches of the underlying estimator.
        self._executor.clear_estimator_cache()

    def set_options(self, **fields):
        """Set options values for the estimator.

        Args:
            **fields: The fields to update the options
        """
        self._executor.estimator.set_options(**fields)
        self._executor._options_estimator = self._executor.estimator.options
|
class ExecutorSampler(BaseSampler):
    """
    Special Sampler Primitive that uses the Executor service.

    Useful for automatic restarting sessions and caching the results.
    The object is created by the executor method get_sampler()

    Args:
        executor (Executor): The executor service to use
        options: Options for the sampler

    """

    def __init__(self, executor: Executor, options=None):
        # Normalize Options / qiskit_ibm_runtime Options containers into a
        # plain dict, since BaseSampler expects keyword-style options.
        if (isinstance(options, Options) or isinstance(options, qiskit_ibm_runtime_Options)):
            try:
                options_ini = copy.deepcopy(options).__dict__
            except:
                # Fallback for options objects without a __dict__.
                options_ini = asdict(copy.deepcopy(options))
        else:
            options_ini = options
        super().__init__(options=options_ini)
        self._executor = executor

    def run(self, circuits, parameter_values=None, **run_options) -> Job:
        """
        Overwrites the sampler primitive run method, to evaluate circuits.
        Uses the Executor class for automatic session handling.

        Input arguments are the same as in Qiskit's sampler.run()
        """
        return self._executor.sampler_run(circuits=circuits, parameter_values=parameter_values, **run_options)

    def _run(self, circuits, parameter_values=None, **run_options) -> Job:
        """
        Overwrites the sampler primitive run method, to evaluate circuits.
        Uses the Executor class for automatic session handling.

        Input arguments are the same as in Qiskit's sampler.run()
        """
        return self._executor.sampler_run(circuits=circuits, parameter_values=parameter_values, **run_options)

    def _call(self, circuits, parameter_values=None, **run_options) -> SamplerResult:
        """Has to be passed through, otherwise python will complain about the abstract method"""
        return self._executor.sampler._call(circuits, parameter_values, **run_options)

    @property
    def circuits(self):
        """Quantum circuits to be sampled.

        Returns:
            The quantum circuits to be sampled.
        """
        return tuple(self._executor.sampler.circuits)

    @property
    def parameters(self):
        """Parameters of quantum circuits.

        Returns:
            List of the parameters in each quantum circuit.
        """
        return tuple(self._executor.sampler.parameters)

    @property
    def options(self) -> Options:
        """Return options values for the sampler.

        Returns:
            options
        """
        return self._executor.sampler.options

    def set_options(self, **fields):
        """Set options values for the sampler.

        Args:
            **fields: The fields to update the options
        """
        self._executor.sampler.set_options(**fields)
        self._executor._options_sampler = self._executor.sampler.options

    def clear_cache(self):
        # Clears the internal result caches of the underlying sampler.
        self._executor.clear_sampler_cache()
|
class ExecutorCache():
    """Cache for jobs that are created by Primitives

    Args:
        folder (str): Folder to store the cache; an empty string means the
            current working directory.

    """

    def __init__(self, logger, folder: str=''):
        """
        Args:
            logger: Logger used for diagnostic messages.
            folder (str): Directory for the cached files; created if missing.

        Raises:
            RuntimeError: If the cache directory cannot be created.
        """
        import os
        self._folder = folder
        try:
            # Bug fix: only create a directory when a non-empty path is given;
            # os.makedirs('') raises and made the default folder unusable.
            if self._folder and (not os.path.exists(self._folder)):
                os.makedirs(self._folder)
        except OSError:
            raise RuntimeError('Could not create folder for cache')
        self._logger = logger

    def _file_path(self, hash_value: str) -> str:
        """Returns the cache file path for the given hash value."""
        import os
        # os.path.join avoids building an absolute '/<hash>.p' path when
        # self._folder is empty (the old "folder + '/'" concatenation did).
        return os.path.join(self._folder, str(hash_value) + '.p')

    def hash_variable(self, variable: Any):
        """
        Creates a hash value for a list of circuits, parameters, operators.

        The hash value is used as the filename for the cached file.
        """
        def make_recursive_str(variable_):
            """Creates a string from a (possibly nested) list."""
            if isinstance(variable_, list):
                return ''.join(make_recursive_str(item) for item in variable_)
            return str(variable_)
        return blake2b(make_recursive_str(variable).encode('utf-8'), digest_size=20).hexdigest()

    def get_file(self, hash_value: str):
        """
        Searches for the cached file and returns its content, otherwise None.

        Args:
            hash_value (str): Hash value of the file
        """
        try:
            path = self._file_path(hash_value)
            if Path(path).exists():
                # 'with' guarantees the file handle is closed on all paths.
                with open(path, 'rb') as file:
                    return pickle.load(file)
            return None
        except Exception:
            self._logger.info('Could not load job from cache!')
            self._logger.info('File: ' + self._file_path(hash_value))
            return None

    def store_file(self, hash_value: str, job_data):
        """
        Store the data of a finished job.

        Args:
            hash_value (str): Hash value of the job that is used as a file name
            job_data: Data of the job
        """
        try:
            with open(self._file_path(hash_value), 'wb') as file:
                pickle.dump(job_data, file)
        except Exception:
            raise RuntimeError('Could not store job in cache')
|
class OpTreeElementBase:
    """Common base type for all elements (nodes and leaves) of the OpTree."""
|
class OpTreeNodeBase(OpTreeElementBase):
    """Base class for nodes in the OpTree.

    Args:
        children_list (list): A list of children of the node.
        factor_list (list): A list of factors for each child.
        operation_list (list): A list of operations that are applied to each child.
    """

    def __init__(self, children_list: Union[(None, List[OpTreeElementBase])]=None, factor_list: Union[(None, List[float])]=None, operation_list: Union[(None, List[Callable], List[None])]=None) -> None:
        if (children_list is not None):
            self._children_list = children_list
            if (factor_list is not None):
                if (len(children_list) != len(factor_list)):
                    raise ValueError('circuit_list and factor_list must have the same length')
                self._factor_list = factor_list
            else:
                # Default: unit factor for every child.
                self._factor_list = [1.0] * len(children_list)
            if (operation_list is not None):
                if (len(children_list) != len(operation_list)):
                    raise ValueError('circuit_list and operation_list must have the same length')
                self._operation_list = operation_list
            else:
                # Default: no operation attached to any child.
                self._operation_list = [None] * len(children_list)
        else:
            self._children_list = []
            self._factor_list = []
            self._operation_list = []

    @property
    def children(self) -> List[OpTreeElementBase]:
        """Returns the list of children of the node."""
        return self._children_list

    @property
    def factor(self) -> List[float]:
        """Returns the list of factors of the node."""
        return self._factor_list

    @property
    def operation(self) -> List[Union[(Callable, None)]]:
        """Returns the list of operations of the node."""
        return self._operation_list

    def append(self, children: OpTreeElementBase, factor: float=1.0, operation: Union[(None, Callable)]=None):
        """Appends a child to the node.

        Args:
            children (OpTreeElementBase): The child to be appended.
            factor (float, optional): The factor that is applied to the child. Defaults to 1.0.
            operation ([type], optional): The operation that is applied to the child. Defaults to None.
        """
        self._children_list.append(children)
        self._factor_list.append(factor)
        self._operation_list.append(operation)

    def remove(self, index: Union[(List[int], int)]):
        """Removes children from the node.

        Args:
            index: The list of indices of the children to be removed.
                Can also be a single index.
        """
        if isinstance(index, int):
            index = [index]
        if (len(index) > len(self._children_list)):
            raise ValueError('index must not be larger than the number of children')
        if (len(index) == 0):
            return None
        self._children_list = [child for (i, child) in enumerate(self._children_list) if (i not in index)]
        self._factor_list = [factor for (i, factor) in enumerate(self._factor_list) if (i not in index)]
        self._operation_list = [operation for (i, operation) in enumerate(self._operation_list) if (i not in index)]

    def __eq__(self, other) -> bool:
        """Function for comparing two OpTreeNodes.

        Checks the following in this order:
        - Type of the nodes is the same
        - The number of children is the same
        - The factors are the same
        - The children are the same
        - The factor and operation attached to each child match
        """
        if isinstance(other, type(self)):
            if (len(self._children_list) != len(other._children_list)):
                return False
            # Fast pre-filter on factor values.
            # NOTE(review): the set comparison ignores multiplicities of
            # duplicated factors — kept to preserve established semantics.
            fac_set_self = set(self._factor_list)
            fac_set_other = set(other._factor_list)
            if (len(fac_set_self) != len(fac_set_other)):
                return False
            if (fac_set_self != fac_set_other):
                return False
            for child in self._children_list:
                if (child not in other._children_list):
                    return False
                else:
                    index = other._children_list.index(child)
                    # Bug fix: a mismatch in EITHER the factor OR the
                    # operation makes the nodes unequal (previously 'and',
                    # which only rejected when both differed).
                    if ((self._factor_list[self._children_list.index(child)] != other._factor_list[index]) or (self._operation_list[self._children_list.index(child)] != other._operation_list[index])):
                        return False
            return True
        else:
            return False

    def copy(self):
        """Function for copying a OpTreeNodeBase object."""
        return type(self)(copy.deepcopy(self._children_list), copy.deepcopy(self._factor_list), copy.deepcopy(self._operation_list))
|
class OpTreeList(OpTreeNodeBase):
    """OpTree node that represents its children as a list/array/vector.

    Args:
        children_list (list): A list of children of the list.
        factor_list (list): A list of factors for each child.
        operation_list (list): A list of operations that are applied to each child.
    """

    def __str__(self) -> str:
        """Renders the node as ``[f0*child0, f1*child1, ...]``."""
        rendered = []
        for factor, child in zip(self._factor_list, self._children_list):
            if isinstance(child, QuantumCircuit):
                # Circuits are drawn on their own lines.
                rendered.append(str(factor) + '*' + '\n' + str(child) + '\n')
            else:
                rendered.append(str(factor) + '*' + str(child))
        return '[' + ', '.join(rendered) + ']'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.