code stringlengths 17 6.64M |
|---|
class OpTreeSum(OpTreeNodeBase):
    """A OpTree node that sums over its children.

    Args:
        children_list (list): A list of children of the summation.
        factor_list (list): A list of factors for each child.
        operation_list (list): A list of operations that are applied to each child.
    """

    def __str__(self) -> str:
        """Returns a string representation of the node as a sum of its children."""
        last_index = len(self._factor_list) - 1
        text = "("
        for index, child in enumerate(self._children_list):
            # Circuits render as multi-line ASCII art, so surround them with newlines.
            if isinstance(child, QuantumCircuit):
                text += f"{self._factor_list[index]}*\n{child}\n"
            else:
                text += f"{self._factor_list[index]}*{child}"
            if index < last_index:
                text += " + "
        return text + ")"
|
class OpTreeLeafBase(OpTreeElementBase):
    """Base class for Leafs of the OpTree."""
|
class OpTreeCircuit(OpTreeLeafBase):
    """A leaf of the OpTree that represents a circuit.

    Args:
        circuit (QuantumCircuit): The circuit that is represented by the leaf.
    """

    def __init__(self, circuit: QuantumCircuit) -> None:
        self._circuit = circuit
        # Precompute the hash once; it is used for fast equality checks.
        self._hashvalue = OpTree.hash_circuit(circuit)

    @property
    def circuit(self) -> QuantumCircuit:
        """Returns the circuit that is represented by the leaf."""
        return self._circuit

    @property
    def hashvalue(self) -> tuple:
        """Returns the hashvalue of the circuit."""
        return self._hashvalue

    def __str__(self) -> str:
        """Returns the string representation of the circuit."""
        return f"\n{self._circuit}\n"

    def __eq__(self, other) -> bool:
        """Function for comparing two OpTreeLeafCircuits."""
        return isinstance(other, OpTreeCircuit) and self._hashvalue == other._hashvalue

    def copy(self):
        """Function for copying a OpTreeLeafCircuit object."""
        return OpTreeCircuit(self._circuit.copy())
|
class OpTreeOperator(OpTreeLeafBase):
    """A leaf of the OpTree that represents an operator.

    Args:
        operator (SparsePauliOp): The operator that is represented by the leaf.
    """

    def __init__(self, operator: SparsePauliOp) -> None:
        self._operator = operator
        # Precompute the hash once; it is used for fast equality checks.
        self._hashvalue = OpTree.hash_operator(operator)

    @property
    def operator(self) -> SparsePauliOp:
        """Returns the operator that is represented by the leaf."""
        return self._operator

    @property
    def hashvalue(self) -> tuple:
        """Returns the hashvalue of the circuit."""
        return self._hashvalue

    def __str__(self) -> str:
        """Returns the string representation of the operator."""
        return str(self._operator)

    def __eq__(self, other) -> bool:
        """Function for comparing two OpTreeLeafOperators."""
        return isinstance(other, OpTreeOperator) and self._hashvalue == other._hashvalue

    def copy(self):
        """Function for copying a OpTreeLeafOperator object."""
        return OpTreeOperator(self._operator.copy())
|
class OpTreeExpectationValue(OpTreeLeafBase):
    """
    Leaf of the OpTree that represents an expectation value of a circuit and an operator.

    Args:
        circuit (Union[OpTreeLeafCircuit, QuantumCircuit]): The circuit in the expectation value.
        operator (Union[OpTreeLeafOperator, SparsePauliOp]): The operator in the expectation value.
    """

    def __init__(
        self,
        circuit: Union[OpTreeCircuit, QuantumCircuit],
        operator: Union[OpTreeOperator, SparsePauliOp],
    ) -> None:
        # Normalize the circuit input to an OpTreeCircuit leaf.
        if isinstance(circuit, QuantumCircuit):
            circuit = OpTreeCircuit(circuit)
        if not isinstance(circuit, OpTreeCircuit):
            raise ValueError("Wrong format of the given circuit!")
        self._circuit = circuit
        # Normalize the operator input to an OpTreeOperator leaf.
        if isinstance(operator, SparsePauliOp):
            operator = OpTreeOperator(operator)
        if not isinstance(operator, OpTreeOperator):
            raise ValueError("Wrong format of the given operator!")
        self._operator = operator

    @property
    def circuit(self) -> QuantumCircuit:
        """Returns the circuit that is represented by the leaf."""
        return self._circuit.circuit

    @property
    def operator(self) -> SparsePauliOp:
        """Returns the operator that is represented by the leaf."""
        return self._operator.operator

    @property
    def hashvalue(self) -> tuple:
        """Returns the hashvalue of the circuit."""
        # Concatenation of the circuit and operator hash tuples.
        return self._circuit.hashvalue + self._operator.hashvalue

    def __str__(self) -> str:
        """Returns the string representation of the expectation value."""
        return f"{self._circuit}\n with observable \n{self._operator}\n"

    def __eq__(self, other) -> bool:
        """Function for comparing two OpTreeExpectationValue leafs."""
        return (
            isinstance(other, OpTreeExpectationValue)
            and self._circuit == other._circuit
            and self._operator == other._operator
        )

    def copy(self):
        """Function for copying a OpTreeLeafExpectationValue object."""
        return OpTreeExpectationValue(self._circuit.copy(), self._operator.copy())
|
class OpTreeMeasuredOperator(OpTreeExpectationValue):
    """
    Leaf of the OpTree that represents an measurement.

    The circuit in the class represents the circuit that is measured for the given operator.
    """

    def measure_circuit(
        self, circuit: Union[QuantumCircuit, OpTreeCircuit]
    ) -> OpTreeExpectationValue:
        """
        Applies the measurement of the leaf to the circuit and returns an expectation value.

        Args:
            circuit (Union[QuantumCircuit, OpTreeLeafCircuit]): The circuit that is measured.

        Returns:
            OpTreeLeafExpectationValue: The expectation value leaf with the measured circuit.
        """
        circuit_ = circuit
        if isinstance(circuit, OpTreeCircuit):
            # Bug fix: unwrap the QuantumCircuit from the leaf. Previously this branch
            # was a no-op (``circuit_ = circuit``), so ``compose`` was called on the
            # OpTreeCircuit leaf, which has no such method.
            circuit_ = circuit.circuit
        return OpTreeExpectationValue(circuit_.compose(self.circuit), self.operator)

    def copy(self):
        """Function for copying a OpTreeLeafMeasuredOperator object."""
        return OpTreeMeasuredOperator(self._circuit.copy(), self._operator.copy())
|
class OpTreeContainer(OpTreeLeafBase):
    """
    A container for arbitrary objects that can be used as leafs in the OpTree.

    Args:
        item (Any): Any kind of item that is represented by the leaf.
    """

    def __init__(self, item: Any) -> None:
        self.item = item

    def __str__(self) -> str:
        """Returns the string representation of the object."""
        return str(self.item)

    def __eq__(self, other) -> bool:
        """Function for comparing two OpTreeLeafContainers."""
        if isinstance(other, OpTreeContainer):
            return self.item == other.item
        # Bug fix: previously fell through and implicitly returned None for foreign
        # types; return False explicitly, consistent with the other leaf classes.
        return False

    def copy(self):
        """Function for copying a OpTreeLeafContainer object."""
        return OpTreeContainer(copy.deepcopy(self.item))
|
class OpTreeValue(OpTreeLeafBase):
    """
    A leaf that contains an evaluated value.

    Args:
        value (float): A float value that is represented by the leaf.
    """

    def __init__(self, value: float) -> None:
        self.value = value

    def __str__(self) -> str:
        """Returns the string representation of the value."""
        return str(self.value)

    def __eq__(self, other) -> bool:
        """Function for comparing two OpTreeLeafValues."""
        if isinstance(other, OpTreeValue):
            return self.value == other.value
        # Bug fix: previously fell through and implicitly returned None for foreign
        # types; return False explicitly, consistent with the other leaf classes.
        return False

    def copy(self):
        """Function for copying a OpTreeLeafValue object."""
        return OpTreeValue(self.value)
|
def _simplify_operator(
    element: Union[SparsePauliOp, OpTreeOperator]
) -> Union[SparsePauliOp, OpTreeOperator]:
    """Merge duplicate Pauli terms of an operator by summing their coefficients.

    The return type mirrors the input type: an ``OpTreeOperator`` in gives an
    ``OpTreeOperator`` out, a plain ``SparsePauliOp`` gives a ``SparsePauliOp``.
    Returns ``None`` when the operator has no Pauli terms at all.
    """
    if isinstance(element, OpTreeOperator):
        operator = element.operator
        input_type = "leaf"
    else:
        operator = element
        input_type = "operator"

    unique_paulis = []
    summed_coeffs = []
    for i, pauli in enumerate(operator.paulis):
        # EAFP: look up the Pauli among the already collected terms and merge the
        # coefficient, otherwise start a new term.
        try:
            summed_coeffs[unique_paulis.index(pauli)] += operator.coeffs[i]
        except ValueError:
            unique_paulis.append(pauli)
            summed_coeffs.append(operator.coeffs[i])

    if not unique_paulis:
        return None
    operator_simp = SparsePauliOp(unique_paulis, summed_coeffs)
    if input_type == "leaf":
        return OpTreeOperator(operator_simp)
    return operator_simp
|
class OpTree:
    """Static class containing functions for working with OpTrees objects."""

    # Shortcut handles for the derivative and evaluation toolboxes.
    from .optree_derivative import OpTreeDerivative

    derivative = OpTreeDerivative

    from .optree_evaluate import OpTreeEvaluate

    evaluate = OpTreeEvaluate

    @staticmethod
    def hash_circuit(circuit: QuantumCircuit) -> tuple:
        """Hashes a circuit using the qiskit _circuit_key function.

        Args:
            circuit (QuantumCircuit): The circuit to be hashed.

        Returns:
            a tuple containing the circuit information that can be used for comparison.
        """
        from qiskit.primitives.utils import _circuit_key

        return _circuit_key(circuit)

    @staticmethod
    def hash_operator(operator: SparsePauliOp) -> tuple:
        """Hashes an operator using the qiskit _observable_key function.

        Args:
            operator (SparsePauliOp): The operator to be hashed.

        Returns:
            A tuple containing the operator information that can be used for comparison.
        """
        from qiskit.primitives.utils import _observable_key

        return _observable_key(operator)

    @staticmethod
    def get_number_of_leafs(tree: OpTreeElementBase) -> int:
        """Returns the number of leafs of the OpTree.

        Args:
            tree (OpTreeElementBase): The OpTree.

        Returns:
            int: The number of leafs of the OpTree.
        """
        if isinstance(tree, OpTreeLeafBase):
            return 1
        num = 0
        for child in tree.children:
            num += OpTree.get_number_of_leafs(child)
        return num

    @staticmethod
    def get_tree_depth(tree: OpTreeElementBase) -> int:
        """Returns the depth of the OpTree.

        Args:
            tree (OpTreeElementBase): The OpTree.

        Returns:
            int: The depth of the OpTree.
        """
        if isinstance(tree, OpTreeLeafBase):
            return 0
        depth = 0
        for child in tree.children:
            depth = max(depth, OpTree.get_tree_depth(child))
        return depth + 1

    @staticmethod
    def get_num_nested_lists(tree: OpTreeElementBase) -> int:
        """Returns the maximum number of nested OpTreeList levels of the OpTree.

        Args:
            tree (OpTreeElementBase): The OpTree.

        Returns:
            int: The maximum number of nested OpTreeList nodes along any path.
        """
        if isinstance(tree, OpTreeLeafBase):
            return 0
        depth = 0
        # Bug fix: recurse with get_num_nested_lists (not get_tree_depth) and take the
        # maximum over the children. The previous ``min`` with a start value of 0
        # always evaluated to 0, so nesting was never counted.
        for child in tree.children:
            depth = max(depth, OpTree.get_num_nested_lists(child))
        if isinstance(tree, OpTreeList):
            return depth + 1
        return depth

    @staticmethod
    def get_first_leaf(
        element: Union[OpTreeNodeBase, OpTreeLeafBase, QuantumCircuit, SparsePauliOp]
    ) -> Union[OpTreeLeafBase, QuantumCircuit, SparsePauliOp]:
        """Returns the first leaf of the supplied OpTree.

        Args:
            element (Union[OpTreeNodeBase, OpTreeLeafBase, QuantumCircuit, SparsePauliOp]): The OpTree.

        Returns:
            The first found leaf of the OpTree.
        """
        if isinstance(element, OpTreeNodeBase):
            # Depth-first descent along the first child.
            return OpTree.get_first_leaf(element.children[0])
        return element

    @staticmethod
    def gen_expectation_tree(
        circuit_tree: Union[OpTreeNodeBase, OpTreeCircuit, QuantumCircuit],
        operator_tree: Union[OpTreeNodeBase, OpTreeMeasuredOperator, OpTreeOperator, SparsePauliOp],
    ):
        """
        Function that generates an expectation tree from a circuit tree and an operator tree.

        .. currentmodule:: squlearn.util.optree

        The operator tree is applied to each leaf of the circuit tree and the
        resulting expectation values are returned as :class:`OpTreeExpectationValue`.

        Args:
            circuit_tree (Union[OpTreeNodeBase, OpTreeLeafCircuit, QuantumCircuit]): The circuit tree.
            operator_tree (Union[OpTreeNodeBase, OpTreeLeafMeasuredOperator, OpTreeLeafOperator, SparsePauliOp]): The operator tree.

        Returns:
            The combined tree with :class:`OpTreeExpectationValue` at the leafs.
        """
        if isinstance(circuit_tree, OpTreeNodeBase):
            # Recurse through the circuit tree first, keeping node type/factors/operations.
            children_list = [
                OpTree.gen_expectation_tree(child, operator_tree)
                for child in circuit_tree.children
            ]
            factor_list = circuit_tree.factor
            operation_list = circuit_tree.operation
            if isinstance(circuit_tree, OpTreeSum):
                return OpTreeSum(children_list, factor_list, operation_list)
            elif isinstance(circuit_tree, OpTreeList):
                return OpTreeList(children_list, factor_list, operation_list)
            else:
                raise ValueError("wrong type of circuit_tree")
        elif isinstance(circuit_tree, (OpTreeCircuit, QuantumCircuit)):
            # Circuit leaf reached: now expand the operator tree below it.
            if isinstance(operator_tree, OpTreeNodeBase):
                children_list = [
                    OpTree.gen_expectation_tree(circuit_tree, child)
                    for child in operator_tree.children
                ]
                factor_list = operator_tree.factor
                operation_list = operator_tree.operation
                if isinstance(operator_tree, OpTreeSum):
                    return OpTreeSum(children_list, factor_list, operation_list)
                elif isinstance(operator_tree, OpTreeList):
                    return OpTreeList(children_list, factor_list, operation_list)
                else:
                    raise ValueError("element must be a CircuitTreeSum or a CircuitTreeList")
            elif isinstance(operator_tree, (OpTreeOperator, SparsePauliOp)):
                return OpTreeExpectationValue(circuit_tree, operator_tree)
            elif isinstance(operator_tree, OpTreeMeasuredOperator):
                # Measured operators carry their own measurement circuit.
                return operator_tree.measure_circuit(circuit_tree)
            else:
                raise ValueError("wrong type of operator_tree")
        else:
            raise ValueError(
                "circuit_tree must be a CircuitTreeSum or a CircuitTreeList", type(circuit_tree)
            )

    @staticmethod
    def simplify(
        element: Union[OpTreeNodeBase, OpTreeLeafBase, QuantumCircuit, SparsePauliOp]
    ) -> Union[OpTreeNodeBase, OpTreeLeafBase, QuantumCircuit, SparsePauliOp]:
        """
        Function for simplifying an OpTree structure, the input is kept untouched.

        Merges double sums and identifies identical branches or leafs in sums.

        Args:
            element (Union[OpTreeNodeBase, OpTreeLeafBase, QuantumCircuit, SparsePauliOp]): The OpTree to be simplified.

        Returns:
            A simplified copy of the OpTree.
        """

        def combine_two_ops(op1, op2):
            """Helper function for combining two operations into one.

            TODO: not used/tested yet
            """
            if op1 is None and op2 is None:
                return None
            if op1 is None:
                return op2
            if op2 is None:
                return op1
            return lambda x: op1(op2(x))

        if isinstance(element, OpTreeNodeBase):
            if len(element.children) > 0:
                # Recursively simplify all children first.
                children_list = [OpTree.simplify(child) for child in element.children]
                factor_list = element.factor
                operation_list = element.operation
                if isinstance(element, OpTreeSum):
                    new_element = OpTreeSum(children_list, factor_list, operation_list)
                elif isinstance(element, OpTreeList):
                    new_element = OpTreeList(children_list, factor_list, operation_list)
                else:
                    raise ValueError("element must be a CircuitTreeSum or a CircuitTreeList")
                # Flatten sums of sums into a single sum; factors multiply through
                # and operations are composed.
                if isinstance(new_element, OpTreeSum) and any(
                    isinstance(child, OpTreeSum) for child in new_element.children
                ):
                    children_list = []
                    factor_list = []
                    operation_list = []
                    for i, child in enumerate(new_element.children):
                        if isinstance(child, OpTreeSum):
                            for j, childs_child in enumerate(child.children):
                                children_list.append(childs_child)
                                factor_list.append(new_element.factor[i] * child.factor[j])
                                operation_list.append(
                                    combine_two_ops(new_element.operation[i], child.operation[j])
                                )
                        else:
                            children_list.append(child)
                            factor_list.append(new_element.factor[i])
                            operation_list.append(new_element.operation[i])
                    new_element = OpTreeSum(children_list, factor_list, operation_list)
                # Merge identical branches of a sum by adding their factors.
                if isinstance(new_element, OpTreeSum):
                    children_list = []
                    factor_list = []
                    operation_list = []
                    for i, child in enumerate(new_element.children):
                        if child in children_list:
                            index = children_list.index(child)
                            factor_list[index] += new_element.factor[i]
                        else:
                            children_list.append(child)
                            factor_list.append(new_element.factor[i])
                            operation_list.append(new_element.operation[i])
                    new_element = OpTreeSum(children_list, factor_list, operation_list)
                return new_element
            else:
                # Node without children: nothing to simplify, return a copy.
                return copy.deepcopy(element)
        elif isinstance(element, (SparsePauliOp, OpTreeOperator)):
            # Merge duplicate Pauli terms in the operator.
            return _simplify_operator(element)
        else:
            return copy.deepcopy(element)

    @staticmethod
    def assign_parameters(
        element: Union[OpTreeNodeBase, OpTreeCircuit, QuantumCircuit],
        dictionary,
        inplace: bool = False,
    ):
        """
        Assigns the parameters of the OpTree structure to the values in the dictionary.

        Args:
            element (Union[OpTreeNodeBase, OpTreeLeafCircuit, QuantumCircuit]): The OpTree for which
                                                                                all parameters are
                                                                                assigned.
            dictionary (dict): The dictionary that contains the parameter names as keys
                               and the parameter values as values.
            inplace (bool): If True, the parameters are assigned in place; otherwise a
                            copy with assigned parameters is returned.

        Returns:
            The OpTree structure with all parameters assigned, (copied if inplace=False)
        """
        if isinstance(element, OpTreeNodeBase):
            if inplace:
                for c in element.children:
                    OpTree.assign_parameters(c, dictionary, inplace=True)
                # Factors may themselves be parameter expressions: bind them to floats.
                for i, fac in enumerate(element.factor):
                    if isinstance(fac, ParameterExpression):
                        element.factor[i] = float(
                            fac.bind(dictionary, allow_unknown_parameters=True)
                        )
                return None
            child_list_assigned = [
                OpTree.assign_parameters(c, dictionary) for c in element.children
            ]
            factor_list_bound = []
            for fac in element.factor:
                if isinstance(fac, ParameterExpression):
                    factor_list_bound.append(
                        float(fac.bind(dictionary, allow_unknown_parameters=True))
                    )
                else:
                    factor_list_bound.append(fac)
            if isinstance(element, OpTreeSum):
                return OpTreeSum(child_list_assigned, factor_list_bound, element.operation)
            elif isinstance(element, OpTreeList):
                return OpTreeList(child_list_assigned, factor_list_bound, element.operation)
            else:
                raise ValueError("element must be a CircuitTreeSum or a CircuitTreeList")
        elif isinstance(element, OpTreeCircuit):
            if inplace:
                element.circuit.assign_parameters(
                    [dictionary[p] for p in element.circuit.parameters], inplace=True
                )
                return None
            return OpTreeCircuit(
                element.circuit.assign_parameters(
                    [dictionary[p] for p in element.circuit.parameters], inplace=False
                )
            )
        elif isinstance(element, QuantumCircuit):
            if inplace:
                element.assign_parameters(
                    [dictionary[p] for p in element.parameters], inplace=True
                )
                return None
            return element.assign_parameters(
                [dictionary[p] for p in element.parameters], inplace=False
            )
        elif isinstance(element, (OpTreeExpectationValue, OpTreeMeasuredOperator)):
            if inplace:
                element.circuit.assign_parameters(
                    [dictionary[p] for p in element.circuit.parameters], inplace=True
                )
                element.operator.assign_parameters(
                    [dictionary[p] for p in element.operator.parameters], inplace=True
                )
                return None
            return OpTreeExpectationValue(
                element.circuit.assign_parameters(
                    [dictionary[p] for p in element.circuit.parameters], inplace=False
                ),
                element.operator.assign_parameters(
                    [dictionary[p] for p in element.operator.parameters], inplace=False
                ),
            )
        elif isinstance(element, OpTreeOperator):
            if inplace:
                element.operator.assign_parameters(
                    [dictionary[p] for p in element.operator.parameters], inplace=True
                )
                return None
            return OpTreeOperator(
                element.operator.assign_parameters(
                    [dictionary[p] for p in element.operator.parameters], inplace=False
                )
            )
        elif isinstance(element, SparsePauliOp):
            if inplace:
                element.assign_parameters(
                    [dictionary[p] for p in element.parameters], inplace=True
                )
                return None
            return element.assign_parameters(
                [dictionary[p] for p in element.parameters], inplace=False
            )
        else:
            raise ValueError("element must be a OpTreeNodeBase, OpTreeLeafCircuit or a QuantumCircuit")
|
def _circuit_parameter_shift(element: Union[(OpTreeCircuit, QuantumCircuit, OpTreeValue)], parameter: ParameterExpression) -> Union[(None, OpTreeSum, OpTreeValue)]:
    """
    Build the parameter shift derivative of a circuit w.r.t. a single parameter.

    Args:
        element (Union[OpTreeLeafCircuit, QuantumCircuit]): The circuit to be differentiated.
        parameter (ParameterExpression): The parameter w.r.t. which the circuit is differentiated.

    Returns:
        The parameter shift derivative of the circuit (always a OpTreeNodeSum)
    """
    # Already evaluated values are constants -> derivative is zero.
    if isinstance(element, OpTreeValue):
        return OpTreeValue(0.0)
    if isinstance(element, OpTreeCircuit):
        circuit = element.circuit
        input_type = 'leaf'
    elif isinstance(element, QuantumCircuit):
        circuit = element
        input_type = 'circuit'
    else:
        raise ValueError('element must be a CircuitTreeLeaf or a QuantumCircuit')
    # Rewrite the circuit with gates for which the shift rule below is applicable.
    circuit = OpTreeDerivative.transpile_to_supported_instructions(circuit)
    # NOTE(review): ``_parameter_table`` is a private Qiskit attribute — confirm it is
    # still available in the pinned Qiskit version.
    if (parameter not in circuit._parameter_table):
        # Circuit does not depend on the parameter -> derivative is zero.
        return OpTreeValue(0.0)
    # Map instruction identity -> index in circuit.data, so the gate can be located
    # again in the deep-copied circuits below.
    iref_to_data_index = {id(inst.operation): idx for (idx, inst) in enumerate(circuit.data)}
    shift_sum = OpTreeSum()
    # One +/- shifted circuit pair per gate that references the parameter.
    for param_reference in circuit._parameter_table[parameter]:
        (original_gate, param_index) = param_reference
        m = iref_to_data_index[id(original_gate)]
        # Chain-rule factor: gradient of the gate's parameter expression w.r.t. ``parameter``.
        fac = original_gate.params[0].gradient(parameter)
        pshift_circ = copy.deepcopy(circuit)
        mshift_circ = copy.deepcopy(circuit)
        pshift_gate = pshift_circ.data[m].operation
        mshift_gate = mshift_circ.data[m].operation
        p_param = pshift_gate.params[param_index]
        m_param = mshift_gate.params[param_index]
        # Standard parameter-shift rule: shift by +/- pi/2, weight by 0.5.
        shift_constant = 0.5
        pshift_gate.params[param_index] = (p_param + (np.pi / (4 * shift_constant)))
        mshift_gate.params[param_index] = (m_param - (np.pi / (4 * shift_constant)))
        # Preserve the input representation (leaf vs. bare circuit) in the result.
        if (input_type == 'leaf'):
            shift_sum.append(OpTreeCircuit(pshift_circ), (shift_constant * fac))
            shift_sum.append(OpTreeCircuit(mshift_circ), ((- shift_constant) * fac))
        else:
            shift_sum.append(pshift_circ, (shift_constant * fac))
            shift_sum.append(mshift_circ, ((- shift_constant) * fac))
    return shift_sum
|
def _operator_differentiation(
    element: Union[OpTreeOperator, SparsePauliOp, OpTreeValue],
    parameter: ParameterExpression,
) -> Union[OpTreeOperator, SparsePauliOp, OpTreeValue]:
    """
    Obtain the derivative of an operator w.r.t. a single parameter.

    Args:
        element (Union[OpTreeLeafOperator, SparsePauliOp]): The operator to be differentiated.
        parameter (ParameterExpression): The parameter w.r.t. which the operator is differentiated.

    Returns:
        Operator derivative as OpTreeLeafOperator or SparsePauliOp, ``OpTreeValue(0.0)``
        when the operator does not depend on the parameter, or ``None`` when all
        coefficient gradients vanish.
    """
    # Already evaluated values are constants -> derivative is zero.
    if isinstance(element, OpTreeValue):
        return OpTreeValue(0.0)
    if isinstance(element, OpTreeOperator):
        operator = element.operator
        input_type = "leaf"
    elif isinstance(element, SparsePauliOp):
        operator = element
        input_type = "sparse_pauli_op"
    else:
        # Bug fix: previously there was no else branch, so an unsupported input left
        # ``operator`` unbound and raised a confusing NameError below.
        raise ValueError("element must be a OpTreeOperator or a SparsePauliOp")
    if parameter not in operator.parameters:
        return OpTreeValue(0.0)
    op_list = []
    param_list = []
    # Differentiate each coefficient; keep only Pauli terms with non-zero gradient.
    for i, coeff in enumerate(operator.coeffs):
        if isinstance(coeff, ParameterExpression):
            # Multiply by 1j before the gradient and by -1j after to keep real-valued
            # expressions real (gradient of purely real expressions may come out complex).
            d_coeff = -1j * (1j * coeff).gradient(parameter)
            if isinstance(d_coeff, complex):
                if d_coeff.imag == 0:
                    d_coeff = d_coeff.real
        else:
            d_coeff = 0.0
        if d_coeff != 0.0:
            op_list.append(operator.paulis[i])
            param_list.append(d_coeff)
    if len(op_list) > 0:
        operator_grad = SparsePauliOp(op_list, param_list)
        if input_type == "leaf":
            return OpTreeOperator(operator_grad)
        return operator_grad
    # NOTE(review): returns None here but OpTreeValue(0.0) above — callers should be
    # checked for consistent handling of both "zero derivative" representations.
    return None
|
def _differentiate_inplace(tree_node: OpTreeNodeBase, parameter: ParameterExpression) -> None:
    """
    Create the derivative of a OpTreeNode w.r.t. a single parameter, modifies the tree inplace.

    Functions returns nothing, since the OpTree is modified inplace.

    Args:
        tree_node (OpTreeNodeBase): The OpTree Node to be differentiated.
        parameter (ParameterExpression): The parameter w.r.t. which the circuit is differentiated.
    """
    if isinstance(tree_node, OpTreeNodeBase):
        # NOTE(review): remove_list is never populated below, so the final removal
        # step is dead code — confirm whether zero-derivative children should be
        # collected here and pruned.
        remove_list = []
        for (i, child) in enumerate(tree_node.children):
            # Gradient of the child's factor (product rule needs both f' and g').
            if isinstance(tree_node.factor[i], ParameterExpression):
                grad_fac = tree_node.factor[i].gradient(parameter)
            else:
                grad_fac = 0.0
            # Gradient of the child itself, dispatched by leaf type.
            if isinstance(child, (QuantumCircuit, OpTreeCircuit)):
                grad = _circuit_parameter_shift(child, parameter)
            elif isinstance(child, (SparsePauliOp, OpTreeOperator)):
                grad = _operator_differentiation(child, parameter)
            elif isinstance(child, OpTreeMeasuredOperator):
                # Only the operator part is differentiated; the measurement circuit stays.
                grad_op = _operator_differentiation(child.operator, parameter)
                if isinstance(grad_op, OpTreeValue):
                    grad = grad_op
                else:
                    grad = OpTreeMeasuredOperator(child.circuit, grad_op)
            elif isinstance(child, OpTreeExpectationValue):
                raise NotImplementedError('Expectation value differentiation not implemented yet')
            else:
                # Inner node: recurse; the child is differentiated in place.
                _differentiate_inplace(child, parameter)
                grad = child
            # Product rule: d(f * c) = f' * c + f * c'. When the factor gradient is
            # zero only the child's derivative remains.
            if isinstance(grad_fac, float):
                if (grad_fac == 0.0):
                    tree_node.children[i] = grad
                else:
                    tree_node.children[i] = OpTreeSum([child, grad], [grad_fac, tree_node.factor[i]])
                    tree_node.factor[i] = 1.0
            else:
                # Factor gradient is itself a parameter expression; keep it symbolic.
                tree_node.children[i] = OpTreeSum([child, grad], [grad_fac, tree_node.factor[i]])
                tree_node.factor[i] = 1.0
        if (len(remove_list) > 0):
            tree_node.remove(remove_list)
    else:
        raise ValueError('tree_node must be a OpTreeNodeSum or a OpTreeNodeList')
|
def _differentiate_copy(element: Union[(OpTreeNodeBase, OpTreeCircuit, QuantumCircuit, OpTreeOperator, SparsePauliOp)], parameter: ParameterExpression) -> OpTreeNodeBase:
    """
    Create the derivative of a OpTree or circuit w.r.t. a single parameter, the input is untouched.

    Args:
        element (Union[OpTreeNodeBase, OpTreeLeafCircuit, QuantumCircuit]): The OpTree (or circuit) to be differentiated.
        parameter (ParameterExpression): The parameter w.r.t. which the circuit is differentiated.

    Returns:
        The derivative of the circuit as an OpTree
    """
    if isinstance(element, OpTreeNodeBase):
        children_list = []
        factor_list = []
        for (i, child) in enumerate(element.children):
            if isinstance(element.factor[i], ParameterExpression):
                # Factor depends on the parameter: apply the product rule
                # d(f * c) = f' * c + f * c'.
                grad_fac = element.factor[i].gradient(parameter)
                fac = element.factor[i]
                grad = _differentiate_copy(child, parameter)
                if isinstance(grad_fac, float):
                    if (grad_fac == 0.0):
                        # Factor gradient vanished: only the child's derivative remains.
                        children_list.append(grad)
                        factor_list.append(fac)
                    else:
                        children_list.append(OpTreeSum([child, grad], [grad_fac, fac]))
                        factor_list.append(1.0)
                else:
                    # Factor gradient is itself a parameter expression; keep it symbolic.
                    children_list.append(OpTreeSum([child, grad], [grad_fac, fac]))
                    factor_list.append(1.0)
            else:
                # Constant factor: differentiate only the child.
                children_list.append(_differentiate_copy(child, parameter))
                factor_list.append(element.factor[i])
        if isinstance(element, OpTreeSum):
            return OpTreeSum(children_list, factor_list)
        elif isinstance(element, OpTreeList):
            return OpTreeList(children_list, factor_list)
        else:
            raise ValueError('element must be a CircuitTreeSum or a CircuitTreeList')
    elif isinstance(element, (QuantumCircuit, OpTreeCircuit)):
        # Circuit leafs are differentiated via the parameter-shift rule.
        return _circuit_parameter_shift(element, parameter)
    elif isinstance(element, (SparsePauliOp, OpTreeOperator)):
        return _operator_differentiation(element, parameter)
    elif isinstance(element, OpTreeMeasuredOperator):
        # Only the operator part is differentiated; the measurement circuit stays.
        grad_op = _operator_differentiation(element.operator, parameter)
        if isinstance(grad_op, OpTreeValue):
            return grad_op
        return OpTreeMeasuredOperator(element.circuit, grad_op)
    elif isinstance(element, OpTreeExpectationValue):
        raise NotImplementedError('Expectation value differentiation not implemented yet')
    else:
        raise ValueError(('Unsupported element type: ' + str(type(element))))
|
class OpTreeDerivative():
    """Static class for differentiation of a OpTrees, circuits, or operators."""

    # Gate set with known parameter-shift rules; circuits are transpiled into it
    # before differentiation.
    SUPPORTED_GATES = {'s', 'sdg', 't', 'tdg', 'ecr', 'sx', 'x', 'y', 'z', 'h', 'rx', 'ry', 'rz', 'p', 'cx', 'cy', 'cz'}

    @staticmethod
    def transpile_to_supported_instructions(circuit: QuantumCircuit, supported_gates: Set[str]=SUPPORTED_GATES) -> QuantumCircuit:
        """Function for transpiling a circuit to a supported instruction set for gradient calculation.

        Args:
            circuit (QuantumCircuit): Circuit to transpile.
            supported_gates (Set[str]): Set of supported gates (Default set given).

        Returns:
            Circuit which is transpiled to the supported instruction set.
        """
        unique_ops = set(circuit.count_ops())
        # Only transpile when the circuit actually contains unsupported gates;
        # trivial layout keeps qubit indices (and hence parameters) stable.
        if (not unique_ops.issubset(supported_gates)):
            circuit = transpile(circuit, basis_gates=list(supported_gates), optimization_level=0, layout_method='trivial')
        return circuit

    @staticmethod
    def differentiate(element: Union[(OpTreeNodeBase, OpTreeCircuit, QuantumCircuit, OpTreeOperator, SparsePauliOp)], parameters: Union[(ParameterExpression, List[ParameterExpression], ParameterVector)]) -> OpTreeNodeBase:
        """
        Calculates the derivative of a OpTree (or circuit) w.r.t. to a parameter or a list of parameters.

        Args:
            element (Union[OpTreeNodeBase, OpTreeLeafCircuit, QuantumCircuit]): OpTree (or circuit)
                                                                                to be differentiated.
            parameters (Union[ParameterExpression, List[ParameterExpression], ParameterVector]): Parameter(s) w.r.t.
                                                                                                 the OpTree is
                                                                                                 differentiated

        Returns:
            The derivative of the OpTree (or circuit) in OpTree form.
        """
        # Normalize a single parameter to a list; remember to unwrap the result later.
        is_list = True
        if isinstance(parameters, ParameterExpression):
            parameters = [parameters]
            is_list = False
        # Pre-transpile circuit inputs once so every per-parameter copy is supported.
        if isinstance(element, (QuantumCircuit, OpTreeCircuit)):
            if isinstance(element, OpTreeCircuit):
                element = OpTreeCircuit(OpTreeDerivative.transpile_to_supported_instructions(element.circuit))
            else:
                element = OpTreeCircuit(OpTreeDerivative.transpile_to_supported_instructions(element))
        # Leafs are wrapped in a single-element list so the in-place differentiation
        # below always operates on a node.
        is_node = True
        if (not isinstance(element, OpTreeNodeBase)):
            is_node = False
            start = OpTreeList([element], [1.0])
        else:
            start = element
        derivative_list = []
        fac_list = []
        for dp in parameters:
            # Deep-copy per parameter since _differentiate_inplace mutates its input.
            res = copy.deepcopy(start)
            _differentiate_inplace(res, dp)
            if is_node:
                derivative_list.append(res)
                fac_list.append(1.0)
            elif (len(res.children) > 0):
                # Unwrap the artificial single-element list again.
                derivative_list.append(res.children[0])
                fac_list.append(res.factor[0])
        if (is_list or (len(derivative_list) == 0)):
            return OpTreeList(derivative_list, fac_list)
        else:
            return derivative_list[0]

    @staticmethod
    def differentiate_v2(element: Union[(OpTreeNodeBase, OpTreeCircuit, QuantumCircuit, OpTreeOperator, SparsePauliOp)], parameters: Union[(ParameterExpression, List[ParameterExpression], ParameterVector)]) -> OpTreeNodeBase:
        """
        Calculates the derivative of a OpTree (or circuit) w.r.t. to a parameter or a list of parameters.

        Second implementation, in which the derivative is calculated during the recursive derivative
        computation.

        Args:
            element (Union[OpTreeNodeBase, OpTreeLeafCircuit, QuantumCircuit]): OpTree (or circuit)
                                                                                to be differentiated.
            parameters (Union[ParameterExpression, List[ParameterExpression], ParameterVector]): Parameter(s) w.r.t.
                                                                                                 the OpTree is
                                                                                                 differentiated

        Returns:
            The derivative of the OpTree (or circuit) in OpTree form.
        """
        # Normalize a single parameter to a list; remember to unwrap the result later.
        is_list = True
        if isinstance(parameters, ParameterExpression):
            parameters = [parameters]
            is_list = False
        # Pre-transpile circuit inputs once so every derivative copy is supported.
        if isinstance(element, (QuantumCircuit, OpTreeCircuit)):
            if isinstance(element, OpTreeCircuit):
                element = OpTreeCircuit(OpTreeDerivative.transpile_to_supported_instructions(element.circuit))
            else:
                element = OpTreeCircuit(OpTreeDerivative.transpile_to_supported_instructions(element))
        derivative_list = []
        fac_list = []
        for dp in parameters:
            # Copy-based differentiation: the input element stays untouched.
            derivative_list.append(_differentiate_copy(element, dp))
            fac_list.append(1.0)
        if (is_list or (len(derivative_list) == 0)):
            return OpTreeList(derivative_list, fac_list)
        else:
            return derivative_list[0]
|
def get_quantum_fisher(encoding_circuit: EncodingCircuitBase, x: np.ndarray, p: np.ndarray, executor: Executor, mode: str='p'):
    """
    Function for evaluating the Quantum Fisher Information Matrix of a encoding circuit.

    The Quantum Fisher Information Matrix (QFIM) is evaluated the supplied numerical
    features and parameter value.

    Mode enables the user to choose between different modes of evaluation:

    * ``"p"`` : QFIM for parameters only
    * ``"x"`` : QFIM for features only
    * ``"px"`` : QFIM for parameters and features (order parameters first)
    * ``"xp"`` : QFIM for features and parameters (order features first)

    In case of multiple inputs for ``x`` and ``p``, the QFIM is evaluated for each input
    separately and returned as a numpy matrix.

    Args:
        encoding_circuit (EncodingCircuitBase): Encoding circuit for which the QFIM is evaluated
        x (np.ndarray): Input data values for replacing the features in the encoding circuit
        p (np.ndarray): Parameter values for replacing the parameters in the encoding circuit
        executor (Executor): Executor for evaluating the QFIM (utilizes estimator)
        mode (str): Mode for evaluating the QFIM, possibilities: ``"p"``, ``"x"``,
                    ``"px"``, ``"xp"`` (default: ``"p"``)

    Return:
        Numpy matrix with the QFIM, in case of multiple inputs, the array is nested.
    """
    qfi = QFI(LinCombQGT(executor.get_estimator()))
    # Symbolic parameter vectors for parameters and features of the circuit.
    p_ = ParameterVector('p', encoding_circuit.num_parameters)
    x_ = ParameterVector('x', encoding_circuit.num_features)
    circuit = encoding_circuit.get_circuit(x_, p_)
    # Normalize inputs to lists; the multi_* flags record whether the original
    # input contained multiple entries (controls the output reshape below).
    (x_list, multi_x) = adjust_features(x, encoding_circuit.num_features)
    (p_list, multi_p) = adjust_parameters(p, encoding_circuit.num_parameters)
    circ_list = []
    param_values_list = []
    param_list = []
    # Build one (circuit, values, free-parameters) triple per x/p combination; the
    # variables NOT differentiated over are bound into the circuit beforehand.
    # NOTE(review): an unrecognized ``mode`` silently falls through with empty
    # lists — consider raising a ValueError instead.
    if (mode == 'p'):
        for xval in x_list:
            circ_temp = circuit.assign_parameters(dict(zip(x_, xval)))
            for pval in p_list:
                circ_list.append(circ_temp)
                param_values_list.append(pval)
                param_list.append(p_)
    elif (mode == 'x'):
        for xval in x_list:
            for pval in p_list:
                circ_list.append(circuit.assign_parameters(dict(zip(p_, pval))))
                param_values_list.append(xval)
                param_list.append(x_)
    elif (mode == 'xp'):
        for xval in x_list:
            for pval in p_list:
                circ_list.append(circuit)
                param_values_list.append(np.concatenate((xval, pval)))
                param_list.append((list(x_) + list(p_)))
    elif (mode == 'px'):
        for xval in x_list:
            for pval in p_list:
                circ_list.append(circuit)
                param_values_list.append(np.concatenate((pval, xval)))
                param_list.append((list(p_) + list(x_)))
    qfis = np.array(qfi.run(circ_list, param_values_list, param_list).result().qfis)
    # Restore the nesting of the caller's input: one axis per multi-valued input.
    reshape_list = []
    if multi_x:
        reshape_list.append(len(x_list))
    if multi_p:
        reshape_list.append(len(p_list))
    if (len(reshape_list) > 0):
        qfis = qfis.reshape((reshape_list + list(qfis[0].shape)))
    else:
        qfis = qfis[0]
    executor.clear_estimator_cache()
    return qfis
|
class TestLayeredEncodingCircuit():
    'Test class for LayeredEncodingCircuit.'
    def test_layered_encoding_circuit_gates(self):
        'Test the non-parameterized gates of the LayeredEncodingCircuit.'
        # Build up the layered circuit one gate layer at a time and compare its
        # text drawing against an equivalent hand-built Qiskit circuit after
        # every addition (string comparison pins gate order and placement).
        lfm = LayeredEncodingCircuit(num_qubits=4, num_features=0)
        lfm.H()
        expected_circuit = QuantumCircuit(4)
        expected_circuit.h(range(4))
        assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
        lfm.X()
        expected_circuit.x(range(4))
        assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
        lfm.Y()
        expected_circuit.y(range(4))
        assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
        lfm.Z()
        expected_circuit.z(range(4))
        assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
        lfm.I()
        expected_circuit.i(range(4))
        assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
        lfm.S()
        expected_circuit.s(range(4))
        assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
        lfm.S_conjugate()
        expected_circuit.sdg(range(4))
        assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
        lfm.T()
        expected_circuit.t(range(4))
        assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
        lfm.T_conjugate()
        expected_circuit.tdg(range(4))
        assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
        # The circuit has no features, so the self-kernel of the empty input
        # must be exactly 1 (state identical to itself).
        kernel = FidelityKernel(lfm, Executor('statevector_simulator')).evaluate(np.array([[]]), np.array([[]]))
        assert np.allclose(kernel, np.array([1.0]))
    def test_layered_encoding_circuit_param_gates(self):
        'Test the parameterized gates of the LayeredEncodingCircuit.'
        # Each 'p'-labelled layer consumes 4 parameters (one per qubit), so the
        # reference circuit indexes p with a per-layer offset of 4.
        lfm = LayeredEncodingCircuit(num_qubits=4, num_features=2)
        expected_circuit = QuantumCircuit(4)
        p = ParameterVector('p', 16)
        x = ParameterVector('x', 2)
        lfm.Rx('p', encoding=np.arccos)
        for i in range(4):
            expected_circuit.rx(np.arccos(p[i]), i)
        assert (str(lfm.get_circuit(x, p)) == str(expected_circuit))
        lfm.Ry('p', encoding=np.arccos)
        for i in range(4):
            expected_circuit.ry(np.arccos(p[(i + 4)]), i)
        assert (str(lfm.get_circuit(x, p)) == str(expected_circuit))
        lfm.Rz('p', encoding=np.arccos)
        for i in range(4):
            expected_circuit.rz(np.arccos(p[(i + 8)]), i)
        assert (str(lfm.get_circuit(x, p)) == str(expected_circuit))
        lfm.P('p', encoding=np.arccos)
        for i in range(4):
            expected_circuit.p(np.arccos(p[(i + 12)]), i)
        assert (str(lfm.get_circuit(x, p)) == str(expected_circuit))
        # U consumes three feature values per qubit; with only 2 features the
        # layer cycles through them (hence the modulo-2 indexing).
        lfm.U(('x', 'x', 'x'))
        ioff = 0
        for i in range(4):
            expected_circuit.u(x[(ioff % 2)], x[((ioff + 1) % 2)], x[((ioff + 2) % 2)], i)
            ioff += 3
        assert (str(lfm.get_circuit(x, p)) == str(expected_circuit))
        kernel = FidelityKernel(lfm, Executor('statevector_simulator'), initial_parameters=(0.5 * np.ones(16))).evaluate(np.ones((1, 2)), np.ones((1, 2)))
        assert np.allclose(kernel, np.array([1.0]))
    def test_layered_encoding_circuit_entangling_gates(self):
        'Test the entangling gates of the LayeredEncodingCircuit.'
        # Reference qubit pairings for the two entangling strategies:
        # 'NN' = nearest-neighbour, 'AA' = all-to-all.
        def add_NN(gate_function):
            gate_function(0, 1)
            gate_function(2, 3)
            gate_function(1, 2)
        def add_AA(gate_function):
            gate_function(0, 1)
            gate_function(0, 2)
            gate_function(0, 3)
            gate_function(1, 2)
            gate_function(1, 3)
            gate_function(2, 3)
        lfm = LayeredEncodingCircuit(num_qubits=4, num_features=0)
        expected_circuit = QuantumCircuit(4)
        # Pair each LayeredEncodingCircuit entangler with its Qiskit analogue.
        lfm_list = [lfm.ch_entangling, lfm.cx_entangling, lfm.cy_entangling, lfm.cz_entangling, lfm.swap]
        qiskit_list = [expected_circuit.ch, expected_circuit.cx, expected_circuit.cy, expected_circuit.cz, expected_circuit.swap]
        for (lfm_gate, qiskit_gate) in zip(lfm_list, qiskit_list):
            lfm_gate('NN')
            add_NN(qiskit_gate)
            assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
            lfm_gate('AA')
            add_AA(qiskit_gate)
            assert (str(lfm.get_circuit([], [])) == str(expected_circuit))
        kernel = FidelityKernel(lfm, Executor('statevector_simulator')).evaluate(np.array([[]]), np.array([[]]))
        assert np.allclose(kernel, np.array([1.0]))
    def test_layered_encoding_circuit_param_entangling_gates(self):
        'Test the parameterized entangling gates of the LayeredEncodingCircuit.'
        # Same NN/AA pairings as above, but each two-qubit gate takes an
        # arccos-encoded parameter; the helpers return the advanced offset so
        # parameter consumption across layers can be tracked.
        def add_NN(gate_function, p, offset=0):
            gate_function(np.arccos(p[offset]), 0, 1)
            gate_function(np.arccos(p[(offset + 1)]), 2, 3)
            gate_function(np.arccos(p[(offset + 2)]), 1, 2)
            return (offset + 3)
        def add_AA(gate_function, p, offset=0):
            gate_function(np.arccos(p[offset]), 0, 1)
            gate_function(np.arccos(p[(offset + 1)]), 0, 2)
            gate_function(np.arccos(p[(offset + 2)]), 0, 3)
            gate_function(np.arccos(p[(offset + 3)]), 1, 2)
            gate_function(np.arccos(p[(offset + 4)]), 1, 3)
            gate_function(np.arccos(p[(offset + 5)]), 2, 3)
            return (offset + 6)
        lfm = LayeredEncodingCircuit(num_qubits=4, num_features=0)
        expected_circuit = QuantumCircuit(4)
        # 8 gate types * (3 NN + 6 AA) parameters = 72 parameters in total.
        p = ParameterVector('p', 72)
        x = ParameterVector('x', 2)
        offset = 0
        lfm_list = [lfm.cp_entangling, lfm.crx_entangling, lfm.cry_entangling, lfm.crz_entangling, lfm.rxx_entangling, lfm.ryy_entangling, lfm.rzz_entangling, lfm.rzx_entangling]
        qiskit_list = [expected_circuit.cp, expected_circuit.crx, expected_circuit.cry, expected_circuit.crz, expected_circuit.rxx, expected_circuit.ryy, expected_circuit.rzz, expected_circuit.rzx]
        for (lfm_gate, qiskit_gate) in zip(lfm_list, qiskit_list):
            lfm_gate('p', ent_strategy='NN', encoding=np.arccos)
            offset = add_NN(qiskit_gate, p, offset)
            assert (str(lfm.get_circuit(x, p)) == str(expected_circuit))
            lfm_gate('p', ent_strategy='AA', encoding=np.arccos)
            offset = add_AA(qiskit_gate, p, offset)
            assert (str(lfm.get_circuit(x, p)) == str(expected_circuit))
        kernel = FidelityKernel(lfm, Executor('statevector_simulator'), initial_parameters=(0.5 * np.ones(72))).evaluate(np.array([[]]), np.array([[]]))
        assert np.allclose(kernel, np.array([1.0]))
    def test_from_string(self):
        'Test the from_string method of the LayeredEncodingCircuit.'
        # The string DSL should reproduce the ChebyshevPQC layout exactly
        # (compared via the text drawing of both circuits).
        lfm = LayeredEncodingCircuit.from_string('Ry(p)-3[Rx(p,x;=y*np.arccos(x),{y,x})-crz(p)]-Ry(p)', num_qubits=4, num_features=1)
        cpqc = ChebyshevPQC(num_qubits=4, num_features=1, num_layers=3, closed=False)
        assert (str(lfm.draw(output='text')) == str(cpqc.draw(output='text')))
        kernel = FidelityKernel(lfm, Executor('statevector_simulator'), initial_parameters=(0.5 * np.ones(29))).evaluate(np.array([0.5]), np.array([0.5]))
        assert np.allclose(kernel, np.array([1.0]))
|
class TestQGPC():
    """Test class for QGPC (quantum Gaussian process classifier)."""

    @pytest.fixture(scope='module')
    def data(self) -> tuple[(np.ndarray, np.ndarray)]:
        """Small two-blob classification data set, scaled into [0.1, 0.9]."""
        (X, y) = make_blobs(n_samples=6, n_features=2, centers=2, random_state=42)
        scl = MinMaxScaler((0.1, 0.9))
        X = scl.fit_transform(X, y)
        return (X, y)

    @pytest.fixture(scope='module')
    def qgpc_fidelity(self) -> QGPC:
        """QGPC module with FidelityKernel."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        encoding_circuit = HubregtsenEncodingCircuit(num_qubits=3, num_features=2, num_layers=2)
        kernel = FidelityKernel(encoding_circuit, executor=executor, regularization='thresholding', mit_depol_noise='msplit')
        return QGPC(quantum_kernel=kernel)

    @pytest.fixture(scope='module')
    def qgpc_pqk(self) -> QGPC:
        """QGPC module with ProjectedQuantumKernel."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        encoding_circuit = HubregtsenEncodingCircuit(num_qubits=3, num_features=2, num_layers=2)
        kernel = ProjectedQuantumKernel(encoding_circuit, executor=executor, regularization='thresholding')
        return QGPC(quantum_kernel=kernel)

    def test_that_qgpc_params_are_present(self):
        """Asserts that all classical parameters are present in the QGPC."""
        qgpc_instance = QGPC(quantum_kernel=MagicMock())
        assert (list(qgpc_instance.get_params(deep=False).keys()) == ['copy_X_train', 'max_iter_predict', 'multi_class', 'n_jobs', 'n_restarts_optimizer', 'optimizer', 'random_state', 'quantum_kernel'])

    @pytest.mark.parametrize('qgpc', ['qgpc_fidelity', 'qgpc_pqk'])
    def test_predict_unfitted(self, qgpc, request, data):
        """An unfitted QGPC must raise a NotFittedError on predict."""
        qgpc_instance = request.getfixturevalue(qgpc)
        (X, _) = data
        with pytest.raises(NotFittedError):
            qgpc_instance.predict(X)

    @pytest.mark.parametrize('qgpc', ['qgpc_fidelity', 'qgpc_pqk'])
    def test_predict(self, qgpc, request, data):
        """Tests concerning the predict function of the QGPC.

        Tests include
        - whether the prediction output is correct
        - whether the output is of the same shape as the reference
        - whether the type of the output is np.ndarray
        """
        qgpc_instance = request.getfixturevalue(qgpc)
        (X, y) = data
        qgpc_instance.fit(X, y)
        y_pred = qgpc_instance.predict(X)
        assert isinstance(y_pred, np.ndarray)
        assert (y_pred.shape == y.shape)
        assert np.allclose(y_pred, y)

    @pytest.mark.parametrize('qgpc', ['qgpc_fidelity', 'qgpc_pqk'])
    def test_predict_probability(self, qgpc, request, data):
        """Tests concerning the predict_proba function of the QGPC.

        Tests include
        - whether the type of the output is np.ndarray
        - whether the output has shape (n_samples, n_classes)
        - whether each row is a valid probability distribution
        """
        qgpc_instance = request.getfixturevalue(qgpc)
        (X, y) = data
        qgpc_instance.fit(X, y)
        # BUG FIX: this test previously called predict(), which duplicated
        # test_predict and never exercised the probability interface.
        y_prob = qgpc_instance.predict_proba(X)
        assert isinstance(y_prob, np.ndarray)
        # GaussianProcessClassifier.predict_proba returns (n_samples, n_classes);
        # the blob data set has exactly two classes.
        assert (y_prob.shape == (y.shape[0], 2))
        assert np.allclose(y_prob.sum(axis=1), 1.0)

    @pytest.mark.parametrize('qgpc', ['qgpc_fidelity', 'qgpc_pqk'])
    def test_kernel_params_can_be_changed_after_initialization(self, qgpc, request, data):
        """Tests concerning the kernel parameter changes."""
        qgpc_instance = request.getfixturevalue(qgpc)
        qgpc_params = qgpc_instance.get_params()
        assert (qgpc_params['num_qubits'] == 3)
        assert (qgpc_params['regularization'] == 'thresholding')
        qgpc_instance.set_params(num_qubits=4, regularization='tikhonov')
        qgpc_params_updated = qgpc_instance.get_params()
        assert (qgpc_params_updated['num_qubits'] == 4)
        assert (qgpc_params_updated['regularization'] == 'tikhonov')
        (X, y) = data
        try:
            qgpc_instance.fit(X, y)
        except Exception:
            # A narrow except plus pytest.fail replaces the former bare
            # 'except: assert False' (assert is stripped under python -O).
            pytest.fail('fitting not possible after changes to quantum kernel parameters')

    @pytest.mark.parametrize('qgpc', ['qgpc_fidelity', 'qgpc_pqk'])
    def test_encoding_circuit_params_can_be_changed_after_initialization(self, qgpc, request, data):
        """Tests concerning the encoding circuit parameter changes."""
        qgpc_instance = request.getfixturevalue(qgpc)
        assert (qgpc_instance.get_params()['num_layers'] == 2)
        qgpc_instance.set_params(num_layers=4)
        assert (qgpc_instance.get_params()['num_layers'] == 4)
        (X, y) = data
        try:
            qgpc_instance.fit(X, y)
        except Exception:
            pytest.fail('fitting not possible after changes to encoding circuit parameters')

    def test_pqk_params_can_be_changed_after_initialization(self, qgpc_pqk, data):
        """Tests concerning changes of PQK parameters."""
        qgpc_params = qgpc_pqk.get_params()
        assert (qgpc_params['gamma'] == 1.0)
        assert (qgpc_params['measurement'] == 'XYZ')
        qgpc_pqk.set_params(gamma=0.5, measurement='Z')
        qgpc_params_updated = qgpc_pqk.get_params()
        assert (qgpc_params_updated['gamma'] == 0.5)
        assert (qgpc_params_updated['measurement'] == 'Z')
        (X, y) = data
        try:
            qgpc_pqk.fit(X, y)
        except Exception:
            pytest.fail('fitting not possible after changes to PQK parameters')

    @pytest.mark.parametrize('qgpc', ['qgpc_fidelity', 'qgpc_pqk'])
    def test_classical_params_can_be_changed_after_initialization(self, qgpc, request):
        """Tests concerning the parameters of the classical GPC changes."""
        qgpc_instance = request.getfixturevalue(qgpc)
        assert (qgpc_instance.get_params()['max_iter_predict'] == 100)
        qgpc_instance.set_params(max_iter_predict=50)
        assert (qgpc_instance.get_params()['max_iter_predict'] == 50)
|
class TestQGPR():
    """Test class for QGPR (quantum Gaussian process regressor)."""

    @pytest.fixture(scope='module')
    def data(self) -> tuple[(np.ndarray, np.ndarray)]:
        """Small regression data set, features scaled into [0.1, 0.9]."""
        (X, y) = make_regression(n_samples=6, n_features=2, random_state=42)
        scl = MinMaxScaler((0.1, 0.9))
        X = scl.fit_transform(X, y)
        return (X, y)

    @pytest.fixture(scope='module')
    def qgpr_fidelity(self) -> QGPR:
        """QGPR module with FidelityKernel."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        encoding_circuit = YZ_CX_EncodingCircuit(num_qubits=3, num_features=2, num_layers=2)
        kernel = FidelityKernel(encoding_circuit=encoding_circuit, executor=executor)
        return QGPR(quantum_kernel=kernel, sigma=1e-06)

    @pytest.fixture(scope='module')
    def qgpr_pqk(self) -> QGPR:
        """QGPR module with ProjectedQuantumKernel."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        encoding_circuit = YZ_CX_EncodingCircuit(num_qubits=3, num_features=2, num_layers=2)
        kernel = ProjectedQuantumKernel(encoding_circuit=encoding_circuit, executor=executor)
        return QGPR(quantum_kernel=kernel, sigma=1e-06, normalize_y=False, full_regularization=True)

    def test_that_qgpr_params_are_present(self):
        """Asserts that all classical parameters are present in the QGPR."""
        qgpr_instance = QGPR(quantum_kernel=MagicMock())
        assert (list(qgpr_instance.get_params(deep=False).keys()) == ['quantum_kernel', 'sigma', 'normalize_y', 'full_regularization'])

    @pytest.mark.parametrize('qgpr', ['qgpr_fidelity', 'qgpr_pqk'])
    def test_predict(self, qgpr, request, data):
        """Tests concerning the predict function of the QGPR.

        Tests include
        - whether the output is of the same shape as the reference
        - whether the type of the output is np.ndarray
        """
        qgpr_instance = request.getfixturevalue(qgpr)
        (X, y) = data
        qgpr_instance.fit(X, y)
        y_pred = qgpr_instance.predict(X)
        assert (y_pred.shape == y.shape)
        assert isinstance(y_pred, np.ndarray)

    @pytest.mark.parametrize('qgpr', ['qgpr_fidelity', 'qgpr_pqk'])
    def test_return_cov(self, qgpr, request, data):
        """Tests the predict function with return_cov=True.

        Tests include
        - whether the output is of the same shape as the reference
        - whether the covariance matrix is square
        """
        qgpr_instance = request.getfixturevalue(qgpr)
        (X, y) = data
        qgpr_instance.fit(X, y)
        (y_pred, cov) = qgpr_instance.predict(X, return_cov=True)
        assert (y_pred.shape == y.shape)
        assert isinstance(y_pred, np.ndarray)
        assert (cov.shape[0] == cov.shape[1])

    @pytest.mark.parametrize('qgpr', ['qgpr_fidelity', 'qgpr_pqk'])
    def test_return_std(self, qgpr, request, data):
        """Tests the predict function with return_std=True.

        Tests include
        - whether the output is of the same shape as the reference
        - whether one standard deviation per sample is returned
        """
        qgpr_instance = request.getfixturevalue(qgpr)
        (X, y) = data
        qgpr_instance.fit(X, y)
        (y_pred, std) = qgpr_instance.predict(X, return_std=True)
        assert (y_pred.shape == y.shape)
        assert isinstance(y_pred, np.ndarray)
        assert (std.shape[0] == X.shape[0])

    @pytest.mark.parametrize('qgpr', ['qgpr_fidelity', 'qgpr_pqk'])
    def test_kernel_params_can_be_changed_after_initialization(self, qgpr, request, data):
        """Tests concerning the kernel parameter changes."""
        qgpr_instance = request.getfixturevalue(qgpr)
        qgpr_params = qgpr_instance.get_params()
        assert (qgpr_params['num_qubits'] == 3)
        assert (qgpr_params['full_regularization'] == True)
        qgpr_instance.set_params(num_qubits=4)
        qgpr_instance.set_params(full_regularization=False)
        qgpr_params_updated = qgpr_instance.get_params()
        assert (qgpr_params_updated['num_qubits'] == 4)
        assert (qgpr_params_updated['full_regularization'] == False)
        (X, y) = data
        try:
            qgpr_instance.fit(X, y)
        except Exception:
            # Narrow except + pytest.fail replaces the former bare
            # 'except: assert False' (assert is stripped under python -O).
            pytest.fail('fitting not possible after changes to quantum kernel parameters')

    @pytest.mark.parametrize('qgpr', ['qgpr_fidelity', 'qgpr_pqk'])
    def test_encoding_circuit_params_can_be_changed_after_initialization(self, qgpr, request, data):
        """Tests concerning the encoding circuit parameter changes."""
        qgpr_instance = request.getfixturevalue(qgpr)
        assert (qgpr_instance.get_params()['num_layers'] == 2)
        qgpr_instance.set_params(num_layers=4)
        assert (qgpr_instance.get_params()['num_layers'] == 4)
        (X, y) = data
        try:
            qgpr_instance.fit(X, y)
        except Exception:
            pytest.fail('fitting not possible after changes to encoding circuit parameters')

    def test_pqk_params_can_be_changes_after_initialization(self, qgpr_pqk, data):
        """Tests concerning changes of PQK parameters."""
        qgpr_params = qgpr_pqk.get_params()
        assert (qgpr_params['gamma'] == 1.0)
        assert (qgpr_params['measurement'] == 'XYZ')
        qgpr_pqk.set_params(gamma=0.5, measurement='Z')
        qgpr_params_updated = qgpr_pqk.get_params()
        assert (qgpr_params_updated['gamma'] == 0.5)
        assert (qgpr_params_updated['measurement'] == 'Z')
        (X, y) = data
        try:
            qgpr_pqk.fit(X, y)
        except Exception:
            pytest.fail('fitting not possible after changes of PQK parameters')

    @pytest.mark.parametrize('qgpr', ['qgpr_fidelity', 'qgpr_pqk'])
    def test_classical_params_can_be_changed_after_initialization(self, qgpr, request):
        """Test concerning change of classical GPR parameters."""
        qgpr_instance = request.getfixturevalue(qgpr)
        qgpr_params = qgpr_instance.get_params()
        assert (qgpr_params['sigma'] == 1e-06)
        assert (qgpr_params['normalize_y'] == False)
        qgpr_instance.set_params(sigma=0.01)
        qgpr_instance.set_params(normalize_y=True)
        qgpr_params_updated = qgpr_instance.get_params()
        assert (qgpr_params_updated['sigma'] == 0.01)
        assert (qgpr_params_updated['normalize_y'] == True)

    @pytest.mark.parametrize('qgpr', ['qgpr_fidelity', 'qgpr_pqk'])
    def test_that_regularization_is_called_when_not_none(self, qgpr, request, data):
        """Asserts that regularization is called."""
        qgpr_instance = request.getfixturevalue(qgpr)
        (X, y) = data
        qgpr_instance.set_params(regularization='tikhonov')
        # Wrap the private regularizer in an identity-returning mock so the
        # number of invocations across fit + predict can be counted.
        qgpr_instance._quantum_kernel._regularize_matrix = MagicMock()
        qgpr_instance._quantum_kernel._regularize_matrix.side_effect = (lambda x: x)
        qgpr_instance.fit(X, y)
        qgpr_instance.predict(X)
        assert (qgpr_instance._quantum_kernel._regularize_matrix.call_count == 3)
|
class TestQKRR():
    """Test class for QKRR (quantum kernel ridge regression)."""

    @pytest.fixture(scope='module')
    def data(self) -> tuple[(np.ndarray, np.ndarray)]:
        """Small regression data set, features scaled into [0.1, 0.9]."""
        (X, y) = make_regression(n_samples=6, n_features=2, random_state=42)
        scl = MinMaxScaler((0.1, 0.9))
        X = scl.fit_transform(X, y)
        return (X, y)

    @pytest.fixture(scope='module')
    def qkrr_fidelity(self) -> QKRR:
        """QKRR module with FidelityKernel."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        encoding_circuit = ParamZFeatureMap(num_qubits=3, num_features=2, num_layers=2, entangling=True)
        kernel = FidelityKernel(encoding_circuit=encoding_circuit, executor=executor, regularization='thresholding', mit_depol_noise='msplit')
        return QKRR(quantum_kernel=kernel, alpha=1e-06)

    @pytest.fixture(scope='module')
    def qkrr_pqk(self) -> QKRR:
        """QKRR module with ProjectedQuantumKernel."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        encoding_circuit = ParamZFeatureMap(num_qubits=3, num_features=2, num_layers=2, entangling=True)
        kernel = ProjectedQuantumKernel(encoding_circuit=encoding_circuit, executor=executor, regularization='thresholding')
        return QKRR(quantum_kernel=kernel, alpha=1e-06)

    def test_that_qkrr_params_are_present(self):
        """Asserts that all classical parameters are present in the QKRR."""
        qkrr_instance = QKRR(quantum_kernel=MagicMock())
        assert (list(qkrr_instance.get_params(deep=False).keys()) == ['quantum_kernel', 'alpha'])

    @pytest.mark.parametrize('qkrr', ['qkrr_fidelity', 'qkrr_pqk'])
    def test_predict(self, qkrr, request, data):
        """Tests concerning the predict function of the QKRR.

        Tests include
        - whether the output is of the same shape as the reference
        - whether the type of the output is np.ndarray
        """
        qkrr_instance = request.getfixturevalue(qkrr)
        (X, y) = data
        qkrr_instance.fit(X, y)
        y_pred = qkrr_instance.predict(X)
        assert (y_pred.shape == y.shape)
        assert isinstance(y_pred, np.ndarray)

    @pytest.mark.parametrize('qkrr', ['qkrr_fidelity', 'qkrr_pqk'])
    def test_kernel_params_can_be_changed_after_initialization(self, qkrr, request, data):
        """Tests concerning the kernel parameter changes."""
        qkrr_instance = request.getfixturevalue(qkrr)
        qkrr_params = qkrr_instance.get_params()
        assert (qkrr_params['num_qubits'] == 3)
        assert (qkrr_params['regularization'] == 'thresholding')
        qkrr_instance.set_params(num_qubits=4, regularization='tikhonov')
        qkrr_params_updated = qkrr_instance.get_params()
        assert (qkrr_params_updated['num_qubits'] == 4)
        assert (qkrr_params_updated['regularization'] == 'tikhonov')
        (X, y) = data
        try:
            qkrr_instance.fit(X, y)
        except Exception:
            # Narrow except + pytest.fail replaces the former bare
            # 'except: assert False' (assert is stripped under python -O).
            pytest.fail('fitting not possible after changes to quantum kernel parameters')

    @pytest.mark.parametrize('qkrr', ['qkrr_fidelity', 'qkrr_pqk'])
    def test_encoding_circuit_params_can_be_changed_after_initialization(self, qkrr, request, data):
        """Tests concerning the encoding circuit parameter changes."""
        qkrr_instance = request.getfixturevalue(qkrr)
        assert (qkrr_instance.get_params()['num_layers'] == 2)
        qkrr_instance.set_params(num_layers=4)
        assert (qkrr_instance.get_params()['num_layers'] == 4)
        (X, y) = data
        try:
            qkrr_instance.fit(X, y)
        except Exception:
            pytest.fail('fitting not possible after changes to encoding circuit parameters')

    def test_pqk_params_can_be_changes_after_initialization(self, qkrr_pqk, data):
        """Tests concerning changes of PQK parameters."""
        qkrr_params = qkrr_pqk.get_params()
        assert (qkrr_params['gamma'] == 1.0)
        assert (qkrr_params['measurement'] == 'XYZ')
        qkrr_pqk.set_params(gamma=0.5, measurement='Z')
        qkrr_params_updated = qkrr_pqk.get_params()
        assert (qkrr_params_updated['gamma'] == 0.5)
        assert (qkrr_params_updated['measurement'] == 'Z')
        (X, y) = data
        try:
            qkrr_pqk.fit(X, y)
        except Exception:
            pytest.fail('fitting not possible after changes of PQK parameters')

    @pytest.mark.parametrize('qkrr', ['qkrr_fidelity', 'qkrr_pqk'])
    def test_classical_params_can_be_changed_after_initialization(self, qkrr, request):
        """Test concerning change of classical KRR parameters."""
        qkrr_instance = request.getfixturevalue(qkrr)
        qkrr_params = qkrr_instance.get_params()
        assert (qkrr_params['alpha'] == 1e-06)
        qkrr_instance.set_params(alpha=0.01)
        qkrr_params_updated = qkrr_instance.get_params()
        assert (qkrr_params_updated['alpha'] == 0.01)

    @pytest.mark.parametrize('qkrr', ['qkrr_fidelity', 'qkrr_pqk'])
    def test_that_regularization_is_called_when_not_none(self, qkrr, request, data):
        """Asserts that regularization is called."""
        qkrr_instance = request.getfixturevalue(qkrr)
        (X, y) = data
        qkrr_instance.set_params(regularization='tikhonov')
        # Wrap the private regularizer in an identity-returning mock so the
        # number of invocations across fit + predict can be counted.
        qkrr_instance._quantum_kernel._regularize_matrix = MagicMock()
        qkrr_instance._quantum_kernel._regularize_matrix.side_effect = (lambda x: x)
        qkrr_instance.fit(X, y)
        qkrr_instance.predict(X)
        assert (qkrr_instance._quantum_kernel._regularize_matrix.call_count == 2)
|
class TestQSVC():
    """Test class for QSVC (quantum support vector classification)."""

    @pytest.fixture(scope='module')
    def data(self) -> tuple[(np.ndarray, np.ndarray)]:
        """Small two-blob classification data set, scaled into [0.1, 0.9]."""
        (X, y) = make_blobs(n_samples=6, n_features=2, centers=2, random_state=42)
        scl = MinMaxScaler((0.1, 0.9))
        X = scl.fit_transform(X, y)
        return (X, y)

    @pytest.fixture(scope='module')
    def qsvc_fidelity(self) -> QSVC:
        """QSVC module with FidelityKernel."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        encoding_circuit = HubregtsenEncodingCircuit(num_qubits=3, num_features=2, num_layers=2)
        kernel = FidelityKernel(encoding_circuit, executor=executor, regularization='thresholding', mit_depol_noise='msplit')
        return QSVC(kernel)

    @pytest.fixture(scope='module')
    def qsvc_pqk(self) -> QSVC:
        """QSVC module with ProjectedQuantumKernel."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        encoding_circuit = HubregtsenEncodingCircuit(num_qubits=3, num_features=2, num_layers=2)
        kernel = ProjectedQuantumKernel(encoding_circuit, executor=executor, regularization='thresholding')
        return QSVC(kernel)

    def test_that_qsvc_params_are_present(self):
        """Asserts that all classical parameters are present in the QSVC."""
        qsvc_instance = QSVC(MagicMock())
        assert (list(qsvc_instance.get_params(deep=False).keys()) == ['C', 'break_ties', 'cache_size', 'class_weight', 'decision_function_shape', 'max_iter', 'probability', 'random_state', 'shrinking', 'tol', 'verbose', 'quantum_kernel'])

    @pytest.mark.parametrize('qsvc', ['qsvc_fidelity', 'qsvc_pqk'])
    def test_predict_unfitted(self, qsvc, request, data):
        """An unfitted QSVC must raise a NotFittedError on predict."""
        qsvc_instance = request.getfixturevalue(qsvc)
        (X, _) = data
        with pytest.raises(NotFittedError):
            qsvc_instance.predict(X)

    @pytest.mark.parametrize('qsvc', ['qsvc_fidelity', 'qsvc_pqk'])
    def test_predict(self, qsvc, request, data):
        """Tests concerning the predict function of the QSVC.

        Tests include
        - whether the prediction output is correct
        - whether the output is of the same shape as the reference
        - whether the type of the output is np.ndarray
        """
        qsvc_instance = request.getfixturevalue(qsvc)
        (X, y) = data
        qsvc_instance.fit(X, y)
        y_pred = qsvc_instance.predict(X)
        assert isinstance(y_pred, np.ndarray)
        assert (y_pred.shape == y.shape)
        assert np.allclose(y_pred, y)

    @pytest.mark.parametrize('qsvc', ['qsvc_fidelity', 'qsvc_pqk'])
    def test_kernel_params_can_be_changed_after_initialization(self, qsvc, request, data):
        """Tests concerning the kernel parameter changes."""
        qsvc_instance = request.getfixturevalue(qsvc)
        qsvc_params = qsvc_instance.get_params()
        assert (qsvc_params['num_qubits'] == 3)
        assert (qsvc_params['regularization'] == 'thresholding')
        qsvc_instance.set_params(num_qubits=4, regularization='tikhonov')
        qsvc_params_updated = qsvc_instance.get_params()
        assert (qsvc_params_updated['num_qubits'] == 4)
        assert (qsvc_params_updated['regularization'] == 'tikhonov')
        (X, y) = data
        try:
            qsvc_instance.fit(X, y)
        except Exception:
            # Narrow except + pytest.fail replaces the former bare
            # 'except: assert False' (assert is stripped under python -O).
            pytest.fail('fitting not possible after changes to quantum kernel parameters')

    @pytest.mark.parametrize('qsvc', ['qsvc_fidelity', 'qsvc_pqk'])
    def test_encoding_circuit_params_can_be_changed_after_initialization(self, qsvc, request, data):
        """Tests concerning the encoding circuit parameter changes."""
        qsvc_instance = request.getfixturevalue(qsvc)
        assert (qsvc_instance.get_params()['num_layers'] == 2)
        qsvc_instance.set_params(num_layers=4)
        assert (qsvc_instance.get_params()['num_layers'] == 4)
        (X, y) = data
        try:
            qsvc_instance.fit(X, y)
        except Exception:
            pytest.fail('fitting not possible after changes to encoding circuit parameters')

    def test_pqk_params_can_be_changed_after_initialization(self, qsvc_pqk, data):
        """Tests concerning changes of PQK parameters."""
        qsvc_params = qsvc_pqk.get_params()
        assert (qsvc_params['gamma'] == 1.0)
        assert (qsvc_params['measurement'] == 'XYZ')
        qsvc_pqk.set_params(gamma=0.5, measurement='Z')
        qsvc_params_updated = qsvc_pqk.get_params()
        assert (qsvc_params_updated['gamma'] == 0.5)
        assert (qsvc_params_updated['measurement'] == 'Z')
        (X, y) = data
        try:
            qsvc_pqk.fit(X, y)
        except Exception:
            pytest.fail('fitting not possible after changes of PQK parameters')

    @pytest.mark.parametrize('qsvc', ['qsvc_fidelity', 'qsvc_pqk'])
    def test_classical_params_can_be_changed_after_initialization(self, qsvc, request):
        """Tests concerning the parameters of the classical SVC changes."""
        qsvc_instance = request.getfixturevalue(qsvc)
        assert (qsvc_instance.get_params()['C'] == 1.0)
        qsvc_instance.set_params(C=4)
        assert (qsvc_instance.get_params()['C'] == 4)

    @pytest.mark.parametrize('qsvc', ['qsvc_fidelity', 'qsvc_pqk'])
    def test_that_regularization_is_called_when_not_none(self, qsvc, request, data):
        """Asserts that regularization is called."""
        qsvc_instance = request.getfixturevalue(qsvc)
        (X, y) = data
        qsvc_instance.set_params(regularization='tikhonov')
        # Wrap the private regularizer in an identity-returning mock so the
        # number of invocations across fit + predict can be counted.
        qsvc_instance.quantum_kernel._regularize_matrix = MagicMock()
        qsvc_instance.quantum_kernel._regularize_matrix.side_effect = (lambda x: x)
        qsvc_instance.fit(X, y)
        qsvc_instance.predict(X)
        assert (qsvc_instance.quantum_kernel._regularize_matrix.call_count == 2)
|
class TestQSVR():
    """Test class for QSVR (quantum support vector regression)."""

    @pytest.fixture(scope='module')
    def data(self) -> tuple[(np.ndarray, np.ndarray)]:
        """Small regression data set, features scaled into [0.1, 0.9]."""
        (X, y) = make_regression(n_samples=6, n_features=2, random_state=42)
        scl = MinMaxScaler((0.1, 0.9))
        X = scl.fit_transform(X, y)
        return (X, y)

    @pytest.fixture(scope='module')
    def qsvr_fidelity(self) -> QSVR:
        """QSVR module with FidelityKernel."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        encoding_circuit = MultiControlEncodingCircuit(num_qubits=3, num_features=2, num_layers=2)
        kernel = FidelityKernel(encoding_circuit, executor=executor, regularization='thresholding', mit_depol_noise='msplit')
        return QSVR(kernel, C=1, epsilon=0.1)

    @pytest.fixture(scope='module')
    def qsvr_pqk(self) -> QSVR:
        """QSVR module with ProjectedQuantumKernel."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        encoding_circuit = MultiControlEncodingCircuit(num_qubits=3, num_features=2, num_layers=2)
        kernel = ProjectedQuantumKernel(encoding_circuit, executor=executor, regularization='thresholding')
        return QSVR(kernel, C=1, epsilon=0.1)

    def test_that_qsvr_params_are_present(self):
        """Asserts that all classical parameters are present in the QSVR."""
        qsvr_instance = QSVR(MagicMock())
        assert (list(qsvr_instance.get_params(deep=False).keys()) == ['C', 'cache_size', 'epsilon', 'max_iter', 'shrinking', 'tol', 'verbose', 'quantum_kernel'])

    @pytest.mark.parametrize('qsvr', ['qsvr_fidelity', 'qsvr_pqk'])
    def test_predict_unfitted(self, qsvr, request, data):
        """An unfitted QSVR must raise a NotFittedError on predict."""
        qsvr_instance = request.getfixturevalue(qsvr)
        (X, _) = data
        with pytest.raises(NotFittedError):
            qsvr_instance.predict(X)

    @pytest.mark.parametrize('qsvr', ['qsvr_fidelity', 'qsvr_pqk'])
    def test_predict(self, qsvr, request, data):
        """Tests concerning the predict function of the QSVR.

        Tests include
        - whether the output is of the same shape as the reference
        - whether the type of the output is np.ndarray
        """
        qsvr_instance = request.getfixturevalue(qsvr)
        (X, y) = data
        qsvr_instance.fit(X, y)
        y_pred = qsvr_instance.predict(X)
        assert isinstance(y_pred, np.ndarray)
        assert (y_pred.shape == y.shape)

    @pytest.mark.parametrize('qsvr', ['qsvr_fidelity', 'qsvr_pqk'])
    def test_kernel_params_can_be_changed_after_initialization(self, qsvr, request, data):
        """Tests concerning the kernel parameter changes."""
        qsvr_instance = request.getfixturevalue(qsvr)
        qsvr_params = qsvr_instance.get_params()
        assert (qsvr_params['num_qubits'] == 3)
        assert (qsvr_params['regularization'] == 'thresholding')
        qsvr_instance.set_params(num_qubits=4, regularization='tikhonov')
        qsvr_params_updated = qsvr_instance.get_params()
        assert (qsvr_params_updated['num_qubits'] == 4)
        assert (qsvr_params_updated['regularization'] == 'tikhonov')
        (X, y) = data
        try:
            qsvr_instance.fit(X, y)
        except Exception:
            # Narrow except + pytest.fail replaces the former bare
            # 'except: assert False' (assert is stripped under python -O).
            pytest.fail('fitting not possible after changes to quantum kernel parameters')

    @pytest.mark.parametrize('qsvr', ['qsvr_fidelity', 'qsvr_pqk'])
    def test_encoding_circuit_params_can_be_changed_after_initialization(self, qsvr, request, data):
        """Tests concerning the encoding circuit parameter changes."""
        qsvr_instance = request.getfixturevalue(qsvr)
        assert (qsvr_instance.get_params()['num_layers'] == 2)
        qsvr_instance.set_params(num_layers=4)
        assert (qsvr_instance.get_params()['num_layers'] == 4)
        (X, y) = data
        try:
            qsvr_instance.fit(X, y)
        except Exception:
            pytest.fail('fitting not possible after changes to encoding circuit parameters')

    def test_pqk_params_can_be_changed_after_initialization(self, qsvr_pqk, data):
        """Tests concerning the PQK parameter changes."""
        qsvr_params = qsvr_pqk.get_params()
        assert (qsvr_params['gamma'] == 1.0)
        assert (qsvr_params['measurement'] == 'XYZ')
        qsvr_pqk.set_params(gamma=0.5, measurement='Z')
        qsvr_params_updated = qsvr_pqk.get_params()
        assert (qsvr_params_updated['gamma'] == 0.5)
        assert (qsvr_params_updated['measurement'] == 'Z')
        (X, y) = data
        try:
            qsvr_pqk.fit(X, y)
        except Exception:
            pytest.fail('fitting not possible after changes of PQK parameters')

    @pytest.mark.parametrize('qsvr', ['qsvr_fidelity', 'qsvr_pqk'])
    def test_classical_params_can_be_changed_after_initialization(self, qsvr, request):
        """Tests concerning the parameters of the classical SVR changes."""
        qsvr_instance = request.getfixturevalue(qsvr)
        qsvr_params = qsvr_instance.get_params()
        assert (qsvr_params['C'] == 1.0)
        assert (qsvr_params['epsilon'] == 0.1)
        qsvr_instance.set_params(C=4, epsilon=0.5)
        qsvr_params_updated = qsvr_instance.get_params()
        assert (qsvr_params_updated['C'] == 4)
        assert (qsvr_params_updated['epsilon'] == 0.5)

    @pytest.mark.parametrize('qsvr', ['qsvr_fidelity', 'qsvr_pqk'])
    def test_that_regularization_is_called_when_not_none(self, qsvr, request, data):
        """Asserts that regularization is called."""
        qsvr_instance = request.getfixturevalue(qsvr)
        (X, y) = data
        qsvr_instance.set_params(regularization='tikhonov')
        # Wrap the private regularizer in an identity-returning mock so the
        # number of invocations across fit + predict can be counted.
        qsvr_instance.quantum_kernel._regularize_matrix = MagicMock()
        qsvr_instance.quantum_kernel._regularize_matrix.side_effect = (lambda x: x)
        qsvr_instance.fit(X, y)
        qsvr_instance.predict(X)
        assert (qsvr_instance.quantum_kernel._regularize_matrix.call_count == 2)
|
class MockBaseQNN(BaseQNN):
    """Minimal concrete subclass of ``BaseQNN`` used to test the base class."""

    def _fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray = None) -> None:
        """No-op training routine; the tests only exercise parameter handling."""
|
class TestBaseQNN():
    """Test class for BaseQNN (exercised through ``MockBaseQNN``).

    NOTE(review): the fixtures are module-scoped, so ``set_params`` calls in
    one test remain visible in later tests of this class.
    """

    @pytest.fixture(scope='module')
    def qnn_single_op(self) -> MockBaseQNN:
        """BaseQNN fixture with a single Ising-Hamiltonian operator."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        pqc = ChebyshevPQC(num_qubits=4, num_features=1, num_layers=2)
        operator = IsingHamiltonian(num_qubits=4, I='S', Z='S', ZZ='S')
        loss = SquaredLoss()
        optimizer = SLSQP(options={'maxiter': 2})
        return MockBaseQNN(pqc, operator, executor, loss, optimizer)

    @pytest.fixture(scope='module')
    def qnn_multi_op(self) -> MockBaseQNN:
        """BaseQNN fixture with a list of five identically constructed operators."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        pqc = ChebyshevPQC(num_qubits=4, num_features=1, num_layers=2)
        operator = [IsingHamiltonian(num_qubits=4, I='S', Z='S', ZZ='S') for _ in range(5)]
        loss = SquaredLoss()
        optimizer = SLSQP(options={'maxiter': 2})
        return MockBaseQNN(pqc, operator, executor, loss, optimizer)

    def test_set_params_invalid_param(self, qnn_single_op: MockBaseQNN):
        """Setting an unknown parameter must raise a ``ValueError``."""
        with pytest.raises(ValueError):
            qnn_single_op.set_params(invalid_param=3)

    def test_set_params_seed(self, qnn_single_op: MockBaseQNN):
        """``set_params(parameter_seed=...)`` updates the ``parameter_seed`` attribute."""
        qnn_single_op.set_params(parameter_seed=42)
        assert (qnn_single_op.parameter_seed == 42)

    def test_set_params_num_qubits_single_op(self, qnn_single_op: MockBaseQNN):
        """``num_qubits`` propagates to circuit, operator and low-level QNN (single operator)."""
        qnn_single_op.set_params(num_qubits=5)
        assert (qnn_single_op.encoding_circuit.num_qubits == 5)
        assert (qnn_single_op.operator.num_qubits == 5)
        assert (qnn_single_op._qnn.num_qubits == 5)

    def test_set_params_num_qubits_multi_op(self, qnn_multi_op: MockBaseQNN):
        """``num_qubits`` propagates to every operator in the list (multiple operators)."""
        qnn_multi_op.set_params(num_qubits=5)
        assert (qnn_multi_op.encoding_circuit.num_qubits == 5)
        for operator in qnn_multi_op.operator:
            assert (operator.num_qubits == 5)
        assert (qnn_multi_op._qnn.num_qubits == 5)

    def test_set_params_encoding_circuit(self, qnn_single_op: MockBaseQNN):
        """Encoding-circuit parameters (``num_layers``, ``closed``) reach the low-level PQC."""
        qnn_single_op.set_params(num_layers=3, closed=True)
        assert (qnn_single_op.encoding_circuit.num_layers == 3)
        assert qnn_single_op.encoding_circuit.closed
        assert (qnn_single_op._qnn.pqc.get_params()['num_layers'] == 3)
        assert qnn_single_op._qnn.pqc.get_params()['closed']

    def test_set_params_single_operator(self, qnn_single_op: MockBaseQNN):
        """Operator parameters (``X``, ``Z``) reach both the wrapper and the low-level operator."""
        qnn_single_op.set_params(X='S', Z='N')
        assert (qnn_single_op.operator.X == 'S')
        assert (qnn_single_op.operator.Z == 'N')
        assert (qnn_single_op._qnn.operator.X == 'S')
        assert (qnn_single_op._qnn.operator.Z == 'N')

    def test_set_params_multi_operator(self, qnn_multi_op: MockBaseQNN):
        """Per-operator parameters addressed via the ``opN__`` prefix reach the right operator."""
        qnn_multi_op.set_params(op0__X='S', op3__Z='N')
        assert (qnn_multi_op.operator[0].X == 'S')
        assert (qnn_multi_op.operator[3].Z == 'N')
        assert (qnn_multi_op._qnn.operator[0].X == 'S')
        assert (qnn_multi_op._qnn.operator[3].Z == 'N')
|
class TestQNNClassifier():
    """Test class for QNNClassifier.

    NOTE(review): fixtures are module-scoped, so fitted state and parameter
    changes carry over between the tests of this class.
    """

    @pytest.fixture(scope='module')
    def data(self) -> tuple[np.ndarray, np.ndarray]:
        """Two-blob classification data with features scaled to [0.1, 0.9]."""
        (X, y) = make_blobs(n_samples=6, n_features=2, centers=2, random_state=42)
        scl = MinMaxScaler((0.1, 0.9))
        X = scl.fit_transform(X, y)
        return (X, y)

    @pytest.fixture(scope='module')
    def qnn_classifier(self) -> QNNClassifier:
        """QNNClassifier with explicit (randomly drawn) initial parameters."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        pqc = ChebyshevPQC(num_qubits=2, num_features=2, num_layers=1)
        operator = SummedPaulis(num_qubits=2)
        loss = SquaredLoss()
        optimizer = SLSQP(options={'maxiter': 2})
        param_ini = np.random.rand(pqc.num_parameters)
        param_op_ini = np.random.rand(operator.num_parameters)
        return QNNClassifier(pqc, operator, executor, loss, optimizer, param_ini, param_op_ini)

    @pytest.fixture(scope='module')
    def qnn_classifier_2out(self) -> QNNClassifier:
        """QNNClassifier with two output operators and a fixed parameter seed."""
        executor = Executor('statevector_simulator')
        pqc = ChebyshevPQC(num_qubits=2, num_features=2, num_layers=1)
        operator = [SummedPaulis(num_qubits=2), SummedPaulis(num_qubits=2)]
        loss = SquaredLoss()
        optimizer = SLSQP(options={'maxiter': 2})
        return QNNClassifier(pqc, operator, executor, loss, optimizer, parameter_seed=0)

    def test_predict_unfitted(self, qnn_classifier, data):
        """An unfitted classifier reports not-fitted and refuses to predict.

        Checks that ``_is_fitted`` is False and that ``predict`` raises a
        ``RuntimeError``.
        """
        (X, _) = data
        assert (not qnn_classifier._is_fitted)
        with pytest.raises(RuntimeError, match='The model is not fitted.'):
            qnn_classifier.predict(X)

    def test_fit(self, qnn_classifier, data):
        """``fit`` sets ``_is_fitted`` and moves both parameter sets away from their initial values."""
        (X, y) = data
        qnn_classifier.fit(X, y)
        assert qnn_classifier._is_fitted
        assert (not np.allclose(qnn_classifier.param, qnn_classifier.param_ini))
        assert (not np.allclose(qnn_classifier.param_op, qnn_classifier.param_op_ini))

    def test_fit_2out(self, qnn_classifier_2out, data):
        """Same as ``test_fit`` but with two outputs (labels stacked column-wise)."""
        (X, y) = data
        y = np.array([y, y]).T
        qnn_classifier_2out.fit(X, y)
        assert qnn_classifier_2out._is_fitted
        assert (not np.allclose(qnn_classifier_2out.param, qnn_classifier_2out.param_ini))
        assert (not np.allclose(qnn_classifier_2out.param_op, qnn_classifier_2out.param_op_ini))

    def test_partial_fit(self, qnn_classifier, data):
        """Tests concerning the partial_fit function of the QNNClassifier.

        Two full ``fit`` calls must yield the same parameters, while a
        ``partial_fit`` in between must yield different ones.
        """
        (X, y) = data
        qnn_classifier.fit(X, y)
        param_1 = qnn_classifier.param
        qnn_classifier.partial_fit(X, y)
        param_2 = qnn_classifier.param
        qnn_classifier.fit(X, y)
        param_3 = qnn_classifier.param
        assert np.allclose(param_1, param_3)
        assert (not np.allclose(param_2, param_3))

    def test_fit_minibtach(self, qnn_classifier, data):
        """Mini-batch gradient-descent fit updates both parameter sets.

        NOTE(review): "minibtach" is a typo in the test name, kept for
        test-id stability.
        """
        (X, y) = data
        qnn_classifier._optimizer = Adam({'maxiter_total': 10, 'maxiter': 2, 'lr': 0.1})
        qnn_classifier.set_params(batch_size=2, epochs=2, shuffle=True)
        qnn_classifier.fit(X, y)
        assert qnn_classifier._is_fitted
        assert (not np.allclose(qnn_classifier.param, qnn_classifier.param_ini))
        assert (not np.allclose(qnn_classifier.param_op, qnn_classifier.param_op_ini))

    def test_fit_minibtach_2out(self, qnn_classifier_2out, data):
        """Mini-batch gradient-descent fit with two outputs updates both parameter sets."""
        (X, y) = data
        y = np.array([y, y]).T
        qnn_classifier_2out._optimizer = Adam({'maxiter_total': 10, 'maxiter': 2, 'lr': 0.1})
        qnn_classifier_2out.set_params(batch_size=2, epochs=2, shuffle=True)
        qnn_classifier_2out.fit(X, y)
        assert qnn_classifier_2out._is_fitted
        assert (not np.allclose(qnn_classifier_2out.param, qnn_classifier_2out.param_ini))
        assert (not np.allclose(qnn_classifier_2out.param_op, qnn_classifier_2out.param_op_ini))

    def test_predict(self, qnn_classifier, data):
        """``predict`` with hand-set parameters returns the expected labels (all zeros here)."""
        (X, y) = data
        qnn_classifier._param = np.linspace(0.1, 0.7, 7)
        qnn_classifier._param_op = np.linspace(0.1, 0.3, 3)
        qnn_classifier._label_binarizer = LabelBinarizer()
        qnn_classifier._label_binarizer.fit(y)
        qnn_classifier._is_fitted = True
        y_pred = qnn_classifier.predict(X)
        assert isinstance(y_pred, np.ndarray)
        assert (y_pred.shape == y.shape)
        assert np.allclose(y_pred, np.zeros_like(y))

    def test_set_params_and_fit(self, qnn_classifier, data):
        """Fitting still works after ``set_params`` changes the number of pqc parameters."""
        (X, y) = data
        qnn_classifier.set_params(num_layers=3)
        qnn_classifier.fit(X, y)
        assert qnn_classifier._is_fitted
        assert (not np.allclose(qnn_classifier.param, qnn_classifier.param_ini))
        assert (not np.allclose(qnn_classifier.param_op, qnn_classifier.param_op_ini))
|
class TestQNNRegressor():
    """Test class for QNNRegressor.

    NOTE(review): fixtures are module-scoped, so fitted state and parameter
    changes carry over between the tests of this class.
    """

    @pytest.fixture(scope='module')
    def data(self) -> tuple[np.ndarray, np.ndarray]:
        """Single-feature regression data with features scaled to [0.1, 0.9]."""
        (X, y) = make_regression(n_samples=6, n_features=1, random_state=42)
        scl = MinMaxScaler((0.1, 0.9))
        X = scl.fit_transform(X, y)
        return (X, y)

    @pytest.fixture(scope='module')
    def qnn_regressor(self) -> QNNRegressor:
        """QNNRegressor with explicit (randomly drawn) initial parameters."""
        np.random.seed(42)
        executor = Executor('statevector_simulator')
        pqc = ChebyshevRx(num_qubits=2, num_features=1, num_layers=1)
        operator = SummedPaulis(num_qubits=2)
        loss = SquaredLoss()
        optimizer = SLSQP(options={'maxiter': 2})
        param_ini = np.random.rand(pqc.num_parameters)
        param_op_ini = np.random.rand(operator.num_parameters)
        return QNNRegressor(pqc, operator, executor, loss, optimizer, param_ini, param_op_ini)

    @pytest.fixture(scope='module')
    def qnn_regressor_2out(self) -> QNNRegressor:
        """QNNRegressor with two output operators and a fixed parameter seed."""
        executor = Executor('statevector_simulator')
        pqc = ChebyshevRx(num_qubits=2, num_features=1, num_layers=1)
        operator = [SummedPaulis(num_qubits=2), SummedPaulis(num_qubits=2)]
        loss = SquaredLoss()
        optimizer = SLSQP(options={'maxiter': 2})
        return QNNRegressor(pqc, operator, executor, loss, optimizer, parameter_seed=0)

    def test_predict_unfitted(self, qnn_regressor, data):
        """An unfitted regressor warns but still returns a correctly shaped prediction.

        Checks that ``_is_fitted`` is False, that a ``UserWarning`` is raised,
        and that the prediction output has the right type and shape.
        """
        (X, y) = data
        assert (not qnn_regressor._is_fitted)
        with pytest.warns(UserWarning, match='The model is not fitted.'):
            y_pred = qnn_regressor.predict(X)
        assert isinstance(y_pred, np.ndarray)
        assert (y_pred.shape == y.shape)

    def test_fit(self, qnn_regressor, data):
        """``fit`` sets ``_is_fitted`` and moves both parameter sets away from their initial values."""
        (X, y) = data
        qnn_regressor.fit(X, y)
        assert qnn_regressor._is_fitted
        assert (not np.allclose(qnn_regressor.param, qnn_regressor.param_ini))
        assert (not np.allclose(qnn_regressor.param_op, qnn_regressor.param_op_ini))

    def test_fit_2out(self, qnn_regressor_2out, data):
        """Same as ``test_fit`` but with two outputs (targets stacked column-wise)."""
        (X, y) = data
        y = np.array([y, y]).T
        qnn_regressor_2out.fit(X, y)
        assert qnn_regressor_2out._is_fitted
        assert (not np.allclose(qnn_regressor_2out.param, qnn_regressor_2out.param_ini))
        assert (not np.allclose(qnn_regressor_2out.param_op, qnn_regressor_2out.param_op_ini))

    def test_partial_fit(self, qnn_regressor, data):
        """Tests concerning the partial_fit function of the QNNRegressor.

        Two full ``fit`` calls must yield the same parameters, while a
        ``partial_fit`` in between must yield different ones.
        """
        (X, y) = data
        qnn_regressor.fit(X, y)
        param_1 = qnn_regressor.param
        qnn_regressor.partial_fit(X, y)
        param_2 = qnn_regressor.param
        qnn_regressor.fit(X, y)
        param_3 = qnn_regressor.param
        assert np.allclose(param_1, param_3)
        assert (not np.allclose(param_2, param_3))

    def test_fit_minibtach(self, qnn_regressor, data):
        """Mini-batch gradient-descent fit updates both parameter sets.

        NOTE(review): "minibtach" is a typo in the test name, kept for
        test-id stability.
        """
        (X, y) = data
        qnn_regressor._optimizer = Adam({'maxiter_total': 10, 'maxiter': 2, 'lr': 0.1})
        qnn_regressor.set_params(batch_size=2, epochs=2, shuffle=True)
        qnn_regressor.fit(X, y)
        assert qnn_regressor._is_fitted
        assert (not np.allclose(qnn_regressor.param, qnn_regressor.param_ini))
        assert (not np.allclose(qnn_regressor.param_op, qnn_regressor.param_op_ini))

    def test_fit_minibtach_2out(self, qnn_regressor_2out, data):
        """Mini-batch gradient-descent fit with two outputs updates both parameter sets."""
        (X, y) = data
        y = np.array([y, y]).T
        qnn_regressor_2out._optimizer = Adam({'maxiter_total': 10, 'maxiter': 2, 'lr': 0.1})
        qnn_regressor_2out.set_params(batch_size=2, epochs=2, shuffle=True)
        qnn_regressor_2out.fit(X, y)
        assert qnn_regressor_2out._is_fitted
        assert (not np.allclose(qnn_regressor_2out.param, qnn_regressor_2out.param_ini))
        assert (not np.allclose(qnn_regressor_2out.param_op, qnn_regressor_2out.param_op_ini))

    def test_predict(self, qnn_regressor, data):
        """``predict`` with hand-set parameters reproduces the hard-coded reference values."""
        (X, y) = data
        qnn_regressor._param = np.linspace(0.1, 0.4, 4)
        qnn_regressor._param_op = np.linspace(0.1, 0.3, 3)
        qnn_regressor._is_fitted = True
        y_pred = qnn_regressor.predict(X)
        assert isinstance(y_pred, np.ndarray)
        assert (y_pred.shape == y.shape)
        assert np.allclose(y_pred, np.array([0.50619332, 0.4905991, 0.51004432, 0.48826691, 0.5372742, 0.48826651]))

    def test_set_params_and_fit(self, qnn_regressor, data):
        """Fitting still works after ``set_params`` changes the number of pqc parameters."""
        (X, y) = data
        qnn_regressor.set_params(num_layers=3)
        qnn_regressor.fit(X, y)
        assert qnn_regressor._is_fitted
        assert (not np.allclose(qnn_regressor.param, qnn_regressor.param_ini))
        assert (not np.allclose(qnn_regressor.param_op, qnn_regressor.param_op_ini))
|
class TestSolvemini_batch():
    """Tests for mini-batch gradient descent."""

    # Shared, read-only test objects built once at class-definition time.
    pqc = ChebyshevPQC(4, 1, 3, False)
    cost_op = SummedPaulis(4)
    qnn = QNN(pqc, cost_op, executor)
    _grid = np.arange(0.1, 0.9, 0.01)
    ex_1 = [_grid, np.log(_grid)]

    def test_wrong_optimizer(self):
        """Test for error caused by wrong optimizer type."""
        initial_params = np.random.rand(self.qnn.num_parameters) * 4
        initial_op_params = np.ones(self.qnn.num_qubits + 1)
        with pytest.raises(TypeError, match='is not supported for mini-batch gradient descent.'):
            train_mini_batch(
                self.qnn,
                self.ex_1[0],
                self.ex_1[1],
                initial_params,
                initial_op_params,
                loss=SquaredLoss(),
                optimizer=SLSQP(),
                batch_size=10,
                epochs=30,
            )
|
class TestShotsFromRSTD():
    """Tests for ShotsFromRSTD."""

    def test_qnn_training(self):
        """Test a optimization with variance reduction and shots from RSTD."""
        encoding = ChebyshevPQC(2, 1, 3, False)
        observable = SummedPaulis(2)
        executor = Executor('qasm_simulator', primitive_seed=0)
        model = QNNRegressor(
            encoding,
            observable,
            executor,
            SquaredLoss(),
            Adam(options={'lr': 0.3, 'maxiter': 3}),
            variance=0.005,
            shot_control=ShotsFromRSTD(),
            parameter_seed=0,
        )
        x_train = np.arange(-0.2, 0.3, 0.1)
        y_train = np.abs(x_train)
        model.fit(x_train, y_train)
        prediction = model.predict(x_train)
        # Hard-coded regression reference for the seeded run above.
        reference = np.array([0.31176001, 0.09348281, -0.05118243, -0.25693387, -0.43025503])
        assert np.allclose(prediction, reference, atol=0.001)

    def test_qnn_training_two_outputs(self):
        """Test a optimization with variance reduction and shots from RSTD with two outputs."""
        encoding = ChebyshevPQC(2, 1, 3, False)
        observables = [SummedPaulis(2), SummedPaulis(2)]
        executor = Executor('qasm_simulator', primitive_seed=0)
        model = QNNRegressor(
            encoding,
            observables,
            executor,
            SquaredLoss(),
            Adam(options={'lr': 0.3, 'maxiter': 3}),
            variance=0.005,
            shot_control=ShotsFromRSTD(),
            parameter_seed=0,
        )
        x_train = np.arange(-0.2, 0.3, 0.1)
        y_train = np.array([np.abs(x_train), np.square(x_train)]).T
        model.fit(x_train, y_train)
        prediction = model.predict(x_train)
        # Hard-coded regression reference for the seeded run above.
        reference = np.array(
            [
                [0.09296101, 0.08074864],
                [0.12179584, 0.08045381],
                [0.06871516, 0.06971483],
                [0.08291836, 0.05942195],
                [0.09998995, 0.05452198],
            ]
        )
        assert np.allclose(prediction, reference, atol=0.001)
|
class TestZeroParam():
    """Tests for zero number of parameters in both observable and encoding circuit."""

    def _build_qnn_setup(self, pqc, ob, test_case: str):
        """Helper that builds the model and training data for one test case.

        Args:
            pqc: encoding circuit
            ob: observable
            test_case (str): either 'QNNRegressor' or 'QNNClassifier'

        Returns:
            Tuple of (model, training features, training targets).
        """
        executor = Executor('statevector_simulator')
        # Both branches use the same feature grid; only the targets differ.
        x_train = np.arange(-0.2, 0.3, 0.1)
        if test_case == 'QNNRegressor':
            model = QNNRegressor(pqc, ob, executor, SquaredLoss(), SLSQP({'maxiter': 10}), variance=0.005, parameter_seed=0)
            y_train = np.abs(x_train)
        else:
            model = QNNClassifier(pqc, ob, executor, SquaredLoss(), SLSQP({'maxiter': 10}), variance=0.005, parameter_seed=0)
            y_train = np.array([0, 1, 1, 0, 0])
        return (model, x_train, y_train)

    @pytest.mark.parametrize('test_case', ['QNNRegressor', 'QNNClassifier'])
    def test_zero_param_ob(self, test_case):
        """Test for zero number of parameters in observable."""
        expected = {
            'QNNRegressor': np.array([0.11503425, 0.10989764, 0.11377155, 0.12618358, 0.14544058]),
            'QNNClassifier': np.array([0, 0, 0, 0, 0]),
        }
        pqc = ChebyshevPQC(2, 1, 1)
        ob = SinglePauli(2, 0, 'Z')
        (model, x_train, y_train) = self._build_qnn_setup(pqc, ob, test_case)
        assert model.num_parameters_observable == 0
        model.fit(x_train, y_train)
        assert np.allclose(model.predict(x_train), expected[test_case], atol=1e-06)

    @pytest.mark.parametrize('test_case', ['QNNRegressor', 'QNNClassifier'])
    def test_zero_param(self, test_case):
        """Test for zero number of parameters in encoding circuit."""
        expected = {
            'QNNRegressor': np.array([0.12, 0.12, 0.12, 0.12, 0.12]),
            'QNNClassifier': np.array([1, 0, 0, 0, 0]),
        }
        pqc = HighDimEncodingCircuit(2, 1)
        ob = SummedPaulis(2)
        (model, x_train, y_train) = self._build_qnn_setup(pqc, ob, test_case)
        assert model.num_parameters == 0
        model.fit(x_train, y_train)
        assert np.allclose(model.predict(x_train), expected[test_case], atol=1e-06)

    @pytest.mark.parametrize('test_case', ['QNNRegressor', 'QNNClassifier'])
    def test_all_zero(self, test_case):
        """Test for zero number of parameters in both observable and encoding circuit."""
        expected = {
            'QNNRegressor': np.array([0.19470917, 0.09933467, 0.0, -0.09933467, -0.19470917]),
            'QNNClassifier': np.array([0, 0, 0, 0, 0]),
        }
        pqc = HighDimEncodingCircuit(2, 1)
        ob = SinglePauli(2, 0, 'Z')
        (model, x_train, y_train) = self._build_qnn_setup(pqc, ob, test_case)
        assert model.num_parameters_observable == 0
        assert model.num_parameters == 0
        model.fit(x_train, y_train)
        assert np.allclose(model.predict(x_train), expected[test_case], atol=1e-06)
|
class TestOpTreeDerivative():
    """Test class for OpTree derivatives."""

    def test_derivative(self):
        """Compares analytical OpTree derivatives with numerical gradients."""
        p = ParameterVector('p', 1)
        qc = QuantumCircuit(2)
        qc.rx(2.0 * p[0], 0)
        qc.rx(10.0 * np.arccos(p[0]), 1)
        qc.cx(0, 1)
        operator = SparsePauliOp(['IZ', 'ZI'])
        p_val = np.arange(-0.5, 0.5, 0.01)
        p_array = [{p[0]: p_} for p_ in p_val]
        val = OpTree.evaluate.evaluate_with_estimator(qc, operator, p_array, {}, Estimator())
        # First derivative vs. numerical gradient (endpoints dropped).
        qc_d = OpTree.derivative.differentiate(qc, p[0])
        val_d = OpTree.evaluate.evaluate_with_estimator(qc_d, operator, p_array, {}, Estimator())
        # Second derivative vs. numerical gradient of the first derivative.
        qc_dd = OpTree.derivative.differentiate(qc_d, p[0])
        val_dd = OpTree.evaluate.evaluate_with_estimator(qc_dd, operator, p_array, {}, Estimator())
        assert np.linalg.norm(np.abs(np.gradient(val, p_val)[1:-1] - val_d[1:-1])) < 0.15
        assert np.linalg.norm(np.abs(np.gradient(val_d, p_val)[2:-2] - val_dd[2:-2])) < 1.5

    def test_qc_gradient(self):
        """Tests circuit gradients of both differentiate implementations against references."""
        p = ParameterVector('p', 4)
        x = ParameterVector('x', 1)
        qc = QuantumCircuit(2)
        qc.rx(p[0] * np.arccos(x[0]), 0)
        qc.rx(p[1] * np.arccos(x[0]), 1)
        qc.ry(p[2], 0)
        qc.ry(p[3], 1)
        qc.rxx(p[0] * np.arccos(x[0]), 0, 1)
        operator = SparsePauliOp(['IZ', 'ZI'])
        dictionary = {x[0]: 0.5, p[0]: 1.5, p[1]: 2.5, p[2]: 0.5, p[3]: 0.25}
        # Gradient with respect to the circuit parameters p.
        qc_grad = OpTree.derivative.differentiate(qc, p)
        qc_grad_v2 = OpTree.derivative.differentiate_v2(qc, p)
        reference_grad = np.array([1.12973299, 0.12954041, 5.55111512e-17, 0.839102771])
        assert np.allclose(OpTree.evaluate.evaluate_with_estimator(qc_grad, operator, dictionary, {}, Estimator()), reference_grad)
        assert np.allclose(OpTree.evaluate.evaluate_with_estimator(qc_grad_v2, operator, dictionary, {}, Estimator()), reference_grad)
        # Gradient with respect to the feature x.
        qc_dx = OpTree.derivative.differentiate(qc, x)
        qc_dx_v2 = OpTree.derivative.differentiate_v2(qc, x)
        reference_dx = np.array([-2.22566018])
        assert np.allclose(OpTree.evaluate.evaluate_with_estimator(qc_dx, operator, dictionary, {}, Estimator()), reference_dx)
        assert np.allclose(OpTree.evaluate.evaluate_with_estimator(qc_dx_v2, operator, dictionary, {}, Estimator()), reference_dx)

    def test_operator_gradient(self):
        """Tests operator gradients via both the estimator and the sampler path."""
        p = ParameterVector('p', 4)
        dictionary_p = {p[0]: 1.5, p[1]: 2.5, p[2]: 0.5, p[3]: 0.25}
        operator = SparsePauliOp(['IZ', 'ZI', 'IX', 'XI'], [p[0], p[1], p[2], p[3]])
        operator = operator.power(2)
        qc = QuantumCircuit(2)
        qc.h([0, 1])
        op_grad = OpTree.derivative.differentiate(operator, p)
        op_grad_v2 = OpTree.derivative.differentiate_v2(operator, p)
        reference_values = np.array([3.0, 5.0, 1.5, 1.5])
        assert np.allclose(OpTree.evaluate.evaluate_with_estimator(qc, op_grad, {}, dictionary_p, Estimator()), reference_values)
        assert np.allclose(OpTree.evaluate.evaluate_with_estimator(qc, op_grad_v2, {}, dictionary_p, Estimator()), reference_values)
        # Sampler path requires operators transformed to the Z basis first.
        operator_z = OpTree.evaluate.transform_to_zbasis(operator)
        op_grad_z = OpTree.derivative.differentiate(operator_z, p)
        op_grad_z_v2 = OpTree.derivative.differentiate_v2(operator_z, p)
        assert np.allclose(OpTree.evaluate.evaluate_with_sampler(qc, op_grad_z, {}, dictionary_p, Sampler()), reference_values)
        assert np.allclose(OpTree.evaluate.evaluate_with_sampler(qc, op_grad_z_v2, {}, dictionary_p, Sampler()), reference_values)
        # Differentiate-then-transform must agree with transform-then-differentiate.
        assert np.allclose(OpTree.evaluate.evaluate_with_sampler(qc, OpTree.evaluate.transform_to_zbasis(op_grad), {}, dictionary_p, Sampler()), reference_values)
        assert np.allclose(OpTree.evaluate.evaluate_with_sampler(qc, OpTree.evaluate.transform_to_zbasis(op_grad_v2), {}, dictionary_p, Sampler()), reference_values)
|
class TestOpTreeEvaluation():
    """Test class for OpTree evaluation"""

    @pytest.fixture(scope='module')
    def _create_random_circuits(self) -> OpTreeList:
        """Creates the two random circuits used in the tests."""
        circuit1 = random_circuit(2, 2, seed=2).decompose(reps=1)
        circuit2 = random_circuit(2, 2, seed=0).decompose(reps=1)
        return OpTreeList([circuit1, circuit2])

    @pytest.fixture(scope='module')
    def _create_param_circuits(self) -> Tuple[(OpTreeList, List[dict])]:
        """Creates two parameterized circuits plus two parameter dictionaries."""
        p = ParameterVector('p', 2)
        circuit1 = QuantumCircuit(2)
        circuit1.rx(p[0], 0)
        circuit1.rx(p[1], 1)
        circuit2 = QuantumCircuit(2)
        circuit2.ry(p[0], 0)
        circuit2.ry(p[1], 1)
        dictionary1 = {p[0]: 0.25, p[1]: 0.5}
        dictionary2 = {p[0]: 0.33, p[1]: 0.44}
        return (OpTreeList([circuit1, circuit2]), [dictionary1, dictionary2])

    @pytest.fixture(scope='module')
    def _create_operator_z(self) -> Tuple[(OpTreeSum, List[dict])]:
        """Creates the Z-based operators and coefficient dictionaries used in the tests."""
        x = ParameterVector('x', 2)
        observable1 = SparsePauliOp(['IZ', 'ZI'], [x[0], x[1]])
        observable2 = SparsePauliOp(['II', 'ZZ'], [x[0], x[1]])
        observable = OpTreeSum([observable1, observable2])
        dictionary1 = {x[0]: 1.0, x[1]: 0.5}
        dictionary2 = {x[0]: 0.3, x[1]: 0.2}
        return (observable, [dictionary1, dictionary2])

    @pytest.fixture(scope='module')
    def _create_operator_xy(self) -> Tuple[(OpTreeSum, dict)]:
        """Creates the XY-based operators and a coefficient dictionary used in the tests."""
        x = ParameterVector('x', 2)
        observable1 = SparsePauliOp(['XY', 'YX'], [x[0], x[1]])
        observable2 = SparsePauliOp(['ZZ', 'YY'], [x[0], x[1]])
        observable = OpTreeSum([observable1, observable2])
        dictionary = {x[0]: 1.0, x[1]: 0.5}
        return (observable, dictionary)

    def test_estimator_z(self, _create_random_circuits, _create_operator_z):
        """Tests the estimator with Z-basis operators.

        Args:
            _create_random_circuits (OpTreeList): The circuits.
            _create_operator_z (Tuple[OpTreeSum, List[dict]]): The operators and dictionaries.
        """
        reference_values = np.array([1.0500197668853382, 1.2589029364313136])
        val = OpTree.evaluate.evaluate_with_estimator(_create_random_circuits, _create_operator_z[0], {}, _create_operator_z[1][0], Estimator())
        assert np.allclose(val, reference_values)
        # The explicit expectation-tree path must agree with the direct evaluation.
        expectation_tree = OpTree.gen_expectation_tree(_create_random_circuits, _create_operator_z[0])
        val = OpTree.evaluate.evaluate_tree_with_estimator(expectation_tree, _create_operator_z[1][0], Estimator())
        assert np.allclose(val, reference_values)

    def test_sampler_z(self, _create_random_circuits, _create_operator_z):
        """Tests the sampler with Z-basis operators.

        Args:
            _create_random_circuits (OpTreeList): The circuits.
            _create_operator_z (Tuple[OpTreeSum, List[dict]]): The operators and dictionaries.
        """
        reference_values = np.array([1.0500197668853386, 1.258902936431313])
        val = OpTree.evaluate.evaluate_with_sampler(_create_random_circuits, _create_operator_z[0], {}, _create_operator_z[1][0], Sampler())
        assert np.allclose(val, reference_values)
        expectation_tree = OpTree.gen_expectation_tree(_create_random_circuits, _create_operator_z[0])
        val = OpTree.evaluate.evaluate_tree_with_sampler(expectation_tree, _create_operator_z[1][0], Sampler())
        assert np.allclose(val, reference_values)

    def test_estimator_xy(self, _create_random_circuits, _create_operator_xy):
        """Tests the estimator with XY-basis operators.

        Args:
            _create_random_circuits (OpTreeList): The circuits.
            _create_operator_xy (Tuple[OpTreeSum, dict]): The operators and dictionary.
        """
        reference_values = np.array([(- 0.299986822076441), (- 0.5531057723847069)])
        val = OpTree.evaluate.evaluate_with_estimator(_create_random_circuits, _create_operator_xy[0], {}, _create_operator_xy[1], Estimator())
        assert np.allclose(val, reference_values)
        expectation_tree = OpTree.gen_expectation_tree(_create_random_circuits, _create_operator_xy[0])
        val = OpTree.evaluate.evaluate_tree_with_estimator(expectation_tree, _create_operator_xy[1], Estimator())
        assert np.allclose(val, reference_values)

    def test_sampler_xy(self, _create_random_circuits, _create_operator_xy):
        """Tests the sampler with XY-basis operators.

        The sampler only handles Z-basis operators: it must raise a
        ``ValueError`` for raw XY operators and succeed after
        ``transform_to_zbasis``.

        Args:
            _create_random_circuits (OpTreeList): The circuits.
            _create_operator_xy (Tuple[OpTreeSum, dict]): The operators and dictionary.
        """
        reference_values = np.array([(- 0.299986822076441), (- 0.5531057723847069)])
        with pytest.raises(ValueError):
            OpTree.evaluate.evaluate_with_sampler(_create_random_circuits, _create_operator_xy[0], {}, _create_operator_xy[1], Sampler())
        op_in_z_base = OpTree.evaluate.transform_to_zbasis(_create_operator_xy[0])
        val = OpTree.evaluate.evaluate_with_sampler(_create_random_circuits, op_in_z_base, {}, _create_operator_xy[1], Sampler())
        assert np.allclose(val, reference_values)
        expectation_tree = OpTree.gen_expectation_tree(_create_random_circuits, _create_operator_xy[0])
        with pytest.raises(ValueError):
            OpTree.evaluate.evaluate_tree_with_sampler(expectation_tree, _create_operator_xy[1], Sampler())
        expectation_tree_in_z_base = OpTree.evaluate.transform_to_zbasis(expectation_tree)
        val = OpTree.evaluate.evaluate_tree_with_sampler(expectation_tree_in_z_base, _create_operator_xy[1], Sampler())
        assert np.allclose(val, reference_values)

    def test_estimator_multi_dict(self, _create_param_circuits, _create_operator_z):
        """Checks the functionality of the estimator with multiple dictionaries.

        Args:
            _create_param_circuits (Tuple[OpTreeList, List[dict]]): The circuits and dictionaries.
            _create_operator_z (Tuple[OpTreeSum, List[dict]]): The operators and dictionaries.
        """
        reference_values = np.array([[[2.83285403, 2.83285403], [0.93625037, 0.93625037]], [[2.82638487, 2.82638487], [0.93594971, 0.93594971]]])
        val = OpTree.evaluate.evaluate_with_estimator(_create_param_circuits[0], _create_operator_z[0], _create_param_circuits[1], _create_operator_z[1], Estimator())
        assert np.allclose(val, reference_values)
        # dictionaries_combined=True pairs circuit and operator dictionaries element-wise.
        reference_values = np.array([[2.83285403, 2.83285403], [0.93594971, 0.93594971]])
        val = OpTree.evaluate.evaluate_with_estimator(_create_param_circuits[0], _create_operator_z[0], _create_param_circuits[1], _create_operator_z[1], Estimator(), dictionaries_combined=True)
        assert np.allclose(val, reference_values)

    def test_sampler_multi_dict(self, _create_param_circuits, _create_operator_z):
        """Checks the functionality of the sampler with multiple dictionaries.

        Args:
            _create_param_circuits (Tuple[OpTreeList, List[dict]]): The circuits and dictionaries.
            _create_operator_z (Tuple[OpTreeSum, List[dict]]): The operators and dictionaries.
        """
        reference_values = np.array([[[2.83285403, 2.83285403], [0.93625037, 0.93625037]], [[2.82638487, 2.82638487], [0.93594971, 0.93594971]]])
        val = OpTree.evaluate.evaluate_with_sampler(_create_param_circuits[0], _create_operator_z[0], _create_param_circuits[1], _create_operator_z[1], Sampler())
        assert np.allclose(val, reference_values)
        # dictionaries_combined=True pairs circuit and operator dictionaries element-wise.
        reference_values = np.array([[2.83285403, 2.83285403], [0.93594971, 0.93594971]])
        val = OpTree.evaluate.evaluate_with_sampler(_create_param_circuits[0], _create_operator_z[0], _create_param_circuits[1], _create_operator_z[1], Sampler(), dictionaries_combined=True)
        assert np.allclose(val, reference_values)
|
class TestExecutor():
@pytest.fixture(scope='module')
def ExecutorSampler(self) -> Executor:
    """Executor built from a ``Sampler`` primitive."""
    primitive = Sampler()
    return Executor(primitive, primitive_seed=0)
@pytest.fixture(scope='module')
def ExecutorEstimator(self) -> Executor:
    """Executor built from an ``Estimator`` primitive."""
    primitive = Estimator()
    return Executor(primitive, primitive_seed=0)
@pytest.fixture(scope='module')
def ExecutorStatevector(self) -> Executor:
    """Executor built from the 'statevector_simulator' backend string."""
    backend_name = 'statevector_simulator'
    return Executor(backend_name, primitive_seed=0)
@pytest.fixture(scope='module')
def ExecutorQasm(self) -> Executor:
    """Executor built from the 'qasm_simulator' backend string."""
    backend_name = 'qasm_simulator'
    return Executor(backend_name, primitive_seed=0)
@pytest.fixture(scope='module')
def ExecutorBackendSampler(self) -> Executor:
    """Executor built from a ``BackendSampler`` wrapping the qasm simulator."""
    backend = Aer.get_backend('qasm_simulator')
    return Executor(BackendSampler(backend), primitive_seed=0)
@pytest.fixture(scope='module')
def ExecutorBackendEstimator(self) -> Executor:
    """Executor built from a ``BackendEstimator`` wrapping the qasm simulator."""
    backend = Aer.get_backend('qasm_simulator')
    return Executor(BackendEstimator(backend), primitive_seed=0)
@pytest.fixture(scope='module')
def simple_circuit(self):
    """Two-qubit test circuit that flips both qubits (prepares |11>)."""
    circuit = QuantumCircuit(2)
    circuit.x([0, 1])
    return circuit
@pytest.fixture(scope='module')
def observable(self):
    """Two-qubit ZZ Pauli observable used by the estimator tests."""
    zz_observable = SparsePauliOp('ZZ')
    return zz_observable
@pytest.mark.parametrize('executor_str', ['ExecutorSampler', 'ExecutorEstimator', 'ExecutorStatevector', 'ExecutorQasm', 'ExecutorBackendSampler', 'ExecutorBackendEstimator'])
def test_shots(self, executor_str, request):
    """Tests of the default shots and the set_shots method work."""
    expected_defaults = {
        'ExecutorSampler': None,
        'ExecutorEstimator': None,
        'ExecutorStatevector': None,
        'ExecutorQasm': 1024,
        'ExecutorBackendSampler': 1024,
        'ExecutorBackendEstimator': 1024,
    }
    executor = request.getfixturevalue(executor_str)
    assert executor.shots == expected_defaults[executor_str]
    executor.set_shots(1234)
    assert executor.shots == 1234
    assert executor.get_shots() == 1234
@pytest.mark.parametrize('executor_str', ['ExecutorSampler', 'ExecutorEstimator', 'ExecutorStatevector', 'ExecutorQasm', 'ExecutorBackendSampler', 'ExecutorBackendEstimator'])
def test_sampler(self, executor_str, request, simple_circuit):
'Tests the Executor Sampler Primitive'
assert_dict = {'ExecutorSampler': {3: 1.0}, 'ExecutorEstimator': {3: 1.0}, 'ExecutorStatevector': {3: 1.0}, 'ExecutorQasm': {3: 1.0}, 'ExecutorBackendSampler': {3: 1.0}, 'ExecutorBackendEstimator': {3: 1.0}}
executor = request.getfixturevalue(executor_str)
executor.set_shots(100)
circuit = simple_circuit.measure_all(inplace=False)
res = executor.get_sampler().run(circuit).result()
assert (res.metadata[0]['shots'] == 100)
assert (res.quasi_dists[0] == assert_dict[executor_str])
@pytest.mark.parametrize('executor_str', ['ExecutorSampler', 'ExecutorEstimator', 'ExecutorStatevector', 'ExecutorQasm', 'ExecutorBackendSampler', 'ExecutorBackendEstimator'])
def test_executor(self, executor_str, request, simple_circuit, observable):
'Tests the Executor Estimator Primitive'
assert_dict = {'ExecutorSampler': np.array([1.0]), 'ExecutorEstimator': np.array([1.0]), 'ExecutorStatevector': np.array([1.0]), 'ExecutorQasm': np.array([1.0]), 'ExecutorBackendSampler': np.array([1.0]), 'ExecutorBackendEstimator': np.array([1.0])}
executor = request.getfixturevalue(executor_str)
executor.set_shots(100)
res = executor.get_estimator().run(simple_circuit, observable).result()
assert (res.metadata[0]['shots'] == 100)
assert np.allclose(assert_dict[executor_str], res.values[0])
|
class kernel():
    """Collection of morphological kernels (full squares, crosses and diamonds)."""

    # Square all-ones kernels of increasing size.
    FULL_KERNEL_3 = np.ones((3, 3), np.uint8)
    FULL_KERNEL_5 = np.ones((5, 5), np.uint8)
    FULL_KERNEL_7 = np.ones((7, 7), np.uint8)
    FULL_KERNEL_9 = np.ones((9, 9), np.uint8)
    FULL_KERNEL_31 = np.ones((31, 31), np.uint8)

    def cross_kernel_3(self):
        """Builds a 3x3 cross-shaped kernel, stores it on the instance and returns it."""
        self.CROSS_KERNEL_3 = np.asarray(
            [[0, 1, 0],
             [1, 1, 1],
             [0, 1, 0]], dtype=np.uint8)
        return self.CROSS_KERNEL_3

    def cross_kernel_5(self):
        """Builds a 5x5 cross-shaped kernel, stores it on the instance and returns it."""
        self.CROSS_KERNEL_5 = np.asarray(
            [[0, 0, 1, 0, 0],
             [0, 0, 1, 0, 0],
             [1, 1, 1, 1, 1],
             [0, 0, 1, 0, 0],
             [0, 0, 1, 0, 0]], dtype=np.uint8)
        return self.CROSS_KERNEL_5

    def diamond_kernel_5(self):
        """Builds a 5x5 diamond-shaped kernel, stores it on the instance and returns it."""
        self.DIAMOND_KERNEL_5 = np.array(
            [[0, 0, 1, 0, 0],
             [0, 1, 1, 1, 0],
             [1, 1, 1, 1, 1],
             [0, 1, 1, 1, 0],
             [0, 0, 1, 0, 0]], dtype=np.uint8)
        return self.DIAMOND_KERNEL_5

    def cross_kernel_7(self):
        """Builds a 7x7 cross-shaped kernel, stores it on the instance and returns it."""
        self.CROSS_KERNEL_7 = np.asarray(
            [[0, 0, 0, 1, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0],
             [1, 1, 1, 1, 1, 1, 1],
             [0, 0, 0, 1, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0]], dtype=np.uint8)
        return self.CROSS_KERNEL_7

    def diamond_kernel_7(self):
        """Builds a 7x7 diamond-shaped kernel, stores it on the instance and returns it."""
        self.DIAMOND_KERNEL_7 = np.asarray(
            [[0, 0, 0, 1, 0, 0, 0],
             [0, 0, 1, 1, 1, 0, 0],
             [0, 1, 1, 1, 1, 1, 0],
             [1, 1, 1, 1, 1, 1, 1],
             [0, 1, 1, 1, 1, 1, 0],
             [0, 0, 1, 1, 1, 0, 0],
             [0, 0, 0, 1, 0, 0, 0]], dtype=np.uint8)
        return self.DIAMOND_KERNEL_7
|
class DepthCompletion():
    """Runs depth completion on the KITTI validation crops and saves/visualizes results."""

    def __init__(self):
        # NOTE(review): Windows-style relative paths; adjust separators for other platforms.
        self.main_img_path = os.path.expanduser('dataset\\kitti_validation_cropped\\image')
        self.input_depth_dir = os.path.expanduser('dataset\\kitti_validation_cropped\\velodyne_raw')
        # Preview window size as (width, height).
        self.img_size = (450, 130)

    def save_for_evaluation(self, sufficient_depth, img_name):
        """Saves the completed depth map (raw values) for the evaluation script."""
        path = 'outputs/kitti/depth_for_evaluation/'
        cv2.imwrite((path + img_name), sufficient_depth)

    def save_final_outputs(self, img, img_name):
        """Saves a JET-colormapped visualization of the completed depth map."""
        path = 'outputs/kitti/final_output/'
        img = cv2.applyColorMap(np.uint8(((img / np.amax(img)) * 255)), cv2.COLORMAP_JET)
        cv2.imwrite((path + img_name), img)

    def process(self):
        """Completes every depth map, shows/saves results, then prints metrics."""
        # BUGFIX: os.listdir returns entries in arbitrary order, but RGB images
        # and depth maps are paired by index below -- sort both listings so the
        # i-th image and the i-th depth map belong to the same frame.
        main_img_pathes = sorted(os.listdir(self.main_img_path))
        main_image_list = []
        for item in main_img_pathes:
            main_image_list.append(cv2.imread(((self.main_img_path + '/') + item)))
        img_pathes = sorted(os.listdir(self.input_depth_dir))
        image_list = []
        for item in img_pathes:
            # IMREAD_ANYDEPTH keeps the 16-bit depth encoding.
            image_list.append(cv2.imread(((self.input_depth_dir + '/') + item), cv2.IMREAD_ANYDEPTH))
        num_images = len(image_list)
        for i in range(num_images):
            depth_image = image_list[i]
            main_image = main_image_list[i]
            # KITTI stores depth in uint16 with a factor of 256; here /255.0 is
            # kept as in the original pipeline -- confirm the intended scale.
            projected_depths = np.float32((depth_image / 255.0))
            (final_depths, process_dict) = design_depth_map.create_map(main_image, projected_depths, show_process=True)
            self.show_result(process_dict, main_image)
            self.save_for_evaluation(process_dict['s9_depths_out'], img_pathes[i])
            self.save_final_outputs(process_dict['s9_depths_out'], img_pathes[i])
        # Imported lazily so metrics' module-level state is built after the
        # outputs above have been written.
        import metrics
        metrics.print_metrics()

    def show_image(self, window_name, image, size_wh=None, location_xy=None):
        """Shows `image` in a named window, optionally resized and repositioned."""
        if (size_wh is not None):
            cv2.namedWindow(window_name, (cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL))
            cv2.resizeWindow(window_name, *size_wh)
        else:
            cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
        if (location_xy is not None):
            cv2.moveWindow(window_name, *location_xy)
        cv2.imshow(window_name, image)

    def show_result(self, process_dict, main_image):
        """Tiles every intermediate result of the pipeline across the screen."""
        x_offset = self.img_size[0]
        y_offset = self.img_size[1]
        x_padding = 0
        y_padding = 28
        x_start = 0
        y_start = 100
        img_x = x_start
        img_y = y_start
        # Wrap to a new row once the next window would pass this x coordinate.
        max_x = 1500
        row_idx = 0
        for (key, value) in process_dict.items():
            if (key == 'main_image'):
                # The RGB image is shown as-is, without a colormap.
                image_jet = main_image
                self.show_image(key, image_jet, self.img_size, (img_x, img_y))
                img_x += (x_offset + x_padding)
                if (((img_x + x_offset) + x_padding) > max_x):
                    img_x = x_start
                    row_idx += 1
                    img_y = (y_start + (row_idx * (y_offset + y_padding)))
            else:
                image_jet = cv2.applyColorMap(np.uint8(((value / np.amax(value)) * 255)), cv2.COLORMAP_JET)
                self.show_image(key, image_jet, self.img_size, (img_x, img_y))
                img_x += (x_offset + x_padding)
                if (((img_x + x_offset) + x_padding) > max_x):
                    img_x = x_start
                    row_idx += 1
                    img_y = (y_start + (row_idx * (y_offset + y_padding)))
        # delay=1 keeps the GUI responsive without blocking.
        cv2.waitKey(delay=1)
|
class Metrics():
    """Depth-map error metrics."""

    def calculate_metrics_mm(self, output, gt_item):
        """Computes RMSE and MAE (in millimeters) between a prediction and ground truth.

        Only pixels whose ground-truth depth exceeds 0.1 (i.e. valid pixels)
        are evaluated; depths are assumed to be in meters and scaled by 1000.

        :returns: tuple (rmse, mae) in millimeters.
        """
        mask = (gt_item > 0.1)
        prediction_mm = 1000.0 * output[mask]
        truth_mm = 1000.0 * gt_item[mask]
        abs_error = np.abs(prediction_mm - truth_mm)
        rmse = np.sqrt(np.mean(abs_error ** 2))
        mae = np.mean(abs_error)
        return (rmse, mae)
|
def print_metrics():
    """Computes RMSE/MAE over all predictions and prints a summary.

    NOTE(review): relies on module-level lists `results` (predictions) and
    `gt` (ground truths) being populated elsewhere -- confirm before calling.
    """
    print('Calculating Metrics ....')
    metric_calc = Metrics()
    mae_values = []
    rmse_values = []
    for idx in range(len(gt)):
        (rmse_i, mae_i) = metric_calc.calculate_metrics_mm(results[idx], gt[idx])
        rmse_values.append(rmse_i)
        mae_values.append(mae_i)
    print('Evaluation Metrics : \n \nAverage RMSE = {h1} \nAverage MAE = {h2} \nMin RMSE = {min} \nMin MAE = {minmae} \nMax RMSE = {maxr} \nMax MAE = {maxm} '.format(h1=np.mean(rmse_values), h2=np.mean(mae_values), min=np.min(rmse_values), minmae=np.min(mae_values), maxr=np.max(rmse_values), maxm=np.max(mae_values)))
|
class Config(object):
    """Typed accessor for a JSON config file that allows '#' comment lines.

    Lines whose first non-whitespace character is '#' are blanked before the
    content is parsed as JSON, so error line numbers still match the file.
    Accessors validate the stored value's type and fall back to an explicit,
    type-checked default when the key is absent.
    """

    def __init__(self, filename):
        """Loads and parses the config file at `filename`."""
        # BUGFIX: use a context manager instead of leaking the file handle.
        with open(filename) as f:
            lines = f.readlines()
        # Replace comment lines with blank lines (keeps line numbering intact).
        lines = [(l if (not l.strip().startswith('#')) else '\n') for l in lines]
        s = ''.join(lines)
        # OrderedDict preserves the key order of the file.
        self._entries = json.loads(s, object_pairs_hook=OrderedDict)

    def has(self, key):
        """Returns True if `key` is present in the config."""
        return (key in self._entries)

    def _value(self, key, dtype, default):
        """Returns the `dtype`-typed value for `key`, or `default` if absent.

        Raises TypeError when the stored value has the wrong type; asserts
        that a default exists when the key is missing.
        """
        if (default is not None):
            assert isinstance(default, dtype)
        if (key in self._entries):
            val = self._entries[key]
            if isinstance(val, dtype):
                return val
            else:
                raise TypeError()
        else:
            assert (default is not None)
            return default

    def _list_value(self, key, dtype, default):
        """Returns a list whose elements are all `dtype`, or `default` if absent."""
        if (default is not None):
            assert isinstance(default, list)
            for x in default:
                assert isinstance(x, dtype)
        if (key in self._entries):
            val = self._entries[key]
            assert isinstance(val, list)
            for x in val:
                assert isinstance(x, dtype)
            return val
        else:
            assert (default is not None)
            return default

    def bool(self, key, default=None):
        """Returns a boolean config value."""
        return self._value(key, bool, default)

    def string(self, key, default=None):
        """Returns a string config value."""
        # (A Python-2 unicode/str conversion of the default used to live here;
        # it is a no-op on Python 3 and was removed.)
        return self._value(key, str, default)

    def int(self, key, default=None):
        """Returns an integer config value."""
        return self._value(key, int, default)

    def float(self, key, default=None):
        """Returns a float config value.

        Generalized: JSON integers are accepted and converted to float, since
        JSON does not distinguish `1` from `1.0` for a float-valued option.
        Booleans are still rejected with TypeError.
        """
        if (default is not None):
            assert isinstance(default, (int, float)) and (not isinstance(default, bool))
            default = float(default)
        if (key in self._entries):
            val = self._entries[key]
            if isinstance(val, (int, float)) and (not isinstance(val, bool)):
                return float(val)
            raise TypeError()
        assert (default is not None)
        return default

    def dict(self, key, default=None):
        """Returns a dict config value."""
        return self._value(key, dict, default)

    def int_key_dict(self, key, default=None):
        """Returns a dict with int keys, parsed from a string entry.

        NOTE(review): the stored string is passed to eval(); this is only
        safe with trusted config files.
        """
        if (default is not None):
            assert isinstance(default, dict)
            for k in list(default.keys()):
                assert isinstance(k, int)
        dict_str = self.string(key, '')
        if (dict_str == ''):
            assert (default is not None)
            res = default
        else:
            res = eval(dict_str)
        assert isinstance(res, dict)
        for k in list(res.keys()):
            assert isinstance(k, int)
        return res

    def int_list(self, key, default=None):
        """Returns a list of ints."""
        return self._list_value(key, int, default)

    def float_list(self, key, default=None):
        """Returns a list of floats."""
        return self._list_value(key, float, default)

    def string_list(self, key, default=None):
        """Returns a list of strings."""
        return self._list_value(key, str, default)

    def dir(self, key, default=None):
        """Returns a string value normalized to end with a trailing '/'."""
        p = self.string(key, default)
        if (p[(- 1)] != '/'):
            return (p + '/')
        else:
            return p
|
class Engine():
    """Top-level driver: builds datasets, networks and the trainer, then runs the task.

    The task is read from the config: 'train' / 'train_no_val' run the training
    loop (the latter without a validation pass), 'eval' runs a single
    validation epoch.
    """

    def __init__(self, config, session=None):
        self.config = config
        # Whether to write model checkpoints after each epoch.
        self.save = config.bool('save', True)
        self.task = config.string('task', 'train')
        self.dataset = config.string('dataset').lower()
        self.num_epochs = config.int('num_epochs', 1000)
        self.session = self._create_session(session)
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        # NOTE(review): hard-coded True, so the else branch below is dead code;
        # presumably kept for experiments that skip building the train net.
        need_train = True
        if need_train:
            self.train_data = load_dataset(config, 'train', self.session, self.dataset)
            freeze_batchnorm = config.bool('freeze_batchnorm', False)
            print('creating trainnet...', file=log.v1)
            self.train_network = Network(self.config, self.train_data, is_trainnet=True, freeze_batchnorm=freeze_batchnorm, name='trainnet')
        else:
            self.train_data = None
            self.train_network = None
        # Every task except 'train_no_val' also builds a validation network.
        need_val = (self.task != 'train_no_val')
        if need_val:
            self.valid_data = load_dataset(config, 'val', self.session, self.dataset)
            print('creating testnet...', file=log.v1)
            # NOTE(review): reuse is None when a train net exists, False
            # otherwise -- confirm this matches Network's reuse semantics.
            reuse_variables = (None if need_train else False)
            self.test_network = Network(config, self.valid_data, is_trainnet=False, freeze_batchnorm=True, name='testnet', reuse_variables=reuse_variables)
        else:
            self.valid_data = None
            self.test_network = None
        self.trainer = Trainer(config, self.train_network, self.test_network, self.global_step, self.session)
        self.saver = Saver(config, self.session)
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        # Resume from the latest checkpoint if one exists.
        self.start_epoch = self.saver.try_load_weights()
        # Freeze the graph so any later accidental op creation fails fast.
        self.session.graph.finalize()

    @staticmethod
    def _create_session(sess):
        """Returns `sess`, or creates an interactive session with GPU memory growth."""
        if (sess is None):
            sess_config = tf.ConfigProto(allow_soft_placement=True)
            sess_config.gpu_options.allow_growth = True
            sess = tf.InteractiveSession(config=sess_config)
        return sess

    def run(self):
        """Dispatches to train() or eval() based on the configured task."""
        if (self.task in ('train', 'train_no_val')):
            self.train()
        elif (self.task == 'eval'):
            self.eval()
        else:
            assert False, ('unknown task', self.task)

    def test_dataset_speed(self):
        """Measures raw input-pipeline throughput for one epoch (no train step)."""
        n_total = self.train_data.n_examples_per_epoch()
        batch_size = self.config.int('batch_size')
        input_tensors_dict = self.train_network.input_tensors_dict
        n_curr = 0
        with Timer(message='elapsed'):
            while (n_curr < n_total):
                # Only fetch the inputs; no forward or backward pass.
                self.session.run(input_tensors_dict)
                n_curr += batch_size
                print('{:>5}'.format(n_curr), '/', n_total)

    def train(self):
        """Runs the train / validate / checkpoint loop from start_epoch to num_epochs."""
        print('starting training', file=log.v1)
        for epoch in range(self.start_epoch, self.num_epochs):
            timer = Timer()
            train_measures = self.run_epoch(self.trainer.train_step, self.train_data, epoch, is_train_run=True)
            if (self.valid_data is not None):
                valid_measures = self.run_epoch(self.trainer.validation_step, self.valid_data, epoch, is_train_run=False)
            else:
                valid_measures = {}
            if self.save:
                self.saver.save_model((epoch + 1))
            if hasattr(self.train_data, 'save_masks'):
                self.train_data.save_masks((epoch + 1))
            elapsed = timer.elapsed()
            train_measures_str = measures_string_to_print(train_measures)
            val_measures_str = measures_string_to_print(valid_measures)
            print('epoch', (epoch + 1), 'finished. elapsed:', ('%.5f' % elapsed), 'train:', train_measures_str, 'valid:', val_measures_str, file=log.v1)

    def eval(self):
        """Runs a single validation epoch and prints the averaged measures."""
        timer = Timer()
        measures = self.run_epoch(self.trainer.validation_step, self.valid_data, epoch=0, is_train_run=False)
        elapsed = timer.elapsed()
        print('eval finished. elapsed:', elapsed, measures, file=log.v1)

    @staticmethod
    def run_epoch(step_fn, data, epoch, is_train_run):
        """Repeats `step_fn` until one epoch worth of examples is processed.

        Accumulates the per-step measures, invokes the dataset's optional
        per-epoch measure-saving hooks (validation only), and returns the
        averaged measures for the epoch.
        """
        n_examples_processed = 0
        n_examples_per_epoch = data.n_examples_per_epoch()
        extraction_keys = data.get_extraction_keys()
        measures_accumulated = {}
        if ((not is_train_run) and hasattr(data, 'prepare_saving_epoch_measures')):
            data.prepare_saving_epoch_measures((epoch + 1))
        while (n_examples_processed < n_examples_per_epoch):
            timer = Timer()
            n_examples_processed_total = ((n_examples_per_epoch * epoch) + n_examples_processed)
            res = step_fn(epoch, n_examples_processed_total=n_examples_processed_total, extraction_keys=extraction_keys)
            measures = res[Measures.MEASURES]
            n_examples_processed += measures[Measures.N_EXAMPLES]
            measures_str = measures_string_to_print(compute_measures_average(measures, for_final_result=False))
            accumulate_measures(measures_accumulated, measures)
            if ((not is_train_run) and hasattr(data, 'save_epoch_measures')):
                data.save_epoch_measures(measures)
            if hasattr(data, 'use_segmentation_mask'):
                data.use_segmentation_mask(res)
            elapsed = timer.elapsed()
            print('{:>5}'.format(n_examples_processed), '/', n_examples_per_epoch, measures_str, 'elapsed', elapsed, file=log.v5)
        measures_averaged = compute_measures_average(measures_accumulated, for_final_result=True)
        if ((not is_train_run) and hasattr(data, 'finalize_saving_epoch_measures')):
            new_measures = data.finalize_saving_epoch_measures()
            measures_averaged.update(new_measures)
        return measures_averaged
|
def accumulate_extractions(extractions_accumulator, *new_extractions):
    """Concatenates per-key extraction lists into the accumulator dict.

    An empty accumulator is seeded with the first dict (note: the value lists
    are shared, not copied); every further dict's lists are appended key-wise.
    Keys absent from the accumulator are ignored. Returns the accumulator, or
    None when no new extractions were given.
    """
    if not new_extractions:
        return
    if not extractions_accumulator:
        extractions_accumulator.update(new_extractions[0])
        new_extractions = new_extractions[1:]
    for extractions in new_extractions:
        for key in extractions_accumulator:
            extractions_accumulator[key] += extractions[key]
    return extractions_accumulator
|
class Stream():
    """File-like object that forwards complete lines to a logging.Logger.

    Writes are buffered until a bare newline is written, at which point the
    buffered text is emitted as one log record at level `lvl`. Usable as a
    `file=` target for print().
    """

    def __init__(self, log, lvl):
        '\n :type log: logging.Logger\n :type lvl: int\n '
        # BUGFIX: `StringIO.StringIO()` is the Python-2-only module spelling
        # (the StringIO module was removed in Python 3); use io.StringIO.
        from io import StringIO
        self.buf = StringIO()
        self.log = log
        self.lvl = lvl
        # Serializes concurrent writers (several threads may print at once).
        self.lock = RLock()

    def write(self, msg):
        """Buffers `msg`; a message consisting of a single newline triggers a flush."""
        with self.lock:
            if (msg == '\n'):
                self.flush()
            else:
                self.buf.write(msg)

    def flush(self):
        """Emits the buffered text as one log record and clears the buffer."""
        with self.lock:
            self.buf.flush()
            self.log.log(self.lvl, self.buf.getvalue())
            self.buf.truncate(0)
            self.buf.seek(0)
|
class Log(object):
    """Multi-verbosity logging facade.

    After initialize(), self.v[0]..self.v[5] are loggers of decreasing
    importance, and self.v0..self.v5 (plus self.error) are Stream wrappers
    around them usable as `file=` targets for print(). self.verbose[i] tells
    whether verbosity level i has any real handler attached.
    """

    def initialize(self, logs=None, verbosity=None, formatter=None):
        """Configures the log targets.

        :param logs: list of targets: 'stdout', '|cmd' to pipe into a command,
            or a file path in an existing directory. 'stdout' is always added.
        :param verbosity: per-target verbosity levels (0..5); a single entry
            applies to all targets; default is 3.
        :param formatter: per-target formatter names out of
            'default', 'timed', 'raw', 'verbose'.
        """
        # BUGFIX: the previous mutable default arguments ([]) were also
        # mutated below (logs.append), leaking state into the caller's list
        # and across calls. Use None sentinels and work on local copies.
        logs = list(logs) if (logs is not None) else []
        verbosity = list(verbosity) if (verbosity is not None) else []
        formatter = list(formatter) if (formatter is not None) else []
        fmt = {'default': logging.Formatter('%(message)s'), 'timed': logging.Formatter('%(asctime)s %(message)s', datefmt='%Y-%m-%d,%H:%M:%S.%MS'), 'raw': logging.Formatter('%(message)s'), 'verbose': logging.Formatter('%(levelname)s - %(asctime)s %(message)s', datefmt='%Y-%m-%d,%H:%M:%S.%MS')}
        self.v = [logging.getLogger(('v' + str(v))) for v in range(6)]
        # Drop handlers from any previous initialize() call.
        for l in self.v:
            l.handlers = []
        if (not ('stdout' in logs)):
            logs.append('stdout')
        for i in range(len(logs)):
            t = logs[i]
            v = 3
            if (i < len(verbosity)):
                v = verbosity[i]
            elif (len(verbosity) == 1):
                v = verbosity[0]
            assert (v <= 5), ('invalid verbosity: ' + str(v))
            # BUGFIX: dict.has_key() was removed in Python 3; use `in`.
            f = (fmt['default'] if ((i >= len(formatter)) or (formatter[i] not in fmt)) else fmt[formatter[i]])
            if (t == 'stdout'):
                handler = logging.StreamHandler(sys.stdout)
                handler.setLevel(logging.DEBUG)
            elif t.startswith('|'):
                # Pipe log output into a shell command's stdin.
                proc_cmd = t[1:].strip()
                from subprocess import Popen, PIPE
                proc = Popen(proc_cmd, shell=True, stdin=PIPE)
                handler = logging.StreamHandler(proc.stdin)
                handler.setLevel(logging.DEBUG)
            elif os.path.isdir(os.path.dirname(t)):
                handler = logging.FileHandler(t)
                handler.setLevel(logging.DEBUG)
            else:
                assert False, ('invalid log target %r' % t)
            handler.setFormatter(f)
            # A target with verbosity v receives every level 0..v.
            for j in range((v + 1)):
                if (not (handler in self.v[j].handlers)):
                    self.v[j].addHandler(handler)
        self.verbose = ([True] * 6)
        # Give handler-less levels a null handler so logging never warns.
        null = logging.FileHandler(os.devnull)
        for i in range(len(self.v)):
            self.v[i].setLevel(logging.DEBUG)
            if (not self.v[i].handlers):
                self.verbose[i] = False
                self.v[i].addHandler(null)
        self.error = Stream(self.v[0], logging.CRITICAL)
        self.v0 = Stream(self.v[0], logging.ERROR)
        self.v1 = Stream(self.v[1], logging.INFO)
        self.v2 = Stream(self.v[2], logging.INFO)
        self.v3 = Stream(self.v[3], logging.DEBUG)
        self.v4 = Stream(self.v[4], logging.DEBUG)
        self.v5 = Stream(self.v[5], logging.DEBUG)

    def write(self, msg):
        # NOTE(review): Log defines no `info` method, so this raises
        # AttributeError if ever called -- confirm the intended target
        # (possibly self.v1.write or similar).
        self.info(msg)
|
def accumulate_measures(measures_accumulator, *new_measures, exclude=[DET_BOXES, DET_PROBS, DET_LABELS, DET_MASKS, IMAGE_ID]):
    """Sums each new measures dict into the accumulator, key by key.

    Keys in `exclude` (detection outputs and image ids) are seeded from the
    first dict but not summed further. Returns the accumulator, or None when
    no new measures were given.
    """
    if not new_measures:
        return
    if not measures_accumulator:
        measures_accumulator.update(new_measures[0])
        new_measures = new_measures[1:]
    for key in measures_accumulator:
        if key in exclude:
            continue
        for meas in new_measures:
            measures_accumulator[key] += meas[key]
    return measures_accumulator
|
def compute_measures_average(measures, for_final_result, exclude=[DET_BOXES, DET_PROBS, DET_LABELS, DET_MASKS, IMAGE_ID]):
    """Divides every non-excluded measure by the example count and drops the count.

    NOTE(review): `for_final_result` is accepted but unused here -- kept for
    call-site compatibility.
    """
    n_examples = measures[N_EXAMPLES]
    measures_avg = {key: (value / n_examples) for (key, value) in measures.items() if (key not in exclude)}
    # The raw example count is not itself an averaged measure.
    del measures_avg[N_EXAMPLES]
    return measures_avg
|
def measures_string_to_print(measures, exclude=[DET_BOXES, DET_PROBS, DET_LABELS, DET_MASKS, IMAGE_ID]):
    """Formats the measures dict as a compact, sorted '{key: value, ...}' string."""
    printable_keys = sorted(k for k in measures.keys() if (k not in exclude))
    parts = []
    for idx, key in enumerate(printable_keys):
        parts.append('{}{}: {:8.5}'.format(('' if (idx == 0) else ', '), key, measures[key]))
    return ('{' + ''.join(parts) + '}')
|
def compute_measures_for_binary_segmentation_tf(predictions, targets):
    """Computes IoU/recall/precision for a batch of binary segmentations as TF ops.

    Wraps the NumPy implementation in tf.py_func; returns a dict mapping
    IOU/RECALL/PRECISION to scalar float32 tensors.
    """
    def f(ps, ts):
        meas = compute_measures_for_binary_segmentation_summed(ps, ts)
        # BUGFIX: np.cast[dtype](x) is deprecated and was removed in NumPy 2.0;
        # calling the dtype constructor directly is the supported equivalent.
        meas = [np.float32(meas[IOU]), np.float32(meas[RECALL]), np.float32(meas[PRECISION])]
        return meas
    res = tf.py_func(f, [predictions, targets], [tf.float32, tf.float32, tf.float32])
    # py_func outputs have unknown shape; pin them to scalars.
    for r in res:
        r.set_shape(())
    res = {IOU: res[0], RECALL: res[1], PRECISION: res[2]}
    return res
|
def compute_measures_for_binary_segmentation_summed(predictions, targets):
    """Sums the per-image binary-segmentation measures over a batch.

    Returns a single dict with the key-wise sums (note: the first image's
    dict is reused as the accumulator).
    """
    per_image = [compute_measures_for_binary_segmentation_single_image(pred, tgt) for (pred, tgt) in zip(predictions, targets)]
    totals = per_image[0]
    for measures in per_image[1:]:
        for key in totals:
            totals[key] += measures[key]
    return totals
|
def compute_measures_for_binary_segmentation_single_image(prediction, target):
    """Computes recall, precision and IoU for one binary segmentation.

    Pixels equal to VOID_LABEL in the target are excluded from all counts.
    Degenerate cases score 1.0 by convention: empty union, empty target (for
    recall) and empty prediction (for precision).
    """
    assert ((target.ndim == 2) or ((target.ndim == 3) and (target.shape[(- 1)] == 1)))
    valid_mask = (target != VOID_LABEL)
    n_target = np.logical_and(target, valid_mask).sum()
    n_prediction = np.logical_and(prediction, valid_mask).sum()
    n_intersection = np.logical_and((prediction == 1), (target == 1)).sum()
    n_union = np.logical_and(np.logical_or((prediction == 1), (target == 1)), valid_mask).sum()
    if (n_union == 0):
        return {RECALL: 1.0, PRECISION: 1.0, IOU: 1.0}
    recall = 1.0 if (n_target == 0) else (float(n_intersection) / n_target)
    precision = 1.0 if (n_prediction == 0) else (float(n_intersection) / n_prediction)
    iou = (float(n_intersection) / n_union)
    return {RECALL: recall, PRECISION: precision, IOU: iou}
|
class Timer():
    """Simple wall-clock timer, usable stand-alone or as a context manager."""

    def __init__(self, message='', stream=None):
        """
        :param message: prefix printed on context exit; None suppresses printing.
        :param stream: file-like target for the printout; defaults to log.v4.
        """
        if (stream is None):
            from core.Log import log
            stream = log.v4
        self.stream = stream
        self.start_time = time.time()
        self.message = message

    def __enter__(self):
        self.start_time = time.time()
        # BUGFIX: return self so `with Timer() as t:` binds the timer
        # (previously it bound None).
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if (self.message is not None):
            print(self.message, 'elapsed', self.elapsed(), file=self.stream)

    def elapsed(self):
        """Returns the seconds elapsed since construction or the last __enter__."""
        return (time.time() - self.start_time)
|
class Trainer():
    """Builds the optimizer/step ops and runs individual train/validation steps.

    Wraps learning-rate scheduling, optional gradient clipping, multi-GPU
    gradient averaging, TensorBoard summaries and optional run-metadata
    profiling around session.run().
    """

    def __init__(self, config, train_network, test_network, global_step, session):
        """
        :param config: Config with optimizer and learning-rate settings.
        :param train_network: training Network, or None for pure evaluation.
        :param test_network: validation Network, or None.
        :param global_step: tf.Variable incremented by the step op.
        :param session: tf.Session used for all runs.
        """
        self.opt_str = config.string('optimizer', 'adam').lower()
        self.train_network = train_network
        self.test_network = test_network
        self.session = session
        self.global_step = global_step
        self.validation_step_number = 0
        # -1.0 disables gradient clipping.
        self.gradient_clipping = config.float('gradient_clipping', (- 1.0))
        # Maps epoch number (or example count, see learning_rate_keys_are_steps)
        # to a learning rate; a value for key 1 must be present.
        self.learning_rates = config.int_key_dict('learning_rates')
        self.learning_rate_keys_are_steps = config.bool('learning_rate_keys_are_steps', False)
        self.curr_learning_rate = self.learning_rates[1]
        self.lr_var = tf.placeholder(tf.float32, shape=[], name='learning_rate')
        self.loss_scale_var = tf.placeholder_with_default(1.0, shape=[], name='loss_scale')
        self.use_gradient_checkpointing = config.bool('use_gradient_checkpointing', False)
        if self.use_gradient_checkpointing:
            # Monkey-patch tf.gradients with the memory-saving variant so all
            # later gradient computations use checkpointing.
            import memory_saving_gradients
            from tensorflow.python.ops import gradients
            new_grad_fun = memory_saving_gradients.gradients_collection
            tf.__dict__['gradients'] = new_grad_fun
            gradients.__dict__['gradients'] = new_grad_fun
        (self.opt, self.reset_opt_op) = self.create_optimizer(config)
        self.collect_run_metadata = config.bool('collect_run_metadata', False)
        grad_norm = None
        if (train_network is not None):
            (self._step_op, grad_norm) = self.create_step_op_and_grad_norm()
            self._update_ops = self.train_network.update_ops
        else:
            self._step_op = None
            self._update_ops = None
        (self.summary_writer, self.summary_op_train, self.summary_op_test) = self.init_summaries(config, grad_norm)

    def create_optimizer(self, config):
        """Returns (optimizer, reset_op) for the configured optimizer name.

        reset_op reinitializes the optimizer's slot variables (Adam only;
        None for the others).
        """
        momentum = config.float('momentum', 0.9)
        if (self.opt_str == 'sgd_nesterov'):
            return (tf.train.MomentumOptimizer(self.lr_var, momentum, use_nesterov=True), None)
        elif (self.opt_str == 'sgd_momentum'):
            return (tf.train.MomentumOptimizer(self.lr_var, momentum), None)
        elif (self.opt_str == 'sgd'):
            return (tf.train.GradientDescentOptimizer(self.lr_var), None)
        elif (self.opt_str == 'adam'):
            opt = tf.train.AdamOptimizer(self.lr_var)
            all_vars = tf.global_variables()
            # Adam's slot variables carry 'Adam' in their names.
            opt_vars = [v for v in all_vars if ('Adam' in v.name)]
            reset_opt_op = tf.variables_initializer(opt_vars, 'reset_optimizer')
            return (opt, reset_opt_op)
        elif (self.opt_str == 'none'):
            return (None, None)
        else:
            assert False, ('unknown optimizer', self.opt_str)

    def reset_optimizer(self):
        """Reinitializes the optimizer state (Adam moments); Adam only."""
        assert (self.opt_str == 'adam'), 'reset not implemented for other optimizers yet'
        assert (self.reset_opt_op is not None)
        self.session.run(self.reset_opt_op)

    def init_summaries(self, config, grad_norm=None):
        """Creates the summary writer and merged train/test summary ops.

        Either merged op is None when summaries are disabled or empty.
        """
        summdir = config.dir('summary_dir', 'summaries')
        model = config.string('model')
        summdir += (model + '/')
        tf.gfile.MakeDirs(summdir)
        summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
        summary_op = None
        summary_op_test = None
        if config.bool('write_summaries', True):
            if (self.train_network is not None):
                # NOTE: train_summs aliases the network's summaries list, so
                # appending the grad-norm summary extends that list too.
                train_summs = self.train_network.summaries
                if (grad_norm is not None):
                    grad_norm_summary = tf.summary.scalar('grad_norm', grad_norm)
                    train_summs.append(grad_norm_summary)
                if (len(train_summs) > 0):
                    summary_op = tf.summary.merge(self.train_network.summaries)
            if ((self.test_network is not None) and (len(self.test_network.summaries) > 0)):
                summary_op_test = tf.summary.merge(self.test_network.summaries)
        return (summary_writer, summary_op, summary_op_test)

    def adjust_learning_rate(self, epoch, n_examples_processed_total, learning_rate=None):
        """Updates curr_learning_rate from the schedule (or an explicit override).

        The schedule key in effect is the largest key not exceeding the
        current epoch (or processed-example count, when keys are steps).
        """
        if (learning_rate is None):
            if self.learning_rate_keys_are_steps:
                key = max([k for k in self.learning_rates.keys() if (k <= (n_examples_processed_total + 1))])
            else:
                key = max([k for k in self.learning_rates.keys() if (k <= (epoch + 1))])
            new_lr = self.learning_rates[key]
        else:
            new_lr = learning_rate
        if (self.curr_learning_rate != new_lr):
            print('changing learning rate to', new_lr, file=log.v1)
            self.curr_learning_rate = new_lr

    def create_step_op_and_grad_norm(self):
        """Builds the training step op (with multi-GPU gradient averaging).

        :returns: (step_op, grad_norm) where grad_norm is None unless gradient
            clipping is enabled.
        """
        if (self.opt is None):
            return (tf.no_op('dummy_step_op'), None)
        losses_with_regularizers = self.train_network.tower_total_losses_with_regularizers
        setups = self.train_network.tower_setups
        tower_grads = []
        for (l, s) in zip(losses_with_regularizers, setups):
            gpu_str = ('/gpu:' + str(s.gpu_idx))
            with tf.device(gpu_str), tf.name_scope((('tower_gpu_' + str(s.gpu_idx)) + '_opt')):
                var_list = (tf.trainable_variables() + tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
                grads_raw = self.opt.compute_gradients(l, var_list=var_list, gate_gradients=False, colocate_gradients_with_ops=True)
                # Drop variables without gradients w.r.t. this tower's loss.
                grads_filtered = [g for g in grads_raw if (g[0] is not None)]
                tower_grads.append(grads_filtered)
        with tf.device(setups[0].variable_device):
            if (len(tower_grads) == 1):
                grads = tower_grads[0]
            else:
                grads = average_gradients(tower_grads)
            if (self.gradient_clipping != (- 1)):
                (grads, norm) = clip_gradients(grads, self.gradient_clipping)
            else:
                norm = None
            if (len(grads) == 0):
                return (tf.no_op('dummy_step_op'), None)
            step_op = self.opt.apply_gradients(grads, global_step=self.global_step)
            return (step_op, norm)

    def validation_step(self, epoch=None, n_examples_processed_total=None, feed_dict=None, extraction_keys=()):
        """Runs one validation batch and returns its measures/extractions."""
        ops = {Measures.MEASURES: self.test_network.tower_measures}
        res = self._step(self.test_network, feed_dict, ops, self.summary_op_test, extraction_keys, self.validation_step_number)
        self.validation_step_number += 1
        return res

    def train_step(self, epoch, n_examples_processed_total=None, feed_dict=None, loss_scale=1.0, learning_rate=None, extraction_keys=()):
        """Runs one optimization step and returns its measures/extractions."""
        self.adjust_learning_rate(epoch, n_examples_processed_total, learning_rate)
        if (feed_dict is None):
            feed_dict = {}
        else:
            feed_dict = feed_dict.copy()
        feed_dict[self.lr_var] = self.curr_learning_rate
        feed_dict[self.loss_scale_var] = loss_scale
        ops = {'_update_ops': self._update_ops, '_step': self._step_op, 'global_step': self.global_step, Measures.MEASURES: self.train_network.tower_measures}
        res = self._step(self.train_network, feed_dict, ops, self.summary_op_train, extraction_keys, step_number=None)
        return res

    def _step(self, network, feed_dict, ops, summary_op, extraction_keys, step_number):
        """Runs `ops` in the session, handling summaries, profiling and extractions.

        :param step_number: summary step; None means use the fetched global_step
            (train path).
        """
        if (feed_dict is None):
            feed_dict = {}
        if (summary_op is not None):
            ops['summaries'] = summary_op
        if (len(extraction_keys) > 0):
            ops[Extractions.EXTRACTIONS] = [{k: [v] for (k, v) in extractions.items() if (k in extraction_keys)} for extractions in network.tower_extractions]
        if self.collect_run_metadata:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
        else:
            run_options = None
            run_metadata = None
        res = self.session.run(ops, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)
        # BUGFIX: summary_op can be None (write_summaries disabled or no
        # summaries collected); previously res['summaries'] raised KeyError
        # in that case. Pop defensively and only write when present.
        summary_str = res.pop('summaries', None)
        if (step_number is None):
            step_number = res['global_step']
        if (self.collect_run_metadata and (step_number > 50)):
            self.summary_writer.add_run_metadata(run_metadata, tag='timing', global_step=step_number)
            fetched_timeline = timeline.Timeline(run_metadata.step_stats)
            chrome_trace = fetched_timeline.generate_chrome_trace_format()
            with open('timing.json', 'w') as f:
                f.write(chrome_trace)
        if (summary_str is not None):
            self.summary_writer.add_summary(summary_str, global_step=step_number)
        res[Measures.MEASURES] = accumulate_measures({}, *res[Measures.MEASURES])
        if (len(extraction_keys) > 0):
            res[Extractions.EXTRACTIONS] = accumulate_extractions({}, *res[Extractions.EXTRACTIONS])
        return res
|
class Augmentor():
    """Base class for data augmentors; every hook is an identity by default.

    Subclasses override only the hooks they need. The `apply_*` methods
    operate on one example's tensors dict, the `batch_apply_*` methods on a
    list of such dicts.
    """

    def apply_before_resize(self, tensors):
        """Identity hook for one example before resizing."""
        return tensors

    def apply_after_resize(self, tensors):
        """Identity hook for one example after resizing."""
        return tensors

    def batch_apply_before_resize(self, tensors_batch):
        """Identity hook for a batch before resizing."""
        return tensors_batch

    def batch_apply_after_resize(self, tensors_batch):
        """Identity hook for a batch after resizing."""
        return tensors_batch
|
class GammaAugmentor(Augmentor):
    """Randomly gamma-distorts image brightness."""

    def __init__(self, gamma_range=((- 0.1), 0.1)):
        # Range of the uniform random factor controlling the distortion strength.
        self.gamma_range = gamma_range

    def apply_after_resize(self, tensors, factor=None):
        """Applies gamma distortion to tensors[IMAGES]; images must be in [0, 1].

        :param tensors: dict of data tensors; only DataKeys.IMAGES is changed,
            in a shallow copy of the dict.
        :param factor: optional pre-sampled random factor; drawn fresh if None.
        """
        with tf.name_scope('gamma_augmentor'):
            img = tensors[DataKeys.IMAGES]
            if (factor is None):
                factor = self._sample_factor()
            # Maps the uniform factor to a gamma exponent; factor=0 yields
            # gamma = log(0.5)/log(0.5) = 1, i.e. the identity.
            # NOTE(review): formula kept as-is -- confirm the intended
            # distortion curve for nonzero factors.
            gamma = (tf.log((0.5 + ((1 / math.sqrt(2)) * factor))) / tf.log((0.5 - ((1 / math.sqrt(2)) * factor))))
            aug_image = (img ** gamma)
            aug_tensors = tensors.copy()
            aug_tensors[DataKeys.IMAGES] = aug_image
        return aug_tensors

    def _sample_factor(self):
        # Uniform sample from the configured gamma range.
        return tf.random_uniform(shape=[], minval=self.gamma_range[0], maxval=self.gamma_range[1], dtype=tf.float32)

    def batch_apply_after_resize(self, tensors_batch):
        """Applies the same sampled factor to every example in the batch."""
        factor = self._sample_factor()
        return [self.apply_after_resize(x, factor) for x in tensors_batch]
|
class FlipAugmentor(Augmentor):
    """Randomly flips images (and aligned labels/boxes) horizontally."""

    def __init__(self, p=0.5):
        """
        :param p: The probability that the image will be flipped.
        """
        self.p = p

    def apply_after_resize(self, tensors, doit=None):
        """Flips image-like tensors and box coordinates, returning a new dict.

        :param tensors: dict of data tensors; a shallow copy with flipped
            entries is returned.
        :param doit: optional pre-sampled boolean tensor; drawn fresh if None.
        """
        with tf.name_scope('flip_augmentor'):
            aug_tensors = tensors.copy()
            if (doit is None):
                doit = self._sample_doit()

            def maybe_flip(key_, image_flip):
                # Flips tensors[key_] if present: image-like tensors are
                # mirrored, coordinate tensors get their x-range inverted.
                if (key_ in tensors):
                    val = tensors[key_]
                    if image_flip:
                        flipped = tf.image.flip_left_right(val)
                    else:
                        flipped = flip_coords_horizontal(val, tf.shape(tensors[DataKeys.IMAGES])[1])
                    # Both branches are built; tf.cond selects one at run time.
                    aug = tf.cond(doit, (lambda : flipped), (lambda : val))
                    aug_tensors[key_] = aug
            keys_to_flip = [DataKeys.IMAGES, DataKeys.SEGMENTATION_LABELS, DataKeys.BBOX_GUIDANCE, DataKeys.SEGMENTATION_LABELS_ORIGINAL_SIZE, DataKeys.LASER_GUIDANCE, DataKeys.SEGMENTATION_MASK]
            coords_to_flip = [DataKeys.BBOXES_y0x0y1x1]
            for key in keys_to_flip:
                maybe_flip(key, image_flip=True)
            for key in coords_to_flip:
                maybe_flip(key, image_flip=False)
            return aug_tensors

    def _sample_doit(self):
        # NOTE(review): `uniform > p` is True with probability 1 - p, so p is
        # effectively the probability of NOT flipping -- this contradicts the
        # __init__ docstring (harmless only for p = 0.5); confirm the intent.
        return (tf.random_uniform([]) > self.p)

    def batch_apply_after_resize(self, tensors_batch):
        """Applies the same flip decision to every example in the batch."""
        doit = self._sample_doit()
        return [self.apply_after_resize(x, doit) for x in tensors_batch]
|
class BBoxJitterAugmentor(Augmentor):
    """Randomly jitters bounding-box coordinates by a fraction of the box size."""

    def __init__(self, v=0.15):
        # Jitter strength: each coordinate moves by v * N(0,1) (clipped to
        # +/- 2.5) times the box height/width.
        self.v = v

    def apply_before_resize(self, tensors, g=None):
        """Jitters tensors[BBOXES_y0x0y1x1], clamped to the image bounds.

        :param g: optional pre-sampled 4-vector of normals; drawn fresh if None.
        NOTE(review): mutates the input dict in place (no copy), unlike the
        after-resize augmentors -- confirm callers rely on this.
        """
        if (DataKeys.BBOXES_y0x0y1x1 in tensors):
            (y0, x0, y1, x1) = tf.unstack(tf.cast(tensors[DataKeys.BBOXES_y0x0y1x1], tf.float32))
            if (g is None):
                g = self._sample_g()
            # Clip extreme samples so boxes cannot jump arbitrarily far.
            g = tf.clip_by_value(g, (- 2.5), 2.5)
            h = (y1 - y0)
            w = (x1 - x0)
            y0 += ((self.v * g[0]) * h)
            x0 += ((self.v * g[1]) * w)
            y1 += ((self.v * g[2]) * h)
            x1 += ((self.v * g[3]) * w)
            shape = tf.shape(tensors[DataKeys.IMAGES])
            # Clamp the jittered box to the image extent.
            y0 = tf.maximum(y0, 0)
            x0 = tf.maximum(x0, 0)
            y1 = tf.minimum(y1, tf.cast(shape[0], tf.float32))
            x1 = tf.minimum(x1, tf.cast(shape[1], tf.float32))
            bbox_jittered = tf.stack([y0, x0, y1, x1])
            tensors[DataKeys.BBOXES_y0x0y1x1] = bbox_jittered
        return tensors

    @staticmethod
    def _sample_g():
        # Four independent standard normals, one per box coordinate.
        return tf.random_normal((4,))

    def batch_apply_before_resize(self, tensors_batch):
        """Applies the same jitter sample to every example in the batch."""
        g = self._sample_g()
        return [self.apply_before_resize(x, g) for x in tensors_batch]
|
def parse_augmentors(strs, config):
    """Builds augmentor objects from their config names.

    :param strs: iterable of augmentor names ('gamma', 'flip', 'bbox_jitter').
    :param config: Config used for augmentor-specific options.
    :returns: list of Augmentor instances, in the given order.
    :raises AssertionError: for an unknown augmentor name.
    """
    augmentors = []
    for s in strs:
        if (s == 'gamma'):
            augmentor = GammaAugmentor(gamma_range=((- 0.05), 0.05))
        elif (s == 'flip'):
            augmentor = FlipAugmentor()
        elif (s == 'bbox_jitter'):
            v = config.float('bbox_jitter_factor', 0.15)
            print('using bbox_jitter_factor=', v, file=log.v5, sep='')
            augmentor = BBoxJitterAugmentor(v)
        else:
            # BUGFIX: the message previously lacked a separator and printed
            # e.g. 'unknown augmentorfoo'.
            assert False, ('unknown augmentor: ' + s)
        augmentors.append(augmentor)
    return augmentors
|
def read_image_and_annotation_list(fn, data_dir):
    """Reads a whitespace-separated '<image> <annotation>' list file.

    Each relative path is prefixed with `data_dir`.

    :returns: tuple (image_paths, annotation_paths).
    """
    image_paths = []
    annotation_paths = []
    with open(fn) as list_file:
        for line in list_file:
            fields = line.split()
            image_paths.append(data_dir + fields[0])
            annotation_paths.append(data_dir + fields[1])
    return (image_paths, annotation_paths)
|
def get_input_list_file(subset, trainsplit):
    """Returns the DAVIS list-file path for a subset and train split.

    :param subset: 'train' selects the training list; anything else the val list.
    :param trainsplit: 0 for the official split, 1-3 for custom splits.
    :raises AssertionError: for an invalid trainsplit.
    """
    split_prefixes = {0: '', 1: 'trainsplit_', 2: 'trainsplit2_', 3: 'trainsplit3_'}
    assert (trainsplit in split_prefixes), 'invalid trainsplit'
    leaf = ('train.txt' if (subset == 'train') else 'val.txt')
    return ('ImageSets/480p/' + split_prefixes[trainsplit] + leaf)
|
@register_dataset('davis')
class DAVISDataset(FileListDataset):
    """DAVIS 2016 video object segmentation dataset (binary foreground masks)."""

    def __init__(self, config, subset, num_classes, name='davis16'):
        super().__init__(config, name, subset, DAVIS_DEFAULT_PATH, num_classes)
        # Which train/val split to use (0 = official DAVIS split).
        self.trainsplit = config.int('trainsplit', 0)

    def postproc_annotation(self, ann_filename, ann):
        # Annotations store the object as 255; scale down to {0, 1}.
        # NOTE(review): `/` is true division, so the result may become
        # float-typed depending on `ann`'s dtype -- confirm downstream dtype.
        return (ann / 255)

    def read_inputfile_lists(self):
        """Returns (image_paths, annotation_paths) for the configured subset/split."""
        assert (self.subset in ('train', 'valid')), self.subset
        list_file = get_input_list_file(self.subset, self.trainsplit)
        (imgs, ans) = read_image_and_annotation_list((self.data_dir + list_file), self.data_dir)
        return (imgs, ans)
|
def postproc_2017_labels(labels):
    # Collapse DAVIS 2017 multi-object labels to one binary mask: any nonzero
    # object id along the channel axis (axis=2) becomes foreground (1).
    # NOTE(review): `keep_dims` is the TF1 spelling (renamed keepdims in TF2).
    return tf.cast((tf.reduce_max(labels, axis=2, keep_dims=True) > 0), tf.uint8)
|
def get_input_list_file_2017(subset):
    """Return the DAVIS 2017 image-set list file for *subset*.

    Raises:
      AssertionError: for any subset other than 'train' or 'valid'.
    """
    list_files = {'train': 'ImageSets/2017/train.txt',
                  'valid': 'ImageSets/2017/val.txt'}
    assert subset in list_files, ('invalid subset', subset)
    return list_files[subset]
|
def read_image_and_annotation_list_2017(fn, data_dir):
    """Expand a DAVIS 2017 sequence list into per-frame image/annotation paths.

    Each line of *fn* names a sequence. A suffix after '__' denotes a
    per-object variant: the frame paths are globbed from the base sequence
    directory and the variant name is then substituted in.

    Returns:
      (imgs, ans): parallel lists of jpg frame paths and png annotation paths.
    """
    all_images = []
    all_annotations = []
    with open(fn) as list_file:
        for line in list_file:
            seq = line.strip()
            base_seq = seq.split('__')[0]
            frames = sorted(glob.glob(data_dir + 'JPEGImages/480p/' + base_seq + '/*.jpg'))
            annotations = [frame.replace('JPEGImages', 'Annotations').replace('.jpg', '.png')
                           for frame in frames]
            if '__' in seq:
                # Per-object variant: point both path lists at the variant name.
                annotations = [p.replace(base_seq, seq) for p in annotations]
                frames = [p.replace(base_seq, seq) for p in frames]
            all_images += frames
            all_annotations += annotations
    return all_images, all_annotations
|
@register_dataset('davis17')
@register_dataset('davis2017')
class DAVIS2017Dataset(FileListDataset):
    """DAVIS 2017 multi-object video segmentation dataset."""

    def __init__(self, config, subset, num_classes, name='davis17'):
        super().__init__(config, name, subset, DAVIS2017_DEFAULT_PATH, num_classes)

    def read_inputfile_lists(self):
        assert self.subset in ('train', 'valid'), self.subset
        fname = get_input_list_file_2017(self.subset)
        images, annotations = read_image_and_annotation_list_2017(self.data_dir + fname, self.data_dir)
        return images, annotations
|
class AbstractDataset(ABC):
    """Base class for datasets feeding the TF input pipeline.

    Reads guidance-channel flags from the config and defines the fixed
    per-example processing chain (load -> augment -> resize -> postproc
    -> assemble) that subclasses plug into by overriding hooks.
    """
    def __init__(self, config, subset, num_classes):
        self.summaries = []
        self.config = config
        self.subset = subset
        self.n_classes = num_classes
        # Flags for optional extra input channels that get concatenated
        # onto the image in assemble_example().
        self.use_bbox_guidance = config.bool('use_bbox_guidance', False)
        self.use_unsigned_distance_transform_guidance = config.bool('use_unsigned_distance_transform_guidance', False)
        self.use_signed_distance_transform_guidance = config.bool('use_signed_distance_transform_guidance', False)
        self.use_laser_guidance = config.bool('use_laser_guidance', False)
        self.use_clicks_guidance = config.bool('use_clicks_guidance', False)
        # -1 means "no override; use the dataset's natural size".
        self.epoch_length_train = config.int('epoch_length_train', (- 1))
        self.shuffle_buffer_size = config.int('shuffle_buffer_size', 5000)
        self.use_summaries = self.config.bool('use_summaries', False)
    @abstractmethod
    def n_examples_per_epoch(self):
        """Return the configured epoch-length override, or None.

        Abstract, but carries a body: subclasses call it via super() to
        get the train-subset override logic.
        """
        if ((self.subset == 'train') and (self.epoch_length_train != (- 1))):
            return self.epoch_length_train
        else:
            return None
    @abstractmethod
    def create_input_tensors_dict(self, batch_size):
        """Build and return the batched input tensor dict for the network."""
        pass
    def num_classes(self):
        return self.n_classes
    def load_example(self, input_filenames):
        """Load one example from its filename tuple and run the full chain."""
        raw_example = self.load_raw_example(*input_filenames)
        processed = self.process_raw_example(raw_example)
        return processed
    def process_raw_example(self, example):
        # Fixed processing order: initial postproc -> pre-resize
        # augment/postproc -> resize -> post-resize augment -> final
        # postproc -> assembly. Subclasses rely on this ordering.
        example = self.postproc_example_initial(example)
        example = self.augment_example_before_resize(example)
        example = self.postproc_example_before_resize(example)
        example = self.resize_example(example)
        example = self.augment_example_after_resize(example)
        example = self.postproc_example_before_assembly(example)
        example = self.assemble_example(example)
        return example
    def load_raw_example(self, img_filename, label_filename=None, *args):
        """Load image and annotation tensors and merge them into one dict."""
        img_tensors = self.load_image(img_filename)
        if (not isinstance(img_tensors, dict)):
            img_tensors = {DataKeys.IMAGES: img_tensors}
        label_tensors = self.load_annotation(img_tensors[DataKeys.IMAGES], img_filename, label_filename)
        if (not isinstance(label_tensors, dict)):
            label_tensors = {DataKeys.SEGMENTATION_LABELS: label_tensors}
        # Image and label dicts must not collide on any key.
        for k in img_tensors.keys():
            assert (k not in label_tensors.keys())
        example = img_tensors
        example.update(label_tensors)
        return example
    def load_image(self, img_filename):
        """Decode an image file to a float32 HxWx3 tensor in [0, 1]."""
        img_data = tf.read_file(img_filename)
        img = tf.image.decode_image(img_data, channels=3)
        img = tf.image.convert_image_dtype(img, tf.float32)
        img.set_shape((None, None, 3))
        return img
    def load_annotation(self, img, img_filename, annotation_filename):
        """Decode a single-channel annotation and apply postproc_annotation."""
        ann_data = tf.read_file(annotation_filename)
        ann = tf.image.decode_image(ann_data, channels=1)
        # The annotation shares the image's spatial shape, with 1 channel.
        ann.set_shape((img.get_shape().as_list()[:(- 1)] + [1]))
        ann = self.postproc_annotation(annotation_filename, ann)
        return ann
    def postproc_annotation(self, ann_filename, ann):
        """Hook for subclasses to transform raw annotations; identity here."""
        return ann
    def resize_example(self, tensors):
        """Resize the example per config, falling back to the train settings."""
        resize_mode_str = self.config.string(('resize_mode_' + self.subset), '')
        if (resize_mode_str == ''):
            print('Using resize_mode_train for', self.subset, ('since resize_mode_' + self.subset), 'not specified in the config', file=log.v1)
            resize_mode_str = self.config.string('resize_mode_train')
        size = self.config.int_list(('input_size_' + self.subset), [])
        if (len(size) == 0):
            size = self.config.int_list('input_size_train', [])
        resize_mode = ResizeMode(resize_mode_str)
        tensors = resize(tensors, resize_mode, size)
        return tensors
    def augment_example_before_resize(self, tensors):
        """Apply the configured augmentors' pre-resize stage to one example."""
        augmentors_str = self.config.string_list(('augmentors_' + self.subset), [])
        augmentors = parse_augmentors(augmentors_str, self.config)
        for aug in augmentors:
            tensors = aug.apply_before_resize(tensors)
        return tensors
    def augment_example_after_resize(self, tensors):
        """Apply the configured augmentors' post-resize stage to one example."""
        augmentors_str = self.config.string_list(('augmentors_' + self.subset), [])
        augmentors = parse_augmentors(augmentors_str, self.config)
        for aug in augmentors:
            tensors = aug.apply_after_resize(tensors)
        return tensors
    def jointly_augment_examples_before_resize(self, tensors_batch):
        """Batch variant of the pre-resize augmentation (shared parameters)."""
        augmentors_str = self.config.string_list(('augmentors_' + self.subset), [])
        augmentors = parse_augmentors(augmentors_str, self.config)
        for aug in augmentors:
            tensors_batch = aug.batch_apply_before_resize(tensors_batch)
        return tensors_batch
    def jointly_augment_examples_after_resize(self, tensors_batch):
        """Batch variant of the post-resize augmentation (shared parameters)."""
        augmentors_str = self.config.string_list(('augmentors_' + self.subset), [])
        augmentors = parse_augmentors(augmentors_str, self.config)
        for aug in augmentors:
            tensors_batch = aug.batch_apply_after_resize(tensors_batch)
        return tensors_batch
    def postproc_example_initial(self, tensors):
        """Preserve raw image data and derive bboxes before any mutation."""
        if ((DataKeys.IMAGES in tensors) and (DataKeys.RAW_IMAGES not in tensors)):
            tensors[DataKeys.RAW_IMAGES] = tensors[DataKeys.IMAGES]
        if ((DataKeys.IMAGES in tensors) and (DataKeys.RAW_IMAGE_SIZES not in tensors)):
            tensors[DataKeys.RAW_IMAGE_SIZES] = tf.shape(tensors[DataKeys.IMAGES])[0:2]
        if ((DataKeys.SEGMENTATION_LABELS in tensors) and (DataKeys.BBOXES_y0x0y1x1 not in tensors)):
            print('deriving bboxes from segmentation masks', file=log.v5)
            segmentation_labels = tensors[DataKeys.SEGMENTATION_LABELS]
            bbox = get_bbox_from_segmentation_mask(segmentation_labels)
            tensors[DataKeys.BBOXES_y0x0y1x1] = bbox
        return tensors
    def postproc_example_before_assembly(self, tensors):
        """Normalize the image and compute distance-transform guidance maps."""
        tensors_postproc = tensors.copy()
        tensors_postproc[DataKeys.IMAGES] = normalize(tensors[DataKeys.IMAGES])
        if self.use_signed_distance_transform_guidance:
            # Distance transforms are derived from the bbox guidance mask.
            assert (DataKeys.BBOX_GUIDANCE in tensors)
            bbox_guidance = tensors[DataKeys.BBOX_GUIDANCE]
            sdt = signed_distance_transform(bbox_guidance)
            tensors_postproc[DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE] = sdt
        if self.use_unsigned_distance_transform_guidance:
            assert (DataKeys.BBOX_GUIDANCE in tensors)
            bbox_guidance = tensors[DataKeys.BBOX_GUIDANCE]
            udt = unsigned_distance_transform(bbox_guidance)
            tensors_postproc[DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE] = udt
        return tensors_postproc
    def postproc_example_before_resize(self, tensors):
        """Encode the bbox as a mask if any bbox-derived guidance is enabled."""
        tensors_postproc = tensors.copy()
        if ((self.use_bbox_guidance or self.use_signed_distance_transform_guidance or self.use_unsigned_distance_transform_guidance) and (DataKeys.BBOXES_y0x0y1x1 in tensors) and (DataKeys.BBOX_GUIDANCE not in tensors)):
            bbox = tensors[DataKeys.BBOXES_y0x0y1x1]
            img = tensors[DataKeys.IMAGES]
            bbox_guidance = encode_bbox_as_mask(bbox, tf.shape(img))
            tensors_postproc[DataKeys.BBOX_GUIDANCE] = bbox_guidance
        return tensors_postproc
    def assemble_example(self, tensors):
        """Concatenate the image with all enabled guidance channels.

        The result is stored under DataKeys.INPUTS; the individual
        tensors are kept as well.
        """
        tensors_assembled = tensors.copy()
        inputs_to_concat = [tensors[DataKeys.IMAGES]]
        if (self.use_bbox_guidance and (DataKeys.BBOX_GUIDANCE in tensors)):
            print('using bbox guidance', file=log.v5)
            bbox_guidance = tf.cast(tensors[DataKeys.BBOX_GUIDANCE], tf.float32)
            inputs_to_concat.append(bbox_guidance)
        if (self.use_signed_distance_transform_guidance and (DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE in tensors)):
            print('using signed distance transform guidance')
            assert (not self.use_bbox_guidance), "we probably don't want to use both bbox and sdt guidance at the same time"
            sdt_guidance = tensors[DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE]
            inputs_to_concat.append(sdt_guidance)
        if (self.use_unsigned_distance_transform_guidance and (DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE in tensors)):
            print('using unsigned distance transform guidance')
            assert (not self.use_bbox_guidance), "we probably don't want to use both bbox and udt guidance at the same time"
            udt_guidance = tensors[DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE]
            inputs_to_concat.append(udt_guidance)
        if (self.use_laser_guidance and (DataKeys.LASER_GUIDANCE in tensors)):
            print('using laser guidance', file=log.v5)
            laser_guidance = tf.cast(tensors[DataKeys.LASER_GUIDANCE], tf.float32)
            inputs_to_concat.append(laser_guidance)
        if self.use_clicks_guidance:
            print('using guidance from clicks')
            neg_dist_transform = tensors[DataKeys.NEG_CLICKS]
            pos_dist_transform = tensors[DataKeys.POS_CLICKS]
            inputs_to_concat.append(neg_dist_transform)
            inputs_to_concat.append(pos_dist_transform)
        if (len(inputs_to_concat) > 1):
            # Guidance channels are appended along the channel axis.
            inputs = tf.concat(inputs_to_concat, axis=(- 1))
        else:
            inputs = inputs_to_concat[0]
        tensors_assembled[DataKeys.INPUTS] = inputs
        return tensors_assembled
    def create_summaries(self, data):
        """Add image summaries for every visualizable tensor present in data."""
        if (DataKeys.IMAGES in data):
            self.summaries.append(tf.summary.image((self.subset + 'data/images'), unnormalize(data[DataKeys.IMAGES])))
        if (DataKeys.SEGMENTATION_LABELS in data):
            self.summaries.append(tf.summary.image((self.subset + 'data/ground truth segmentation labels'), tf.cast(data[DataKeys.SEGMENTATION_LABELS], tf.float32)))
        if (DataKeys.BBOX_GUIDANCE in data):
            self.summaries.append(tf.summary.image((self.subset + 'data/bbox guidance'), tf.cast(data[DataKeys.BBOX_GUIDANCE], tf.float32)))
        if (DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE in data):
            self.summaries.append(tf.summary.image((self.subset + 'data/signed_distance_transform_guidance'), data[DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE]))
        if (DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE in data):
            self.summaries.append(tf.summary.image((self.subset + 'data/unsigned_distance_transform_guidance'), data[DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE]))
        if (DataKeys.LASER_GUIDANCE in data):
            self.summaries.append(tf.summary.image((self.subset + 'data/laser guidance'), tf.cast(data[DataKeys.LASER_GUIDANCE], tf.float32)))
|
class FileListDataset(AbstractDataset):
    """Dataset backed by parallel lists of input filenames.

    Subclasses supply read_inputfile_lists(); this class builds the
    tf.data pipeline (shuffle, map, filter, repeat, batch, prefetch).
    """
    def __init__(self, config, dataset_name, subset, default_path, num_classes):
        super().__init__(config, subset, num_classes)
        # Lazily filled by _load_inputfile_lists().
        self.inputfile_lists = None
        # Fraction of the dataset to use (handy for quick experiments).
        self.fraction = config.float('data_fraction', 1.0)
        self.data_dir = config.string((dataset_name + '_data_dir'), default_path)
        self._num_parallel_calls = config.int('num_parallel_calls', 32)
        self._prefetch_buffer_size = config.int('prefetch_buffer_size', 20)
    def _load_inputfile_lists(self):
        """Read and cache the filename lists; apply the data fraction."""
        if (self.inputfile_lists is not None):
            return
        self.inputfile_lists = self.read_inputfile_lists()
        assert (len(self.inputfile_lists) > 0)
        for l in self.inputfile_lists:
            assert (len(l) > 0)
        # All lists must be parallel (same length).
        assert all([(len(l) == len(self.inputfile_lists[0])) for l in self.inputfile_lists])
        if (self.fraction < 1.0):
            # Truncate every list to the same prefix.
            n = int((self.fraction * len(self.inputfile_lists[0])))
            self.inputfile_lists = tuple([l[:n] for l in self.inputfile_lists])
    def n_examples_per_epoch(self):
        self._load_inputfile_lists()
        n_examples = super().n_examples_per_epoch()
        if (n_examples is None):
            # No override configured: use the actual number of files.
            return len(self.inputfile_lists[0])
        else:
            return n_examples
    def create_input_tensors_dict(self, batch_size):
        """Build the tf.data input pipeline and return the next-batch dict."""
        self._load_inputfile_lists()
        if (self.subset == 'train'):
            # Shuffle all lists jointly so corresponding entries stay aligned.
            zipped = list(zip(*self.inputfile_lists))
            shuffle(zipped)
            inputfile_lists_shuffled = tuple(([x[idx] for x in zipped] for idx in range(len(self.inputfile_lists))))
        else:
            inputfile_lists_shuffled = self.inputfile_lists
        tfdata = tf.data.Dataset.from_tensor_slices(inputfile_lists_shuffled)
        if (self.subset == 'train'):
            tfdata = tfdata.shuffle(buffer_size=self.shuffle_buffer_size)
        def _load_example(*input_filenames):
            # Wraps load_example for tf.data.map; drops tensors whose
            # per-example shapes vary and therefore cannot be batched.
            example = self.load_example(input_filenames)
            if (batch_size > 1):
                if (DataKeys.SEGMENTATION_LABELS_ORIGINAL_SIZE in example):
                    del example[DataKeys.SEGMENTATION_LABELS_ORIGINAL_SIZE]
                if (DataKeys.RAW_IMAGES in example):
                    del example[DataKeys.RAW_IMAGES]
            return example
        def _filter_example(tensors):
            # Examples can opt out via the SKIP_EXAMPLE flag.
            if (DataKeys.SKIP_EXAMPLE in tensors):
                return tf.logical_not(tensors[DataKeys.SKIP_EXAMPLE])
            else:
                return tf.constant(True)
        tfdata = tfdata.map(_load_example, num_parallel_calls=self._num_parallel_calls)
        tfdata = tfdata.filter(_filter_example)
        tfdata = tfdata.repeat()
        tfdata = self._batch(tfdata, batch_size)
        tfdata = tfdata.prefetch(buffer_size=self._prefetch_buffer_size)
        res = tfdata.make_one_shot_iterator().get_next()
        if self.use_summaries:
            self.create_summaries(res)
        return res
    def _batch(self, tfdata, batch_size):
        """Batch the dataset; batch size 1 just adds a leading dimension."""
        if (batch_size > 1):
            tfdata = tfdata.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
        elif (batch_size == 1):
            # expand_dims keeps variable-shape tensors usable at batch size 1.
            tfdata = tfdata.map((lambda x: {k: tf.expand_dims(v, axis=0) for (k, v) in x.items()}))
        else:
            assert False, ('invalid batch size', batch_size)
        return tfdata
    def get_extraction_keys(self):
        """Extra keys to extract from the network; none by default."""
        return []
    @abstractmethod
    def read_inputfile_lists(self):
        """Return a tuple of parallel filename lists, e.g. (imgs, anns)."""
        pass
|
class DetectionFileListDataset(FileListDataset):
    """File-list dataset for detection: instance annotations plus RPN targets."""
    def __init__(self, config, dataset_name, subset, default_path, num_classes, n_max_detections, class_ids_with_instances, id_divisor):
        super().__init__(config, dataset_name, subset, default_path, num_classes)
        self.add_masks = config.bool('add_masks', True)
        init_anchors(config)
        # Annotations are padded/truncated to this fixed number of detections
        # so the tensors have static shapes.
        self._n_max_detections = n_max_detections
        self._class_ids_with_instances = class_ids_with_instances
        self._id_divisor = id_divisor
    def assemble_example(self, tensors):
        tensors = super().assemble_example(tensors)
        # Attach region-proposal-network targets derived from the bboxes.
        tensors = add_rpn_data(tensors)
        return tensors
    def load_annotation(self, img, img_filename, annotation_filename):
        """Load instance-segmentation annotations via a numpy py_func."""
        load_ann_np = partial(load_instance_seg_annotation_np, n_max_detections=self._n_max_detections, class_ids_with_instances=self._class_ids_with_instances, id_divisor=self._id_divisor)
        (bboxes, ids, classes, is_crowd, mask) = tf.py_func(load_ann_np, [annotation_filename], [tf.float32, tf.int32, tf.int32, tf.int32, tf.uint8], name='postproc_ann_np')
        # py_func loses static shape information; restore it explicitly.
        bboxes.set_shape((self._n_max_detections, 4))
        ids.set_shape((self._n_max_detections,))
        classes.set_shape((self._n_max_detections,))
        is_crowd.set_shape((self._n_max_detections,))
        mask.set_shape((None, None, self._n_max_detections))
        return_dict = {DataKeys.BBOXES_y0x0y1x1: bboxes, DataKeys.CLASSES: classes, DataKeys.IDS: ids, DataKeys.IS_CROWD: is_crowd}
        if self.add_masks:
            return_dict[DataKeys.SEGMENTATION_MASK] = mask
        return return_dict
|
class FeedDataset(AbstractDataset):
    """Dataset fed at runtime through placeholders instead of files."""
    def __init__(self, config, subset, data_keys_to_use, num_classes=2):
        super().__init__(config, subset, num_classes)
        self._data_keys_to_use = data_keys_to_use
        # Batch size fallback chain: batch_size_val (val subset only)
        # -> batch_size_eval -> batch_size.
        self._batch_size = (- 1)
        if (subset == 'val'):
            self._batch_size = config.int('batch_size_val', (- 1))
        if (self._batch_size == (- 1)):
            self._batch_size = config.int('batch_size_eval', (- 1))
        if (self._batch_size == (- 1)):
            self._batch_size = config.int('batch_size')
        self._placeholders = self._create_placeholders()
    def _create_placeholders(self):
        """Create one placeholder per requested data key and batch index."""
        dtypes_and_shapes = {DataKeys.IMAGES: (tf.float32, (None, None, 3)), DataKeys.SEGMENTATION_LABELS: (tf.uint8, (None, None, 1)), DataKeys.BBOX_GUIDANCE: (tf.uint8, (None, None, 1)), DataKeys.IMAGE_FILENAMES: (tf.string, ()), DataKeys.OBJ_TAGS: (tf.string, ()), DataKeys.LASER_GUIDANCE: (tf.float32, (None, None, 1)), DataKeys.BBOXES_y0x0y1x1: (tf.float32, (4,)), DataKeys.NEG_CLICKS: (tf.uint8, (None, None, 1))}
        placeholders = {}
        for key in self._data_keys_to_use:
            (dtype, shape) = dtypes_and_shapes[key]
            key_placeholders = [tf.placeholder(dtype, shape, name=(key + '_placeholder_{}'.format(idx))) for idx in range(self._batch_size)]
            placeholders[key] = key_placeholders
        return placeholders
    def n_examples_per_epoch(self):
        # A feed dataset has no fixed size.
        raise NotImplementedError()
    def create_input_tensors_dict(self, batch_size):
        """Process each placeholder example and stack them into a batch."""
        examples = [self._create_input_tensors_dict_single_example(idx_in_minibatch=idx) for idx in range(batch_size)]
        keys = examples[0].keys()
        data = {}
        for k in keys:
            if (batch_size > 1):
                if (k == DataKeys.RAW_IMAGES):
                    # Raw images can differ in size, so they cannot be stacked.
                    continue
                data[k] = tf.stack([example[k] for example in examples], axis=0)
            else:
                data[k] = tf.expand_dims(examples[0][k], axis=0)
        self.create_summaries(data)
        return data
    def _create_input_tensors_dict_single_example(self, idx_in_minibatch):
        """Assemble one example from the placeholders at the given batch slot."""
        raw_example = {}
        for data_key in self._data_keys_to_use:
            raw_example[data_key] = self._placeholders[data_key][idx_in_minibatch]
        if (DataKeys.IMAGES in raw_example):
            raw_example[DataKeys.RAW_IMAGES] = raw_example[DataKeys.IMAGES]
        example = self.process_raw_example(raw_example)
        return example
    @abstractmethod
    def get_feed_dict_for_next_step(self):
        """Return the feed_dict mapping placeholders to the next step's data."""
        pass
|
@register_dataset(NAME)
class GrabcutDataset(FileListDataset):
    """GrabCut interactive-segmentation benchmark dataset (binary masks)."""

    def __init__(self, config, subset, name=NAME):
        super().__init__(config, dataset_name=name, subset=subset, default_path=DEFAULT_PATH, num_classes=2)

    def postproc_annotation(self, ann_filename, ann):
        # Map foreground (255) to class label 1, then map the "unknown"
        # border region (128) to the void label 255.
        result = tf.where(tf.equal(ann, 255), tf.ones_like(ann), ann)
        result = tf.where(tf.equal(result, 128), tf.ones_like(ann) * 255, result)
        return result

    def read_inputfile_lists(self):
        image_dir = self.data_dir + 'images/'
        gt_dir = self.data_dir + 'images-gt/'
        images = []
        ground_truths = []
        for filename in glob.glob(image_dir + '*'):
            # Ground-truth masks share the image's base name, as .png.
            stem = filename.split('/')[-1].rsplit('.')[0]
            images.append(filename)
            ground_truths.append(gt_dir + stem + '.png')
        return images, ground_truths
|
@register_dataset(NAME)
class KITTIMaskedDiosDataset(KITTIMturkersInstanceDataset):
    """KITTI variant delegating DIOS-style mask postprocessing.

    NOTE(review): delegates to self.pascal_masked_dataset, which is not
    assigned anywhere in this class or its visible base — presumably set
    up elsewhere; verify before relying on these methods.
    """
    def __init__(self, config, subset, name=NAME):
        super().__init__(config, subset, name)
    def get_extraction_keys(self):
        return self.pascal_masked_dataset.get_extraction_keys()
    def postproc_example_before_assembly(self, tensors):
        return self.pascal_masked_dataset.postproc_example_before_assembly(tensors)
    def use_segmentation_mask(self, res):
        # NOTE(review): the delegate's return value is discarded here —
        # confirm whether this method is expected to forward it.
        self.pascal_masked_dataset.use_segmentation_mask(res)
    def postproc_annotation(self, ann_filename, ann):
        # Wrap the plain mask into a dict, keeping a raw copy and the filename.
        mask = super().postproc_annotation(ann_filename, ann)
        return {DataKeys.SEGMENTATION_LABELS: mask, DataKeys.RAW_SEGMENTATION_LABELS: mask, DataKeys.IMAGE_FILENAMES: ann_filename}
|
@register_dataset(NAME)
class KITTIMturkersInstanceDataset(FileListDataset):
    """KITTI per-instance segmentation masks collected via Mechanical Turk."""

    def __init__(self, config, subset, name=NAME):
        super(KITTIMturkersInstanceDataset, self).__init__(config, name, subset, DEFAULT_PATH, 2)

    def read_inputfile_lists(self):
        annotation_files = glob.glob(self.data_dir + 'object/segmentations_jay_per_instance_new/*.png')
        # Annotation names look like '<image_id>:<instance>.png'; recover
        # the corresponding image path from the part before the colon.
        image_files = [self.data_dir + 'object/image_2/' + f.split('/')[-1].split(':')[0] + '.png'
                       for f in annotation_files]
        return image_files, annotation_files
|
def register_dataset(name, **args):
    """Class decorator factory registering a dataset under *name*.

    Lookup is case-insensitive. Extra keyword arguments are stored with
    the class and forwarded to its constructor by load_dataset().
    """
    key = name.lower()

    def _register(dataset):
        _registered_datasets[key] = (dataset, args)
        return dataset
    return _register
|
def load_dataset(config, subset, session, name):
    """Look up a registered dataset class by *name* and instantiate it.

    The 'datasets' package is imported once (on first call) so that all
    @register_dataset decorators have run. *session* is accepted for
    interface compatibility but not used here.

    Raises:
      ValueError: if no dataset is registered under *name*.
    """
    if not hasattr(load_dataset, '_imported'):
        # Importing the submodules triggers the registration decorators.
        load_dataset._imported = True
        import_submodules('datasets')
    key = name.lower()
    if key not in _registered_datasets:
        raise ValueError('dataset ' + key + ' not registered.')
    dataset_cls, extra_args = _registered_datasets[key]
    return dataset_cls(config=config, subset=subset, **extra_args)
|
@register_dataset(NAME)
class PascalVOCDataset(FileListDataset):
    """Pascal VOC semantic segmentation dataset."""

    def __init__(self, config, name, subset, num_classes):
        data_dir = config.string('data_dir', DEFAULT_PATH)
        super().__init__(config, name, subset, data_dir, num_classes)

    def read_inputfile_lists(self):
        # The (image, annotation) list files ship with the repository.
        list_name = 'train.txt' if self.subset == 'train' else 'val.txt'
        list_path = 'datasets/PascalVOC/' + list_name
        images = []
        annotations = []
        with open(list_path) as list_file:
            for line in list_file:
                img_rel, ann_rel = line.strip().split()
                images.append(self.data_dir + img_rel)
                annotations.append(self.data_dir + ann_rel)
        return images, annotations
|
def normalize(img, img_mean=IMAGENET_RGB_MEAN, img_std=IMAGENET_RGB_STD):
    """Standardize *img* channel-wise: subtract the mean, divide by the std.

    If the image has more channels than the statistics (e.g. appended
    guidance channels), the mean is padded with zeros and the std with
    ones so the extra channels pass through unchanged.
    NOTE(review): uses in-place -= and /=, so a numpy input is mutated.
    """
    if hasattr(img, 'get_shape'):
        n_channels = img.get_shape()[-1]
        if img_mean is not None and n_channels != img_mean.size:
            img_mean = np.concatenate([img_mean, np.zeros(n_channels - img_mean.size, dtype='float32')], axis=0)
        if img_std is not None and n_channels != img_std.size:
            img_std = np.concatenate([img_std, np.ones(n_channels - img_std.size, dtype='float32')], axis=0)
    if img_mean is not None:
        img -= img_mean
    if img_std is not None:
        img /= img_std
    return img
|
def unnormalize(img, img_mean=IMAGENET_RGB_MEAN, img_std=IMAGENET_RGB_STD):
    """Invert normalize(): multiply by the std, then add back the mean.

    Pads the statistics to the image's channel count the same way
    normalize() does, so extra channels are left unchanged.
    NOTE(review): uses in-place *= and +=, so a numpy input is mutated.
    """
    if hasattr(img, 'get_shape'):
        n_channels = img.get_shape()[-1]
        if img_mean is not None and n_channels != img_mean.size:
            img_mean = np.concatenate([img_mean, np.zeros(n_channels - img_mean.size, dtype='float32')], axis=0)
        if img_std is not None and n_channels != img_std.size:
            img_std = np.concatenate([img_std, np.ones(n_channels - img_std.size, dtype='float32')], axis=0)
    if img_std is not None:
        img *= img_std
    if img_mean is not None:
        img += img_mean
    return img
|
def save_with_pascal_colormap(filename, arr):
    """Save a label array as a paletted image using the Pascal VOC colormap."""
    palette = (np.array(pascal_colormap) * 255).round().astype('uint8')
    palette_image = Image.new('P', (16, 16))
    palette_image.putpalette(palette)
    label_image = Image.fromarray(np.squeeze(arr.astype('uint8')))
    quantized = label_image.quantize(palette=palette_image)
    quantized.save(filename)
|
class Forwarder(ABC):
    """Base class for forward (inference/evaluation) passes.

    Caches frequently used members of the engine so subclasses can
    access them directly.
    """

    def __init__(self, engine):
        self.engine = engine
        self.config = engine.config
        self.session = engine.session
        self.val_data = engine.valid_data
        self.train_data = engine.train_data
        self.trainer = engine.trainer
        self.saver = engine.saver

    @abstractmethod
    def forward(self):
        """Run the forward pass; implemented by subclasses."""
        pass
|
def init_log(config):
    """Set up file logging from the config (log_dir, model name, verbosity)."""
    log_dir = config.dir('log_dir', 'logs')
    model_name = config.string('model')
    log_file = log_dir + model_name + '.log'
    verbosity = config.int('log_verbosity', 3)
    log.initialize([log_file], [verbosity], [])
|
def main(_):
    """Entry point: load the config, set up logging, and run the engine.

    Expects exactly one command-line argument (the config path).
    Returns -1 if the config file is malformed.
    """
    assert (len(sys.argv) == 2), 'usage: main.py <config>'
    config_path = sys.argv[1]
    assert os.path.exists(config_path), config_path
    try:
        config = Config(config_path)
    except ValueError as e:
        print('Malformed config file:', e)
        return (- 1)
    init_log(config)
    # Echo the config into the log for reproducibility.
    # Bug fix: use a context manager so the file handle is closed
    # (the original bare open().read() leaked the handle).
    with open(config_path) as config_file:
        print(config_file.read(), file=log.v4)
    engine = Engine(config)
    engine.run()
|
class Conv(Layer):
    """2D convolution layer with optional bias, batch norm, pooling, dropout.

    With old_order=True the order is dropout -> conv -> (bn, activation);
    otherwise it is (bn, activation) -> dropout -> conv (pre-activation
    style). Max pooling, if any, is applied last in both cases.
    """
    output_layer = False
    def __init__(self, name, inputs, n_features, tower_setup, filter_size=(3, 3), old_order=False, strides=(1, 1), dilation=None, pool_size=(1, 1), pool_strides=None, activation='relu', dropout=0.0, batch_norm=False, bias=False, batch_norm_decay=Layer.BATCH_NORM_DECAY_DEFAULT, l2=Layer.L2_DEFAULT, padding='SAME'):
        super(Conv, self).__init__()
        (curr, n_features_inp) = prepare_input(inputs)
        filter_size = list(filter_size)
        strides = list(strides)
        pool_size = list(pool_size)
        if (pool_strides is None):
            # Default: pool stride equals the pool window (non-overlapping).
            pool_strides = pool_size
        with tf.variable_scope(name):
            W = self.create_weight_variable('W', (filter_size + [n_features_inp, n_features]), l2, tower_setup)
            b = None
            if bias:
                b = self.create_bias_variable('b', [n_features], tower_setup)
            if old_order:
                # Post-activation order: dropout -> conv -> bn -> activation.
                curr = apply_dropout(curr, dropout)
                if (dilation is None):
                    curr = conv2d(curr, W, strides, padding=padding)
                else:
                    curr = conv2d_dilated(curr, W, dilation, padding=padding)
                if bias:
                    curr += b
                if batch_norm:
                    curr = self.create_and_apply_batch_norm(curr, n_features, batch_norm_decay, tower_setup)
                curr = get_activation(activation)(curr)
            else:
                # Pre-activation order: bn -> activation -> dropout -> conv.
                if batch_norm:
                    curr = self.create_and_apply_batch_norm(curr, n_features_inp, batch_norm_decay, tower_setup)
                    curr = get_activation(activation)(curr)
                curr = apply_dropout(curr, dropout)
                if (dilation is None):
                    curr = conv2d(curr, W, strides, padding=padding)
                else:
                    curr = conv2d_dilated(curr, W, dilation, padding=padding)
                if bias:
                    curr += b
            if (pool_size != [1, 1]):
                curr = max_pool(curr, pool_size, pool_strides)
        self.outputs = [curr]
|
class ConvTranspose(Layer):
    """2D transposed convolution (learned upsampling) layer.

    Note the weight shape [h, w, n_out, n_in]: transposed convolutions
    swap the channel order relative to Conv.
    """
    output_layer = False
    def __init__(self, name, inputs, n_features, tower_setup, filter_size=(3, 3), strides=(1, 1), activation='relu', batch_norm=False, bias=False, batch_norm_decay=Layer.BATCH_NORM_DECAY_DEFAULT, l2=Layer.L2_DEFAULT, padding='SAME'):
        super(ConvTranspose, self).__init__()
        (curr, n_features_inp) = prepare_input(inputs)
        filter_size = list(filter_size)
        strides = list(strides)
        with tf.variable_scope(name):
            W = self.create_weight_variable('W', (filter_size + [n_features, n_features_inp]), l2, tower_setup)
            b = None
            if bias:
                b = self.create_bias_variable('b', [n_features], tower_setup)
            curr = conv2d_transpose(curr, W, strides, padding=padding)
            if bias:
                curr = tf.nn.bias_add(curr, b)
            if batch_norm:
                curr = self.create_and_apply_batch_norm(curr, n_features, batch_norm_decay, tower_setup)
            curr = get_activation(activation)(curr)
        self.outputs = [curr]
|
class ConvForOutput(Layer):
    """Convolution producing raw output logits (1x1 kernel by default).

    n_features == -1 selects the dataset's class count. No activation is
    applied to the output itself; input_activation applies to the input.
    """
    output_layer = False
    def __init__(self, name, inputs, dataset, n_features, tower_setup, filter_size=(1, 1), input_activation=None, dilation=None, l2=Layer.L2_DEFAULT, dropout=0.0):
        super().__init__()
        if (n_features == (- 1)):
            # Convention: -1 means "one output channel per class".
            n_features = dataset.num_classes()
        filter_size = list(filter_size)
        (inp, n_features_inp) = prepare_input(inputs)
        if (input_activation is not None):
            inp = get_activation(input_activation)(inp)
        inp = apply_dropout(inp, dropout)
        with tf.variable_scope(name):
            W = self.create_weight_variable('W', (filter_size + [n_features_inp, n_features]), l2, tower_setup)
            b = self.create_bias_variable('b', [n_features], tower_setup)
            if (dilation is None):
                output = (conv2d(inp, W) + b)
            else:
                output = (conv2d_dilated(inp, W, dilation) + b)
        self.outputs = [output]
|
class ResidualUnit(Layer):
    """Pre-activation residual unit (BN -> activation -> conv, repeated).

    If dilations is given, strides must be None and all convolutions are
    dilated instead of strided. A 1x1 projection shortcut is inserted
    when the channel count or total stride changes.
    """
    output_layer = False
    def __init__(self, name, inputs, tower_setup, n_convs=2, n_features=None, dilations=None, strides=None, filter_size=None, activation='relu', dropout=0.0, batch_norm_decay=Layer.BATCH_NORM_DECAY_DEFAULT, l2=Layer.L2_DEFAULT):
        super().__init__()
        (curr, n_features_inp) = prepare_input(inputs)
        res = curr
        assert (n_convs >= 1), n_convs
        if (dilations is not None):
            # Dilation and striding are mutually exclusive in this unit.
            assert (strides is None)
        elif (strides is None):
            strides = ([[1, 1]] * n_convs)
        if (filter_size is None):
            filter_size = ([[3, 3]] * n_convs)
        if (n_features is None):
            n_features = n_features_inp
        if (not isinstance(n_features, list)):
            # Broadcast a scalar channel count to every conv in the unit.
            n_features = ([n_features] * n_convs)
        with tf.variable_scope(name):
            # Pre-activation: BN + nonlinearity before the first conv.
            curr = self.create_and_apply_batch_norm(curr, n_features_inp, batch_norm_decay, tower_setup, 'bn0')
            curr = get_activation(activation)(curr)
            if tower_setup.is_training:
                curr = apply_dropout(curr, dropout)
            if (strides is None):
                strides_res = [1, 1]
            else:
                # Total stride of the unit = product of per-conv strides.
                strides_res = np.prod(strides, axis=0).tolist()
            if ((n_features[(- 1)] != n_features_inp) or (strides_res != [1, 1])):
                # Projection shortcut: 1x1 conv to match channels/stride.
                W0 = self.create_weight_variable('W0', ([1, 1] + [n_features_inp, n_features[(- 1)]]), l2, tower_setup)
                if (dilations is None):
                    res = conv2d(curr, W0, strides_res)
                else:
                    # Dilated path never strides, so the shortcut doesn't either.
                    res = conv2d(curr, W0)
            W1 = self.create_weight_variable('W1', (filter_size[0] + [n_features_inp, n_features[0]]), l2, tower_setup)
            if (dilations is None):
                curr = conv2d(curr, W1, strides[0])
            else:
                curr = conv2d_dilated(curr, W1, dilations[0])
            # Remaining convs, each preceded by its own BN + activation.
            for idx in range(1, n_convs):
                curr = self.create_and_apply_batch_norm(curr, n_features[(idx - 1)], batch_norm_decay, tower_setup, ('bn' + str((idx + 1))))
                curr = get_activation(activation)(curr)
                Wi = self.create_weight_variable(('W' + str((idx + 1))), (filter_size[idx] + [n_features[(idx - 1)], n_features[idx]]), l2, tower_setup)
                if (dilations is None):
                    curr = conv2d(curr, Wi, strides[idx])
                else:
                    curr = conv2d_dilated(curr, Wi, dilations[idx])
        # Residual connection.
        curr += res
        self.outputs = [curr]
|
class Upsampling(Layer):
    """Nearest-neighbor upsampling to a skip connection's size, then conv.

    The input is resized to match the spatial size of the *concat*
    features, concatenated with them along the channel axis, and fused
    with a single convolution + activation.
    """
    def __init__(self, name, inputs, tower_setup, n_features, concat, activation='relu', filter_size=(3, 3), l2=Layer.L2_DEFAULT):
        super(Upsampling, self).__init__()
        filter_size = list(filter_size)
        assert isinstance(concat, list)
        assert (len(concat) > 0)
        (curr, n_features_inp) = prepare_input(inputs)
        (concat_inp, n_features_concat) = prepare_input(concat)
        # Match the skip connection's spatial resolution.
        curr = tf.image.resize_nearest_neighbor(curr, tf.shape(concat_inp)[1:3])
        curr = tf.concat([curr, concat_inp], axis=3)
        n_features_curr = (n_features_inp + n_features_concat)
        with tf.variable_scope(name):
            W = self.create_weight_variable('W', (filter_size + [n_features_curr, n_features]), l2, tower_setup)
            b = self.create_bias_variable('b', [n_features], tower_setup)
            curr = (conv2d(curr, W) + b)
            curr = get_activation(activation)(curr)
        self.outputs = [curr]
|
class FullyConnected(Layer):
    """Fully connected layer with optional dropout and batch normalization."""
    def __init__(self, name, inputs, n_features, tower_setup, activation='relu', dropout=0.0, batch_norm=False, batch_norm_decay=Layer.BATCH_NORM_DECAY_DEFAULT, l2=Layer.L2_DEFAULT):
        super(FullyConnected, self).__init__()
        (inp, n_features_inp) = prepare_input(inputs)
        with tf.variable_scope(name):
            inp = apply_dropout(inp, dropout)
            if batch_norm:
                # fused_batch_norm expects a 4-D NHWC tensor; temporarily add
                # two singleton spatial dimensions around the feature vector.
                inp = tf.expand_dims(inp, axis=0)
                inp = tf.expand_dims(inp, axis=0)
                inp = self.create_and_apply_batch_norm(inp, n_features_inp, batch_norm_decay, tower_setup)
                inp = tf.squeeze(inp, axis=[0, 1])
            W = self.create_weight_variable('W', [n_features_inp, n_features], l2, tower_setup)
            b = self.create_bias_variable('b', [n_features], tower_setup)
            z = (tf.matmul(inp, W) + b)
            h = get_activation(activation)(z)
        self.outputs = [h]
        self.n_features = n_features
|
class Layer():
    """Base class for network layers: owns variables, summaries, losses.

    Provides helpers for creating (optionally L2-regularized) weight and
    bias variables and for batch normalization with moving-average
    statistics updates.
    """
    BATCH_NORM_DECAY_DEFAULT = 0.95
    BATCH_NORM_EPSILON = 1e-05
    L2_DEFAULT = 0.0001
    def __init__(self):
        self.summaries = []
        self.regularizers = []
        self.losses = []
        self.update_ops = []
        self.outputs = []
        self.measures = {}
        self.extractions = {}
        # Running count of parameters created by this layer.
        self.n_params = 0
    def add_scalar_summary(self, op, name):
        summary = tf.summary.scalar(name, op)
        self.summaries.append(summary)
    def add_image_summary(self, im, name):
        summary = tf.summary.image(name, im)
        self.summaries.append(summary)
    def create_and_apply_batch_norm(self, inp, n_features, decay, tower_setup, scope_name='bn'):
        """Create BN variables and apply batch normalization to *inp*.

        During (non-frozen) training the batch statistics are used and the
        moving averages are updated on the main tower only; otherwise the
        stored moving statistics are applied.
        """
        (beta, gamma, moving_mean, moving_var) = create_batch_norm_vars(n_features, tower_setup, scope_name)
        # beta and gamma are the two trainable parameter vectors.
        self.n_params += (2 * n_features)
        if tower_setup.is_main_train_tower:
            assert tower_setup.is_training
        if (tower_setup.is_training and (not tower_setup.freeze_batchnorm)):
            (xn, batch_mean, batch_var) = tf.nn.fused_batch_norm(inp, gamma, beta, epsilon=Layer.BATCH_NORM_EPSILON, is_training=True)
            if tower_setup.is_main_train_tower:
                # Only one tower updates the moving averages, avoiding
                # duplicate updates across GPUs.
                update_op1 = moving_averages.assign_moving_average(moving_mean, batch_mean, decay, zero_debias=False, name='mean_ema_op')
                update_op2 = moving_averages.assign_moving_average(moving_var, batch_var, decay, zero_debias=False, name='var_ema_op')
                self.update_ops.append(update_op1)
                self.update_ops.append(update_op2)
            return xn
        else:
            xn = tf.nn.batch_normalization(inp, moving_mean, moving_var, beta, gamma, Layer.BATCH_NORM_EPSILON)
            return xn
    def create_weight_variable(self, name, shape, l2, tower_setup, trainable=True, initializer=None):
        """Create a weight variable with optional L2 penalty and summaries."""
        with tf.device(tower_setup.variable_device):
            if (initializer is None):
                # He (MSRA) initialization, suited to ReLU-like activations.
                initializer = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False)
            self.n_params += np.prod(shape)
            W = tf.get_variable(name, shape, tower_setup.dtype, initializer, trainable=trainable)
            if (l2 > 0.0):
                self.regularizers.append((l2 * tf.nn.l2_loss(W)))
            if tower_setup.use_weight_summaries:
                summ = tf.summary.histogram(name, W)
                self.summaries.append(summ)
                self.add_scalar_summary(tf.reduce_max(tf.abs(W)), (name + '/W_abs_max'))
            return W
    def create_bias_variable(self, name, shape, tower_setup, trainable=True, initializer=None):
        """Create a bias variable (zero-initialized by default)."""
        with tf.device(tower_setup.variable_device):
            if (initializer is None):
                initializer = tf.constant_initializer(0.0, dtype=tower_setup.dtype)
            self.n_params += np.prod(shape)
            b = tf.get_variable(name, shape, tower_setup.dtype, initializer, trainable=trainable)
            if tower_setup.use_weight_summaries:
                summ = tf.summary.histogram(name, b)
                self.summaries.append(summ)
                self.add_scalar_summary(tf.reduce_max(tf.abs(b)), (name + '/b_abs_max'))
            return b
|
class Network():
    """Builds the full network: one NetworkTower per GPU, plus the aggregated
    losses, measures, extractions, update ops and summaries over all towers."""

    def __init__(self, config, dataset, is_trainnet, freeze_batchnorm, name, reuse_variables=None):
        self.name = name
        self.batch_size = (- 1)
        # Eval nets may use a separate batch size; fall back to the train one.
        if (not is_trainnet):
            self.batch_size = config.int('batch_size_eval', (- 1))
        if (self.batch_size == (- 1)):
            self.batch_size = config.int('batch_size')
        self.own_dataset_per_gpu = config.bool('own_dataset_per_gpu', False)
        self.input_tensors_dict = dataset.create_input_tensors_dict(self.batch_size)
        self._towers = self._build_towers(config, dataset, is_trainnet, freeze_batchnorm, reuse_variables)
        if is_trainnet:
            print('number of parameters:', '{:,}'.format(self._towers[0].n_params), file=log.v1)
        self.tower_total_losses_with_regularizers = [t.total_loss_with_regularizer for t in self._towers]
        self.tower_setups = [t.setup for t in self._towers]
        self.tower_measures = [t.measures for t in self._towers]
        self.tower_extractions = [t.extractions for t in self._towers]
        # Expose the raw input tensors as extractions of the first tower;
        # their keys must not collide with extractions produced by layers.
        for k in self.input_tensors_dict:
            assert (k not in self.tower_extractions[0])
        self.tower_extractions[0].update(self.input_tensors_dict)
        # Flatten the per-tower lists into single lists.
        self.update_ops = sum([t.update_ops for t in self._towers], [])
        self.summaries = sum([t.summaries for t in self._towers], [])
        self.summaries.extend(dataset.summaries)

    def _build_towers(self, config, dataset, is_trainnet, freeze_batchnorm, reuse_variables):
        """Create one NetworkTower per GPU (eval nets always use one tower)."""
        if is_trainnet:
            n_gpus = config.int('gpus', 1)
        else:
            n_gpus = 1
        towers = []
        with tf.name_scope(self.name):
            for gpu_idx in range(n_gpus):
                if (n_gpus == 1):
                    input_tensors_dict_sliced = self.input_tensors_dict
                    variable_device = '/gpu:0'
                else:
                    # Multi-GPU: either a private input pipeline per GPU, or
                    # slice the shared batch evenly across the GPUs.
                    if self.own_dataset_per_gpu:
                        if (gpu_idx == 0):
                            input_tensors_dict_sliced = self.input_tensors_dict
                        else:
                            input_tensors_dict_sliced = dataset.create_input_tensors_dict(self.batch_size)
                    else:
                        assert ((self.batch_size % n_gpus) == 0), 'batch_size must be divisible by the number of gpus'
                        slice_size = (self.batch_size // n_gpus)
                        slice_start = (slice_size * gpu_idx)
                        slice_end = (slice_size * (gpu_idx + 1))
                        input_tensors_dict_sliced = {k: v[slice_start:slice_end] for (k, v) in self.input_tensors_dict.items()}
                    # With multiple GPUs the shared variables live on the CPU.
                    variable_device = '/cpu:0'
                reuse_variables_in_tower = reuse_variables
                if (reuse_variables_in_tower is None):
                    # Train tower 0 creates the variables; eval nets and the
                    # remaining train towers reuse them.
                    reuse_variables_in_tower = ((not is_trainnet) or (gpu_idx > 0))
                tower_setup = TowerSetup(gpu_idx=gpu_idx, reuse_variables=reuse_variables_in_tower, dataset=dataset, variable_device=variable_device, is_training=is_trainnet, is_main_train_tower=(is_trainnet and (gpu_idx == 0)), freeze_batchnorm=freeze_batchnorm, network_name=self.name)
                tower = NetworkTower(config, tower_setup, input_tensors_dict_sliced, dataset)
                towers.append(tower)
        return towers
|
def get_layer_class(layer_class):
    """Resolve a layer class by name among the subclasses of Layer.

    On the first call, imports all submodules of the 'network' package so
    that every Layer subclass is registered before the lookup. Only direct
    subclasses of Layer are considered (Layer.__subclasses__()); deeper
    hierarchies would need a recursive walk — confirm whether that is needed.

    Args:
        layer_class: Name of the layer class as given in the config.

    Returns:
        The matching class object.

    Raises:
        AssertionError: if no (or more than one) subclass matches the name.
    """
    if (not hasattr(get_layer_class, '_imported')):
        # One-time side effect: pull in all layer modules so subclasses exist.
        get_layer_class._imported = True
        import_submodules('network')
    constructors = [l for l in Layer.__subclasses__() if (l.__name__ == layer_class)]
    # Fixed: the original had a dead `assert class_ is not None` after an
    # unguarded constructors[0] — an unknown name failed with an unhelpful
    # empty-list message. Report the failure mode explicitly instead.
    assert (len(constructors) == 1), ('Unknown or ambiguous layer class', layer_class, constructors)
    return constructors[0]
|
class TowerSetup():
    """Bundle of per-tower build settings.

    Carries device placement, variable-reuse and training-mode flags that
    layers and the variable factories consult while constructing the graph.
    """

    def __init__(self, gpu_idx, reuse_variables, dataset, variable_device, is_main_train_tower, is_training, freeze_batchnorm, network_name, use_weight_summaries=False):
        # Identification and placement.
        self.gpu_idx = gpu_idx
        self.network_name = network_name
        self.variable_device = variable_device
        self.dataset = dataset
        # Training-mode flags.
        self.is_training = is_training
        self.is_main_train_tower = is_main_train_tower
        self.freeze_batchnorm = freeze_batchnorm
        self.reuse_variables = reuse_variables
        # Variable dtype and optional weight summaries.
        self.dtype = tf.float32
        self.use_weight_summaries = use_weight_summaries
|
class NetworkTower():
    """One replica of the network on a single GPU.

    Builds all layers from the 'network' config dict and collects their
    losses, regularizers, summaries, update ops, measures and extractions.
    """

    def __init__(self, config, tower_setup, input_tensors_dict, dataset):
        network_def = config.dict('network')
        self.setup = tower_setup
        self.layers = {}
        self.summaries = []
        self.losses = []
        self.regularizers = []
        self.update_ops = []
        self.measures = {}
        self.extractions = {}
        # Only the main train tower logs the architecture, to avoid duplicates.
        if tower_setup.is_main_train_tower:
            print('inputs:', file=log.v4)
            for (k, v) in input_tensors_dict.items():
                print(k, v.get_shape().as_list(), file=log.v4)
            print('network:', file=log.v4)
        gpu_str = ('/gpu:' + str(tower_setup.gpu_idx))
        tower_name = ('tower_gpu_' + str(tower_setup.gpu_idx))
        with tf.variable_scope(tf.get_variable_scope(), reuse=tower_setup.reuse_variables), tf.device(gpu_str), tf.name_scope(tower_name):
            # Layers are built in config order; later layers may reference
            # earlier ones via 'from'/'concat' (see _create_layer).
            for (name, layer_def) in network_def.items():
                layer = self._create_layer(name, layer_def, tower_setup, input_tensors_dict, dataset)
                self.layers[name] = layer
                if tower_setup.is_main_train_tower:
                    print((name + ': '), file=log.v4, end='')
                    for out in layer.outputs:
                        print(out.get_shape().as_list(), file=log.v4, end='')
                    print(file=log.v4)
        # Aggregate the per-layer collections; measure and extraction keys
        # must be unique across layers.
        for layer in self.layers.values():
            self.summaries.extend(layer.summaries)
            self.losses.extend(layer.losses)
            self.regularizers.extend(layer.regularizers)
            self.update_ops.extend(layer.update_ops)
            for k in layer.measures.keys():
                assert (k not in self.measures)
            self.measures.update(layer.measures)
            for k in layer.extractions.keys():
                assert (k not in self.extractions)
            self.extractions.update(layer.extractions)
        if (len(self.losses) == 0):
            loss = tf.constant(0, dtype=tf.float32)
        else:
            loss = tf.add_n(self.losses)
        if (len(self.regularizers) == 0):
            reg = tf.constant(0, dtype=tf.float32)
        else:
            reg = tf.add_n(self.regularizers)
        self.total_loss_with_regularizer = (loss + reg)
        self.n_params = sum([l.n_params for l in self.layers.values()], 0)

    def _create_layer(self, name, layer_def, tower_setup, input_tensors_dict, dataset):
        """Instantiate one layer from its config dict.

        Special keys: 'class' selects the Layer subclass, 'from' lists the
        layers whose outputs become this layer's inputs, 'concat' lists layers
        whose outputs are forwarded as the 'concat' kwarg. All remaining keys
        are passed through as constructor keyword arguments.
        """
        layer_class = layer_def['class']
        class_ = get_layer_class(layer_class)
        # NOTE(review): inspect.getargspec is deprecated (removed in py3.11);
        # inspect.getfullargspec is the modern equivalent — confirm target
        # Python version before changing.
        spec = inspect.getargspec(class_.__init__)
        args = spec[0]
        # Copy before mutating: the config dict may be shared across towers.
        layer_def = layer_def.copy()
        if ('tower_setup' in args):
            layer_def['tower_setup'] = tower_setup
        if ('dataset' in args):
            layer_def['dataset'] = dataset
        if ('network_input_dict' in args):
            layer_def['network_input_dict'] = input_tensors_dict
        if ('from' in layer_def):
            inputs = sum([self.layers[x].outputs for x in layer_def['from']], [])
            del layer_def['from']
        else:
            # Without an explicit 'from', the layer reads the raw network input.
            inputs = [input_tensors_dict['inputs']]
        if ('concat' in layer_def):
            concat = sum([self.layers[x].outputs for x in layer_def['concat']], [])
            layer_def['concat'] = concat
        layer_def['inputs'] = inputs
        layer_def['name'] = name
        del layer_def['class']
        # Verify that every constructor argument without a default is supplied
        # (args[0] is 'self'; the last len(defaults) args have defaults).
        defaults = spec[3]
        if (defaults is None):
            defaults = []
        n_non_default_args = (len(args) - len(defaults))
        non_default_args = args[1:n_non_default_args]
        for arg in non_default_args:
            assert (arg in layer_def), (name, arg)
        layer = class_(**layer_def)
        return layer
|
class SegmentationSoftmax(Layer):
    """Softmax output layer for pixelwise segmentation.

    Produces posteriors and an argmax prediction from the logits of its single
    input layer, builds the configured cross-entropy loss against
    SEGMENTATION_LABELS, and computes measures (optionally at the original
    image resolution for single-example eval batches).
    """
    # Marks this layer as a network output layer.
    output_layer = True

    def __init__(self, name, inputs, dataset, network_input_dict, tower_setup, resize_targets=False, resize_logits=False, loss='ce', fraction=None):
        super().__init__()
        self.n_classes = dataset.num_classes()
        targets = network_input_dict[DataKeys.SEGMENTATION_LABELS]
        assert (targets.get_shape().ndims == 4), targets.get_shape()
        # Resizing both directions would be redundant; allow at most one.
        assert (not (resize_targets and resize_logits))
        assert (len(inputs) == 1), len(inputs)
        logits = inputs[0]
        assert (logits.get_shape()[(- 1)] == self.n_classes)
        if resize_targets:
            print('warning, using resize_targets=True, so the resulting scores will not be computed at the initial resolution', file=log.v1)
            targets = tf.image.resize_nearest_neighbor(targets, tf.shape(logits)[1:3])
        if resize_logits:
            logits = tf.image.resize_images(logits, tf.shape(targets)[1:3])
        output = tf.nn.softmax(logits, (- 1), 'softmax')
        self.outputs = [output]
        if (self.n_classes == 2):
            # Binary case: export the foreground (class 1) posterior.
            self.extractions[Extractions.SEGMENTATION_POSTERIORS] = output[(..., 1)]
        class_pred = tf.argmax(logits, axis=3)
        targets = tf.cast(targets, tf.int64)
        targets = tf.squeeze(targets, axis=3)
        self.loss = self._create_loss(loss, fraction, logits, targets)
        self.losses.append(self.loss)
        batch_size = smart_shape(targets)[0]
        # NOTE(review): 'batch_size == 1' only holds when smart_shape yields a
        # static Python int here; a dynamic tensor would never compare equal
        # — confirm smart_shape's behavior for this case.
        if ((not tower_setup.is_training) and (batch_size == 1) and (DataKeys.SEGMENTATION_LABELS_ORIGINAL_SIZE in network_input_dict)):
            print(tower_setup.network_name, name, ': Using SEGMENTATION_LABELS_ORIGINAL_SIZE for calculating IoU', file=log.v1)
            targets_for_measures = network_input_dict[DataKeys.SEGMENTATION_LABELS_ORIGINAL_SIZE]
            targets_for_measures = tf.cast(targets_for_measures, tf.int64)
            targets_for_measures = tf.squeeze(targets_for_measures, axis=3)
            self.extractions[Extractions.SEGMENTATION_MASK_INPUT_SIZE] = class_pred
            class_pred_for_measures = self._resize_predictions_to_original_size(class_pred, network_input_dict, targets_for_measures)
            self.extractions[Extractions.SEGMENTATION_MASK_ORIGINAL_SIZE] = class_pred_for_measures
        else:
            print(tower_setup.network_name, name, ': Using SEGMENTATION_LABELS for calculating IoU', file=log.v1)
            targets_for_measures = targets
            class_pred_for_measures = class_pred
            self.extractions[Extractions.SEGMENTATION_MASK_INPUT_SIZE] = class_pred_for_measures
        self.measures = self._create_measures(class_pred_for_measures, targets_for_measures)
        self.add_image_summary(tf.cast(tf.expand_dims(class_pred, axis=3), tf.float32), 'predicted labels')
        self.add_scalar_summary(self.loss, 'loss')

    def _create_loss(self, loss_str, fraction, logits, targets):
        """Build the configured loss: 'ce', 'bootstrapped_ce' (uses fraction)
        or 'class_balanced_ce'. Pixels labeled VOID_LABEL are excluded."""
        raw_ce = None
        n_valid_pixels_per_im = None
        if ('ce' in loss_str):
            # Replace void labels by class 0 for the CE op, then zero out
            # their contribution via the mask.
            no_void_label_mask = tf.not_equal(targets, VOID_LABEL)
            targets_no_void = tf.where(no_void_label_mask, targets, tf.zeros_like(targets))
            raw_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets_no_void, name='ce')
            raw_ce *= tf.cast(no_void_label_mask, tf.float32)
            n_valid_pixels_per_im = tf.reduce_sum(tf.cast(no_void_label_mask, tf.int32), axis=[1, 2])
        if (loss_str == 'ce'):
            # Mean over valid pixels per image, then mean over the batch.
            ce_per_im = tf.reduce_sum(raw_ce, axis=[1, 2])
            ce_per_im /= tf.cast(tf.maximum(n_valid_pixels_per_im, 1), tf.float32)
            ce_total = tf.reduce_mean(ce_per_im, axis=0)
            loss = ce_total
        elif (loss_str == 'bootstrapped_ce'):
            loss = bootstrapped_ce_loss(raw_ce, fraction, n_valid_pixels_per_im)
        elif (loss_str == 'class_balanced_ce'):
            loss = class_balanced_ce_loss(raw_ce, targets, self.n_classes)
        else:
            assert False, ('unknown loss', loss_str)
        return loss

    def _create_measures(self, pred, targets):
        """Return the measures dict. The loss is scaled by the number of
        examples so that per-batch values can be summed and renormalized."""
        n_examples = tf.shape(targets)[0]
        measures = {Measures.LOSS: (self.loss * tf.cast(n_examples, tf.float32)), Measures.N_EXAMPLES: n_examples}
        if (self.n_classes == 2):
            binary_measures = compute_measures_for_binary_segmentation_tf(pred, targets)
            measures.update(binary_measures)
        return measures

    @staticmethod
    def _resize_predictions_to_original_size(class_pred, network_input_dict, targets_for_measures):
        """Upsample predictions back to the original image size and, when a
        crop box is present, pad them back into the full image (zeros outside
        the crop)."""
        if (DataKeys.CROP_BOXES_y0x0y1x1 in network_input_dict):
            crop_box = tf.squeeze(network_input_dict[DataKeys.CROP_BOXES_y0x0y1x1], axis=0)
            (y0, x0, y1, x1) = tf.unstack(crop_box)
            height_before_resize = (y1 - y0)
            width_before_resize = (x1 - x0)
        else:
            # NOTE(review): this unpacks a slice of tf.shape(...) directly into
            # two names, which requires the tensor to be iterable here —
            # confirm this branch actually works in graph mode (tf.unstack is
            # used for the analogous case above).
            (height_before_resize, width_before_resize) = tf.shape(targets_for_measures)[1:3]
            (y0, x0, y1, x1) = (None, None, None, None)
        class_pred_original_size = tf.squeeze(tf.image.resize_nearest_neighbor(class_pred[(..., tf.newaxis)], [height_before_resize, width_before_resize]), axis=(- 1))
        if (DataKeys.CROP_BOXES_y0x0y1x1 in network_input_dict):
            # Padding amounts place the upsampled crop back at its original
            # position inside the full-size image.
            pad_y_l = y0
            pad_y_r = (tf.shape(targets_for_measures)[1] - y1)
            pad_x_l = x0
            pad_x_r = (tf.shape(targets_for_measures)[2] - x1)
            class_pred_for_measures = tf.pad(class_pred_original_size, [[0, 0], [pad_y_l, pad_y_r], [pad_x_l, pad_x_r]])
        else:
            class_pred_for_measures = class_pred_original_size
        return class_pred_for_measures
|
def conv2d(x, W, strides=(1, 1), padding='SAME'):
    """2D convolution in NHWC layout; batch and channel strides are fixed to 1."""
    spatial_strides = list(strides)
    return tf.nn.conv2d(x, W, strides=[1] + spatial_strides + [1], padding=padding)
|
def conv2d_transpose(x, W, strides=(1, 1), padding='SAME'):
    """Transposed 2D convolution (deconvolution) in NHWC layout.

    The output spatial size is derived from the input size, filter size,
    padding and stride via deconv_output_length.
    """
    spatial_strides = list(strides)
    filter_shape = tf.shape(W)
    input_shape = tf.shape(x)
    out_height = deconv_output_length(input_shape[1], filter_shape[0], padding, spatial_strides[0])
    out_width = deconv_output_length(input_shape[2], filter_shape[1], padding, spatial_strides[1])
    # Output channels come from the filter's third dimension ([h, w, out, in]).
    output_shape = tf.stack((input_shape[0], out_height, out_width, filter_shape[2]))
    return tf.nn.conv2d_transpose(x, W, output_shape, strides=[1] + spatial_strides + [1], padding=padding)
|
def conv2d_dilated(x, W, dilation, padding='SAME'):
    """Dilated (atrous) 2D convolution.

    Restores the static shape on the result, with the channel count taken
    from the filter, since atrous_conv2d loses it.
    """
    out = tf.nn.atrous_conv2d(x, W, dilation, padding=padding)
    static_shape = x.get_shape().as_list()
    static_shape[-1] = W.get_shape().as_list()[-1]
    out.set_shape(static_shape)
    return out
|
def create_batch_norm_vars(n_out, tower_setup, scope_name='bn'):
    """Create the four batch-norm variables for n_out channels.

    Args:
        n_out: Number of feature channels.
        tower_setup: TowerSetup providing the variable device.
        scope_name: Variable scope for the created variables.

    Returns:
        (beta, gamma, mean_ema, var_ema): shift, scale and the non-trainable
        moving averages of mean and variance.
    """
    with tf.device(tower_setup.variable_device), tf.variable_scope(scope_name):
        zeros = tf.constant_initializer(0.0, dtype=tf.float32)
        ones = tf.constant_initializer(1.0, dtype=tf.float32)
        beta = tf.get_variable('beta', [n_out], tf.float32, zeros)
        gamma = tf.get_variable('gamma', [n_out], tf.float32, ones)
        mean_ema = tf.get_variable('mean_ema', [n_out], tf.float32, zeros, trainable=False)
        # Fixed: initialize the variance EMA to 1 instead of 0. Before the
        # first moving-average update, evaluating with var=0 would normalize
        # by sqrt(0 + eps) and blow up the activations; ones matches the
        # tf.layers.batch_normalization moving_variance_initializer default.
        var_ema = tf.get_variable('var_ema', [n_out], tf.float32, ones, trainable=False)
        return (beta, gamma, mean_ema, var_ema)
|
def get_activation(act_str):
    """Look up an activation function by (case-insensitive) name."""
    key = act_str.lower()
    assert key in _activations, 'Unknown activation function ' + act_str
    return _activations[key]
|
def prepare_input(inputs):
    """Merge a list of feature maps into a single NHWC tensor.

    A single input is passed through unchanged; multiple inputs are
    concatenated along the channel axis. Returns (tensor, n_channels).
    """
    if len(inputs) == 1:
        merged = inputs[0]
        n_channels = int(merged.get_shape()[-1])
    else:
        n_channels = sum(int(t.get_shape()[3]) for t in inputs)
        merged = tf.concat(inputs, axis=3)
    return merged, n_channels
|
def apply_dropout(inp, dropout):
    """Apply dropout with drop probability `dropout`; no-op when it is 0.0."""
    if dropout == 0.0:
        return inp
    return tf.nn.dropout(inp, 1.0 - dropout)
|
def max_pool(x, shape, strides=None, padding='SAME'):
    """Max-pooling in NHWC layout.

    Args:
        x: Input tensor [batch, height, width, channels].
        shape: Pooling window [height, width] (list or tuple).
        strides: Spatial strides [height, width]; defaults to `shape`.
        padding: 'SAME' or 'VALID'.

    Returns:
        The pooled tensor.
    """
    # Fixed: convert to lists so tuple arguments work, matching conv2d which
    # already does list(strides); previously `[1] + shape` raised TypeError
    # for tuples.
    shape = list(shape)
    if strides is None:
        strides = shape
    else:
        strides = list(strides)
    return tf.nn.max_pool(x, ksize=[1] + shape + [1], strides=[1] + strides + [1], padding=padding)
|
def bootstrapped_ce_loss(raw_ce, fraction, n_valid_pixels_per_im):
    """Bootstrapped cross-entropy: per image, average only the hardest pixels.

    Args:
        raw_ce: Per-pixel cross-entropy, shape [batch, height, width].
        fraction: Fraction of valid pixels to keep per image.
        n_valid_pixels_per_im: Number of non-void pixels per image, [batch].

    Returns:
        Scalar loss, mean over the batch of the per-image hardest-pixel means.
    """
    # Number of pixels to keep per image: round(fraction * n_valid), >= 1.
    ks = tf.maximum(tf.cast(tf.round((tf.cast(n_valid_pixels_per_im, tf.float32) * fraction)), tf.int32), 1)
    def bootstrapped_ce_for_one_img(args):
        (one_ce, k) = args
        # top_k over the flattened per-pixel CE selects the k hardest pixels.
        hardest = tf.nn.top_k(tf.reshape(one_ce, [(- 1)]), k, sorted=False)[0]
        return tf.reduce_mean(hardest)
    loss_per_im = tf.map_fn(bootstrapped_ce_for_one_img, [raw_ce, ks], dtype=tf.float32)
    return tf.reduce_mean(loss_per_im)
|
def class_balanced_ce_loss(raw_ce, targets, n_classes):
    """Class-balanced cross-entropy.

    Per image, the cross-entropy is averaged within each class and then over
    the classes, so rare classes contribute as much as frequent ones; the
    result is averaged over the batch.

    Args:
        raw_ce: Per-pixel cross-entropy, shape [batch, height, width].
        targets: Integer label map, shape [batch, height, width].
        n_classes: Number of classes.

    Returns:
        Scalar loss.
    """
    def class_balanced_ce_for_one_img(args):
        (ce, target) = args
        cls_losses = []
        for cls in range(n_classes):
            cls_mask = tf.equal(target, cls)
            n_cls = tf.reduce_sum(tf.cast(cls_mask, tf.int32))
            # Mean CE over this class's pixels; max(n, 1) guards empty classes.
            cls_loss = (tf.reduce_sum(tf.boolean_mask(ce, cls_mask)) / tf.cast(tf.maximum(n_cls, 1), tf.float32))
            cls_losses.append(cls_loss)
        return (tf.add_n(cls_losses) / n_classes)
    loss_per_im = tf.map_fn(class_balanced_ce_for_one_img, [raw_ce, targets], dtype=tf.float32)
    return tf.reduce_mean(loss_per_im)
|
def _mobilenet_v2(net, depth_multiplier, output_stride, reuse=None, scope=None, final_endpoint=None):
    """Auxiliary function to add support for 'reuse' to mobilenet_v2.

    Args:
      net: Input tensor of shape [batch_size, height, width, channels].
      depth_multiplier: Float multiplier for the depth (number of channels)
        for all convolution ops. The value must be greater than zero. Typical
        usage will be to set this value in (0, 1) to reduce the number of
        parameters or computation cost of the model.
      output_stride: An integer that specifies the requested ratio of input to
        output spatial resolution. If not None, then we invoke atrous convolution
        if necessary to prevent the network from reducing the spatial resolution
        of the activation maps. Allowed values are 8 (accurate fully convolutional
        mode), 16 (fast fully convolutional mode), 32 (classification mode).
      reuse: Reuse model variables.
      scope: Optional variable scope.
      final_endpoint: The endpoint to construct the network up to.

    Returns:
      Features extracted by MobileNetv2.
    """
    # The explicit variable scope (with reuse) is the whole point of this
    # wrapper; mobilenet_base itself does not expose a 'reuse' argument here.
    with tf.variable_scope(scope, 'MobilenetV2', [net], reuse=reuse) as scope:
        return mobilenet_lib.mobilenet_base(net, conv_defs=mobilenet_v2.V2_DEF, multiplier=depth_multiplier, final_endpoint=(final_endpoint or _MOBILENET_V2_FINAL_ENDPOINT), output_stride=output_stride, scope=scope)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.