repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.8/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated (Objective or QTensor).
    :param variable: parameter with respect to which the objective should be
        differentiated. Default None: total gradient, i.e. one derivative
        Objective per variable of the objective, returned as a dict.
    :param no_compile: if True, skip the gradient-mode circuit compilation and
        differentiate the objective as given (caller guarantees compatibility).
    :return: dict of derivative Objectives keyed by variable if variable is
        None; otherwise a single derivative Objective (numbers can occur for
        trivial cases such as plain Variables).
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable.
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # Tensor of objectives: differentiate elementwise via numpy.vectorize.
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # Derivative w.r.t. a variable the objective does not contain.
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # Compile down to gates with known shift rules so the derivative is analytic.
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # Objective wrapping a single expectation value: differentiate its compiled form.
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    '''
    Chain rule over a (compiled) Objective: for every argument, combine the
    outer derivative of the transformation (via jax/autograd) with the inner
    derivative of the argument (via __grad_inner), and sum the products.

    :param objective: the compiled Objective to differentiate
    :param variable: the Variable to differentiate with respect to
    :return: the derivative as an Objective (or number for trivial cases)
    :raises TequilaException: if no autodiff backend is available or the
        accumulated derivative ends up being None
    '''
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values so each is built only once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # NOTE(review): holomorphic=True allows complex-valued
            # transformations; requires complex inputs at evaluation — confirm.
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            # autograd exposes the same interface under the jax alias here
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # argument is an expectation value: save redundancies via the cache
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost derivative dispatch: differentiate a single argument of an
    Objective with respect to *variable*.

    Variables differentiate to 1.0 when identical to *variable* and 0.0
    otherwise; FixedVariables always to 0.0; expectation values delegate to
    __grad_expectationvalue (compiled ones are differentiated abstractly and
    recompiled); anything else is treated as a nested Objective.

    :param arg: a Variable, FixedVariable, ExpectationValueImpl,
        compiled expectation value, or Objective
    :param variable: the Variable with respect to which arg is differentiated
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one, then
        # recompile with the same backend arguments as the original
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <H>_U with respect to
    a single variable, assembled by applying the shift rule to every gate in
    the circuit that depends on that variable.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which to differentiate
    :return: the derivative as an Objective; 0.0 if E does not depend on variable
    :raises TequilaException: if the circuit fails verification
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # one shift-rule contribution per (position, gate) depending on variable
    total = Objective()
    for position, gate in unitary._parameter_map[variable]:
        total += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of one directly differentiable gate via its shift rule.
    Expects precompiled circuits (every gate must expose shifted_gates).

    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: int: the position in unitary at which g appears
    :param variable: Variable or str: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the Hamiltonian with respect to which unitary is measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    :raises TequilaException: if g does not provide shifted gates (not compiled)
    '''
    # possibility for override in custom gate construction
    if hasattr(g, "shifted_gates"):
        # chain rule: derivative of the gate parameter w.r.t. the variable
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        # sum of weighted expectation values with the gate replaced by each shift
        for x in shifted:
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.4/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Read-only helper record bundling a coefficient with the operator matrices
    of one Hamiltonian term and the qubit positions they act on; used as an
    intermediate format between the Hamiltonian and the MPO construction.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # stored privately, exposed read-only through the properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices of this term, aligned with `positions`."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on, aligned with `operators`."""
        return self._positions
class MPOContainer:
    """
    Container for the tensors of a matrix product operator (MPO).

    One rank-4 tensor per qubit, index order [left bond, right bond,
    physical out, physical in]; the two physical dimensions are always 2.
    Supports writing entries, growing the bond dimensions (wannabe-equivalent
    to dynamic arrays) and compressing the MPO via SVD.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # BUGFIX: the np.complex alias was removed in NumPy 1.24; use the
        # builtin complex (== np.complex128) as dtype instead.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]
    def get_dim(self):
        """ Returns max (left) bond dimension over all sites of the container """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: [i, j] places a full 2x2 matrix at bond indices (i, j);
            [i, j, k, l] sets a single scalar entry.
        add_operator: the 2x2 matrix (length-2 set_at) or scalar (length-4).
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of `qubit`'s tensor (mimicking a dynamic
        array) and place `add_operator` in the newly created corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
            the last two (physical) dimensions are always 2x2 only.
            A length-2 list is padded with [0, 0].
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # BUGFIX: dtype=complex instead of the removed np.complex alias.
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values into the fresh bottom-right bond corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compression of the MPO bond dimensions via two SVD sweeps
        (left-to-right, then right-to-left), dropping singular values <= EPS.
        """
        n_qubits = len(self.container)
        # Merge the two physical legs so each tensor is rank 3 for the SVDs
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Undo permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the merged physical leg back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds a list of matrix-product-operator representations of a qubit
    Hamiltonian, each bounded in bond dimension by `maxdim`. Uses the
    tensornetwork package for contractions.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian to convert.
            backend: an optional backend. Defaults to the default backend
                of TensorNetwork (currently unused here).
            n_qubits: number of qubits; inferred from the Hamiltonian if None.
            name: an optional name for the MPO (currently unused).
            maxdim: maximum bond dimension of a single MPO before a new
                one is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()
    def get_n_qubits(self):
        # BUGFIX: this method was referenced in __init__ but never defined,
        # so omitting n_qubits raised AttributeError. Infer the qubit count
        # from the stored Hamiltonian instead.
        return self.hamiltonian.n_qubits
    @property
    def n_qubits(self):
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
    def openfermion_to_intermediate(self):
        """
        Translate the tequila QubitHamiltonian into a list of SubOperator
        records (coefficient, 2x2 operator matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # BUGFIX: dtype=complex instead of np.complex (removed in NumPy 1.24)
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be a pure identity (empty Pauli string);
            # its coefficient is later distributed over all sites.
            if first and not paulistring.items():
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate
    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index `j`, absorbing further terms
        until the bond dimension would reach self.maxdim.

        Returns (mpo, j_next) where j_next is the first term NOT included.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: plain 2x2 matrices at bond index (0, 0). Taking the n-th
        # root distributes the coefficient evenly over all sites.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other terms extend the bond dimensions until maxdim is hit
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # Periodic compression keeps intermediate bond dimensions in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j
    def intermediate_to_mpo(self, intermediate):
        """Split the term list into MPOs, each bounded in dimension by maxdim."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list
    def construct_matrix(self):
        '''
        Recover the dense operator (summing over all MPOs in self.mpo),
        e.g. to compare with the Hamiltonian that we get from tq.
        Returns a tensor of shape (d, d, d, d) with d = 2**(n_qubits/2).
        '''
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # BUGFIX: previously H_m was left undefined for non-torch
                # backends (e.g. numpy), raising NameError below.
                H_m = np.asarray(res.tensor)
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.4/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Energy-evaluation container handed to scipy.optimize; overwrites the
    tequila version so that the Hamiltonian itself may depend on the
    optimization parameters (it is rebuilt from the variables on each call).
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dict to a tq Hamiltonian.
    unitary:
        the (parametrized) circuit whose expectation value is evaluated.
    param_keys:
        ordered variable keys matching the positions in the parameter array.
    Ham_derivatives:
        derivatives of the Hamiltonian w.r.t. its variables (used by _GradContainer).
    samples:
        the number of samples to evaluate the expectation value with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered as param_keys.
        args
        kwargs
        Returns
        -------
        complex:
            the expectation value of the (re-built) Hamiltonian with the
            circuit, evaluated at p translated into variables.
        """
        angles = {}
        for i in range(self.N):
            # circuit variables stay real; pure Hamiltonian variables are cast
            # to complex (the parametrized Hamiltonian expects complex inputs)
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # rebuild the Hamiltonian at the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): scipy generally expects a real float here; the complex
        # return is deliberate ("jax types confuses optimizers") — confirm.
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient-evaluation container handed to scipy.optimize; overwrites the
    tequila version. For each parameter the derivative has up to two parts:
    the circuit derivative (shift rule via tq.grad) and the Hamiltonian
    derivative (expectation value of dH/dp).
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered as param_keys.
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient vector (dtype complex64), one entry per param_keys entry.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with a real dtype, but complex
        # derivative values are assigned below (imag part silently dropped /
        # ComplexWarning); the final cast to complex64 cannot restore it — confirm.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # circuit variables stay real; Hamiltonian variables become complex
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # part 1: derivative of the circuit (analytic shift rule)
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # part 2: derivative of the Hamiltonian, measured with the circuit
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # a compiled objective must still be evaluated; numbers pass through
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
def initialize_variables(self, all_variables, initial_values, variables):
"""
Convenience function to format the variables of some objective recieved in calls to optimzers.
Parameters
----------
objective: Objective:
the objective being optimized.
initial_values: dict or string:
initial values for the variables of objective, as a dictionary.
if string: can be `zero` or `random`
if callable: custom function that initializes when keys are passed
if None: random initialization between 0 and 2pi (not recommended)
variables: list:
the variables being optimized over.
Returns
-------
tuple:
active_angles, a dict of those variables being optimized.
passive_angles, a dict of those variables NOT being optimized.
variables: formatted list of the variables being optimized.
"""
# bring into right format
variables = format_variable_list(variables)
initial_values = format_variable_dictionary(initial_values)
all_variables = all_variables
if variables is None:
variables = all_variables
if initial_values is None:
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
elif hasattr(initial_values, "lower"):
if initial_values.lower() == "zero":
initial_values = {k:0.0 for k in all_variables}
elif initial_values.lower() == "random":
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
else:
raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
elif callable(initial_values):
initial_values = {k: initial_values(k) for k in all_variables}
elif isinstance(initial_values, numbers.Number):
initial_values = {k: initial_values for k in all_variables}
else:
# autocomplete initial values, warn if you did
detected = False
for k in all_variables:
if k not in initial_values:
initial_values[k] = 0.0
detected = True
if detected and not self.silent:
warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
active_angles = {}
for v in variables:
active_angles[v] = initial_values[v]
passive_angles = {}
for k, v in initial_values.items():
if k not in active_angles.keys():
passive_angles[k] = v
return active_angles, passive_angles, variables
def __call__(self, Hamiltonian, unitary,
variables: typing.List[Variable] = None,
initial_values: typing.Dict[Variable, numbers.Real] = None,
gradient: typing.Dict[Variable, Objective] = None,
hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
reset_history: bool = True,
*args,
**kwargs) -> SciPyResults:
"""
Perform optimization using scipy optimizers.
Parameters
----------
objective: Objective:
the objective to optimize.
variables: list, optional:
the variables of objective to optimize. If None: optimize all.
initial_values: dict, optional:
a starting point from which to begin optimization. Will be generated if None.
gradient: optional:
Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
hessian: optional:
Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
reset_history: bool: Default = True:
whether or not to reset all history before optimizing.
args
kwargs
Returns
-------
ScipyReturnType:
the results of optimization.
"""
H = convert_PQH_to_tq_QH(Hamiltonian)
Ham_variables, Ham_derivatives = H._construct_derivatives()
#print("hamvars",Ham_variables)
all_variables = copy.deepcopy(Ham_variables)
#print(all_variables)
for var in unitary.extract_variables():
all_variables.append(var)
#print(all_variables)
infostring = "{:15} : {}\n".format("Method", self.method)
#infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
if self.save_history and reset_history:
self.reset_history()
active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
#print(active_angles, passive_angles, variables)
# Transform the initial value directory into (ordered) arrays
param_keys, param_values = zip(*active_angles.items())
param_values = numpy.array(param_values)
# process and initialize scipy bounds
bounds = None
if self.method_bounds is not None:
bounds = {k: None for k in active_angles}
for k, v in self.method_bounds.items():
if k in bounds:
bounds[k] = v
infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
names, bounds = zip(*bounds.items())
assert (names == param_keys) # make sure the bounds are not shuffled
#print(param_keys, param_values)
# do the compilation here to avoid costly recompilation during the optimization
#compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
E = _EvalContainer(Hamiltonian = H,
unitary = unitary,
Eval=None,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level)
E.print_level = 0
(E(param_values))
E.print_level = self.print_level
infostring += E.infostring
if gradient is not None:
infostring += "{:15} : {}\n".format("grad instr", gradient)
if hessian is not None:
infostring += "{:15} : {}\n".format("hess_instr", hessian)
compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
compile_hessian = self.method in self.hessian_based_methods
dE = None
ddE = None
# detect if numerical gradients shall be used
# switch off compiling if so
if isinstance(gradient, str):
if gradient.lower() == 'qng':
compile_gradient = False
if compile_hessian:
raise TequilaException('Sorry, QNG and hessian not yet tested together.')
combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
samples=self.samples, noise=self.noise)
dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
infostring += "{:15} : QNG {}\n".format("gradient", dE)
else:
dE = gradient
compile_gradient = False
if compile_hessian:
compile_hessian = False
if hessian is None:
hessian = gradient
infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
if isinstance(gradient,dict):
if gradient['method'] == 'qng':
func = gradient['function']
compile_gradient = False
if compile_hessian:
raise TequilaException('Sorry, QNG and hessian not yet tested together.')
combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
samples=self.samples, noise=self.noise)
dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
infostring += "{:15} : QNG {}\n".format("gradient", dE)
if isinstance(hessian, str):
ddE = hessian
compile_hessian = False
if compile_gradient:
dE =_GradContainer(Ham_derivatives = Ham_derivatives,
unitary = unitary,
Hamiltonian = H,
Eval= E,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level)
dE.print_level = 0
(dE(param_values))
dE.print_level = self.print_level
infostring += dE.infostring
if self.print_level > 0:
print(self)
print(infostring)
print("{:15} : {}\n".format("active variables", len(active_angles)))
Es = []
optimizer_instance = self
    class SciPyCallback:
        """Scipy iteration callback that records the optimization history.

        Reads ``E``, ``dE``, ``ddE`` and ``optimizer_instance`` from the
        enclosing scope (closure over the surrounding method).
        NOTE(review): the lists below are class-level attributes and thus
        shared between instances; harmless here since only one instance
        is ever created per optimization run.
        """
        energies = []        # objective value recorded at each scipy iteration
        gradients = []       # gradient-container history entry (if dE is a container)
        hessians = []        # hessian-container history entry (if ddE is a container)
        angles = []          # parameter dictionary at each scipy iteration
        real_iterations = 0  # counts how often scipy actually invoked the callback
        def __call__(self, *args, **kwargs):
            """Append the most recent evaluation data stored by the containers."""
            self.energies.append(E.history[-1])
            self.angles.append(E.history_angles[-1])
            # dE/ddE may be strings ('2-point', ...) for numerical schemes
            if dE is not None and not isinstance(dE, str):
                self.gradients.append(dE.history[-1])
            if ddE is not None and not isinstance(ddE, str):
                self.hessians.append(ddE.history[-1])
            self.real_iterations += 1
            # forward to a user-supplied callback if one was given
            if 'callback' in optimizer_instance.kwargs:
                optimizer_instance.kwargs['callback'](E.history_angles[-1])
callback = SciPyCallback()
res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
args=(Es,),
method=self.method, tol=self.tol,
bounds=bounds,
constraints=self.method_constraints,
options=self.method_options,
callback=callback)
# failsafe since callback is not implemented everywhere
if callback.real_iterations == 0:
real_iterations = range(len(E.history))
if self.save_history:
self.history.energies = callback.energies
self.history.energy_evaluations = E.history
self.history.angles = callback.angles
self.history.angles_evaluations = E.history_angles
self.history.gradients = callback.gradients
self.history.hessians = callback.hessians
if dE is not None and not isinstance(dE, str):
self.history.gradients_evaluations = dE.history
if ddE is not None and not isinstance(ddE, str):
self.history.hessians_evaluations = ddE.history
# some methods like "cobyla" do not support callback functions
if len(self.history.energies) == 0:
self.history.energies = E.history
self.history.angles = E.history_angles
# some scipy methods always give back the last value and not the minimum (e.g. cobyla)
ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
E_final = ea[0][0]
angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
angles_final = {**angles_final, **passive_angles}
return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy function and passes the objective
    construction (from Hamiltonian and unitary) down to it.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian whose expectation value is minimized
    unitary:
        tequila circuit preparing the state
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # normalize user-supplied analytic gradients/hessians to Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: the second key element was wrapped in a list
            # (assign_variable([k[1]])) which is not a valid variable name
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.4/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    """Differentiate tequila structures.

    Works on Objectives, ExpectationValues, unitaries (including single
    gates) and transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which the objective should be
        differentiated; None (default) computes the total gradient.
    :return: a dictionary {variable: Objective} when variable is None,
        otherwise the derivative for the single requested variable.
    """
    # total gradient: recurse once per variable of the objective
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for v in all_vars:
            assert (v is not None)
            gradients[v] = grad(objective, v, no_compile=no_compile)
        return gradients

    variable = assign_variable(variable)

    # tensors of objectives are differentiated elementwise
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda x: grad(objective=x, variable=variable, *args, **kwargs))
        return elementwise(objective)

    # fast return: objective does not depend on the variable at all
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        compiled = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Differentiate an Objective via the chain rule.

    The outer derivative of the transformation is taken with jax/autograd,
    the inner derivatives of the arguments are obtained recursively through
    __grad_inner. Inner derivatives of expectation values are cached so no
    expectation value is differentiated twice.
    """
    args = objective.args
    transformation = objective.transformation
    accumulated = None
    inner_cache = {}
    for position, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # the outer derivative is trivially one for the identity transformation
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # expectation value: reuse a previously computed inner derivative
            if arg not in inner_cache:
                inner_cache[arg] = __grad_inner(arg=arg, variable=variable)
            inner = inner_cache[arg]
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=arg, variable=variable)
        # don't pile up zero expectation values
        if inner == 0.0:
            continue
        accumulated = outer * inner if accumulated is None else accumulated + outer * inner
    if accumulated is None:
        raise TequilaException("caught None in __grad_objective")
    return accumulated
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """Recursive base case of the differentiation chain.

    Returns 1.0/0.0 when arg is a (non-)matching Variable, the derivative
    of an expectation value, or recurses into a nested objective.

    :param arg: a transform, variable or expectation value to differentiate
    :param variable: the Variable with respect to which arg is differentiated
    """
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one and
        # re-compile with the original compilation arguments
        abstract = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(abstract, variable=variable)
        return compile(dE, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """Analytic partial derivative of an expectation value.

    Applies the shift rule to every gate of the unitary that carries the
    given variable and sums the contributions.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which to differentiate
    :return: the derivative as an Objective (0.0 if E does not depend on it)
    """
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    dO = Objective()
    for position, gate in unitary._parameter_map[variable]:
        dO += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """Shift-rule derivative of a single directly differentiable gate.

    Expects precompiled circuits: the gate must provide shifted_gates().

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient contribution
    """
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain-rule factor from the gate parameter's own variable dependence
    chain_factor = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expval = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * chain_factor) * expval
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.4/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Helper record for one Hamiltonian term in intermediate format.

    Stores the scalar coefficient, the single-qubit operator matrices and
    the qubit positions on which those operators act.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """List of single-qubit operator matrices."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on (parallel to operators)."""
        return self._positions
class MPOContainer:
    """
    Handles the MPO tensors: one (bond, bond, 2, 2) array per qubit.
    Supports setting values at given positions, growing the bond dimension
    (wannabe-equivalent to dynamic arrays) and SVD-based compression.
    """

    def __init__(self, n_qubits: int):
        """
        Args:
            n_qubits: number of sites; each starts as a 1x1x2x2 zero tensor.
        """
        self.n_qubits = n_qubits
        # BUGFIX: np.complex was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin complex is the documented replacement
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of the given qubit.

        Args:
            qubit: site index.
            set_at: [i, j] stores a whole 2x2 matrix at bond block (i, j);
                [i, j, k, l] stores a single scalar entry.
            add_operator: the 2x2 matrix or scalar to store.

        Raises:
            Exception: if set_at has neither length 2 nor 4.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of the given qubit and place add_operator in the
        newly created (last, last) bond block. Mimics a dynamic array.

        Args:
            qubit: site index.
            update_dir: e.g. [1, 1, 0, 0] -> extend each dimension marked 1;
                a length-2 list is padded with [0, 0]. The physical (last
                two) dimensions must never grow.
            add_operator: 2x2 matrix stored at the new corner block.

        Raises:
            Exception: on malformed update_dir.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # BUGFIX: dtype=np.complex removed in NumPy 1.24 -> builtin complex
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values at the last bond indices
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the bond dimensions of the MPO via two SVD sweeps
        (left-to-right, then right-to-left), discarding singular values
        below EPS and distributing sqrt(s) to both factors.
        """
        n_qubits = len(self.container)
        # fold the two physical legs into one for the SVDs
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb vh into the neighbouring site
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb u into the neighbouring site
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # restore the two separate physical legs
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Converts a qubit Hamiltonian into a list of matrix-product operators
    (MPOs) with bounded bond dimension, built on the tensornetwork package.

    The Hamiltonian is first translated into SubOperator records (one per
    Pauli string); these are then absorbed one by one into MPOContainer
    objects. When a container would exceed ``maxdim``, a new MPO is
    started, so the sum of all MPOs in ``self.mpo`` represents the full
    operator.
    """

    def __init__(self,
                 hamiltonian: 'Union[tq.QubitHamiltonian, Text]',
                 backend: 'Optional[Union[AbstractBackend, Text]]' = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the tequila QubitHamiltonian (or text) to represent.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork. (Currently unused; kept for interface
                compatibility.)
            n_qubits: number of qubits; if None it is inferred from the
                Hamiltonian.
            name: An optional name for the MPO. (Currently unused.)
            maxdim: maximal bond dimension per MPO before a new one starts.
        """
        # non-stdlib annotation types are written as forward references so
        # the signature does not require tq/tensornetwork at import time
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class --
            # confirm it is provided elsewhere before relying on this branch
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (sites) the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build self.mpo (a list of MPOContainer) from self.hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Hamiltonian's Pauli strings into SubOperator records.

        Returns:
            list of SubOperator, one per Pauli string of the Hamiltonian.

        Raises:
            Exception: if an identity term appears after the first string.
        """

        def get_pauli_matrix(string):
            # BUGFIX: np.complex was removed in NumPy 1.24 -> builtin complex
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first string may be identity -> distributed over all sites
            if first and not paulistring.items():
                positions += []
                operators += []
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # BUGFIX: `first` was only cleared inside the identity branch, so
            # a later identity term slipped past the check above when the
            # first string was non-identity; clear it after every string
            first = False
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Absorb terms of `intermediate`, starting at index j, into one
        MPOContainer until its bond dimension would exceed self.maxdim.

        Args:
            intermediate: list of SubOperator records.
            j: index of the first term to absorb.

        Returns:
            (mpo, j_next): the filled container and the index of the first
            term that was NOT absorbed.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # first term: write directly into the initial 1x1x2x2 tensors; the
        # coefficient is distributed evenly over all sites via its n-th root
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # remaining terms: grow each site's bond dimension by one block per
        # term, compressing periodically, until maxdim would be exceeded
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that; else identity
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # compress every 100 terms to keep intermediate dimensions small
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """
        Greedily split the term list into MPOs of bond dimension <= maxdim.

        Returns:
            list of MPOContainer whose sum represents the full operator.
        """
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator, e.g. to compare with the Hamiltonian
        obtained directly from tequila.

        Contracts every MPO over its bond indices and sums the results.

        Returns:
            numpy array of shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Network layout (container indices: [bond, bond, physical, physical]):
        #   |  |       |  |
        #  -O--O--...--O--O-
        #   |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left and right dangling edges
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # BUGFIX: H_m was only assigned for torch tensors, leaving it
            # unbound for other backends; fall back to the raw tensor
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if first:
                H = H_m
                first = False
            else:
                H += H_m
        # drop the trivial left/right bond indices, merge top & bottom legs
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.4/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable wrapper around a parametrized Hamiltonian and unitary that
    scipy can minimize directly; records the evaluation history.
    Used by the SciPy optimizer; should not be used elsewhere.

    Attributes
    ----------
    Hamiltonian:
        parametrized Hamiltonian; called with variables to obtain a
        concrete tq Hamiltonian.
    unitary:
        the circuit preparing the state.
    param_keys:
        maps positions in the scipy parameter vector to variable keys.
    samples:
        number of shots per evaluation.
    save_history:
        whether each evaluation is appended to the history lists.
    print_level:
        verbosity of printing during calls.
    N:
        number of active parameters (len(param_keys)).
    history / history_angles:
        recorded energies and angle dictionaries (if save_history).
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.samples = samples
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.Ham_derivatives = Ham_derivatives
        self.save_history = save_history
        self.print_level = print_level
        self.infostring = None
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at the scipy parameter vector p.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.

        Returns
        -------
        the energy as a complex number (jax types confuse the optimizers).
        """
        unitary_variables = self.unitary.extract_variables()
        angles = {}
        for i, key in enumerate(self.param_keys):
            # circuit angles stay real; Hamiltonian coefficients go complex
            angles[key] = p[i] if key in unitary_variables else complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        formatted = format_variable_dictionary(angles)
        ham_now = self.Hamiltonian(formatted)
        expectation = tq.ExpectationValue(H=ham_now, U=self.unitary)
        E = tq.simulate(expectation, formatted, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", expectation.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient counterpart of _EvalContainer: evaluates the energy gradient
    with respect to all active parameters and keeps the history.
    Used by the SciPy optimizer; should not be used elsewhere.
    See _EvalContainer for attribute details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the scipy parameter vector p.

        For circuit variables the derivative is built with tq.grad; for
        Hamiltonian variables the pre-computed Hamiltonian derivative is
        measured directly. Both contributions are summed per variable.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.

        Returns
        -------
        numpy.array:
            gradient vector as complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUGFIX: the vector was allocated as float64 although the entries
        # may be complex (the result is cast to complex64 below); modern
        # NumPy raises when storing complex values into a float array
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit (parameter-shift via tq.grad)
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian's own parameter dependence
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # plain numbers go in directly; otherwise `derivative` is a
            # compiled tequila objective that still has to be evaluated.
            # (Python complex added to the original float/complex64 check,
            # since the Hamiltonian-derivative branch simulates with
            # complex coefficients)
            if isinstance(derivative, (float, complex, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
def initialize_variables(self, all_variables, initial_values, variables):
"""
Convenience function to format the variables of some objective recieved in calls to optimzers.
Parameters
----------
objective: Objective:
the objective being optimized.
initial_values: dict or string:
initial values for the variables of objective, as a dictionary.
if string: can be `zero` or `random`
if callable: custom function that initializes when keys are passed
if None: random initialization between 0 and 2pi (not recommended)
variables: list:
the variables being optimized over.
Returns
-------
tuple:
active_angles, a dict of those variables being optimized.
passive_angles, a dict of those variables NOT being optimized.
variables: formatted list of the variables being optimized.
"""
# bring into right format
variables = format_variable_list(variables)
initial_values = format_variable_dictionary(initial_values)
all_variables = all_variables
if variables is None:
variables = all_variables
if initial_values is None:
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
elif hasattr(initial_values, "lower"):
if initial_values.lower() == "zero":
initial_values = {k:0.0 for k in all_variables}
elif initial_values.lower() == "random":
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
else:
raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
elif callable(initial_values):
initial_values = {k: initial_values(k) for k in all_variables}
elif isinstance(initial_values, numbers.Number):
initial_values = {k: initial_values for k in all_variables}
else:
# autocomplete initial values, warn if you did
detected = False
for k in all_variables:
if k not in initial_values:
initial_values[k] = 0.0
detected = True
if detected and not self.silent:
warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
active_angles = {}
for v in variables:
active_angles[v] = initial_values[v]
passive_angles = {}
for k, v in initial_values.items():
if k not in active_angles.keys():
passive_angles[k] = v
return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization of <H(vars)> over the circuit `unitary` using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian; converted to a tequila QubitHamiltonian via
            convert_PQH_to_tq_QH. Its own variables are optimized alongside the
            circuit variables.
        unitary:
            parametrized circuit whose expectation value of the Hamiltonian is minimized.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # Hamiltonian variables and their analytic derivatives (used by the
        # gradient container below).
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        # Full variable set = Hamiltonian variables + circuit variables.
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value dictionary into (ordered) arrays; the key
        # order of param_keys fixes the layout of the scipy parameter vector.
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        # Energy evaluation wrapper handed directly to scipy.optimize.minimize.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # One silent warm-up call (fills E.infostring and the first history entry).
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this method's scope;
                # selecting gradient='qng' will raise NameError here — confirm
                # whether this path was ever exercised after the refactor.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # Any other string (e.g. '2-point') is passed through to scipy
                # for numerical differentiation.
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` issue as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytic gradient wrapper (uses the Hamiltonian derivatives).
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # Per-iteration bookkeeping: snapshots the latest energy/angles (and
        # gradient/hessian when available) after each scipy iteration.
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # Forward to a user-supplied callback, if one was given.
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is assigned but never used — likely dead
            # code left over from a refactor; confirm before removing.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Construct a scipy optimizer and minimize <Hamiltonian> over the circuit `unitary`.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian to minimize (converted internally to a
        tequila QubitHamiltonian by the optimizer).
    unitary:
        parametrized circuit whose variables (plus the Hamiltonian's) are optimized.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation
        (does not work in combination with all optimizers),
        dictionary of variables and tequila objectives to define your own gradient,
        None for automatic construction (default),
        or 'qng' for the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        same options as gradient, with tuple-of-variables keys for the dictionary form.
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        initial variable values; generated by the optimizer if None.
    variables: typing.List[typing.Hashable], optional:
        list of variables to optimize; all if None.
    samples: int, optional:
        shots per circuit evaluation (None activates full wavefunction simulation).
    maxiter: int: (Default value = 100):
        maximum number of iterations.
    backend: str, optional:
        simulator backend, chosen automatically if None.
    backend_options: dict, optional:
        additional options passed to the compiled objective on every call.
    noise: NoiseModel, optional:
        noise model applied to all expectation values.
    device: str, optional:
        device on which to (emulatedly) run the circuits.
    method: str: (Default = "BFGS"):
        scipy optimization method.
    tol: float: (Default = 1.e-3):
        convergence tolerance (see scipy documentation).
    method_options: dict, optional:
        scipy method options.
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        variable bounds (see scipy documentation).
    method_constraints: optional:
        scipy constraints.
    silent: bool:
        no printout if True.
    save_history: bool:
        save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # Normalize user-supplied analytic gradients/hessians to Variable keys.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # bugfix: second tuple element was passed as a one-element list
            # (assign_variable([k[1]])), producing broken hessian keys; both
            # entries of the key must be converted identically.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.4/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    """
    Differentiate Objectives, ExpectationValues, unitaries (including single
    gates) and transforms.

    :param objective: the structure to be differentiated
    :param variable: the parameter with respect to which to differentiate;
        if None, the total gradient (one entry per variable) is returned.
    :return: dict of Objectives when variable is None; otherwise a single
        Objective (or number for purely variable-dependent inputs).
    """
    # Total gradient: recurse once per variable of the objective.
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        total = {}
        for var in all_vars:
            assert (var is not None)
            total[var] = grad(objective, var, no_compile=no_compile)
        return total

    variable = assign_variable(variable)

    # Tensors of objectives: differentiate elementwise.
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)

    # Objective does not depend on the variable at all.
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # Bring all gates into shift-rule-differentiable form first.
        gradient_compiler = CircuitCompiler(multitarget=True,
                                            trotterized=True,
                                            hadamard_power=True,
                                            power=True,
                                            controlled_phase=True,
                                            controlled_rotation=True,
                                            gradient_mode=True)
        compiled = gradient_compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Apply the chain rule over a transformed objective: for every argument,
    multiply the outer derivative of the transformation by the inner
    derivative of the argument, and sum the contributions.
    """
    arguments = objective.args
    transformation = objective.transformation
    accumulated = None
    inner_cache = {}  # avoid re-deriving identical expectation values
    for position, argument in enumerate(arguments):
        # Outer derivative of the transformation w.r.t. this argument slot.
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # Simple shortcut: for the identity transformation the outer factor is 1.
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=df)

        if hasattr(argument, "U"):
            # Expectation value: reuse a previously computed inner derivative.
            if argument not in inner_cache:
                inner_cache[argument] = __grad_inner(arg=argument, variable=variable)
            inner = inner_cache[argument]
        else:
            # Purely variable-dependent argument.
            inner = __grad_inner(arg=argument, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        term = outer * inner
        accumulated = term if accumulated is None else accumulated + term

    if accumulated is None:
        raise TequilaException("caught None in __grad_objective")
    return accumulated
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Innermost step of the recursive differentiation: dispatch on the type of
    `arg` and descend until plain variables are reached, where the derivative
    is 1.0 (same variable) or 0.0 (different variable).

    :param arg: a transform, variable or expectation-value object
    :param variable: the Variable with respect to which to differentiate
    """
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # Compiled expectation value: differentiate its abstract form, then
        # recompile with the same backend settings.
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # Fall back to treating it as a (nested) objective.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """
    Analytic partial derivative of an expectation value: apply the shift rule
    to every gate in the circuit that carries `variable` and sum the results.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which to differentiate
    :return: the derivative as an Objective (or 0.0 if independent)
    """
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    gradient = Objective()
    # _parameter_map holds (position, gate) pairs for every occurrence of the variable.
    for position, gate in unitary._parameter_map[variable]:
        gradient += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert gradient is not None
    return gradient
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """
    Gradient of a single directly-differentiable gate via its shift rule.
    Expects precompiled circuits (gates must expose `shifted_gates`).

    :param unitary: circuit containing the gate to differentiate
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: variable with respect to which g is differentiated
    :param hamiltonian: Hamiltonian measured against the circuit
    :return: an Objective whose evaluation yields d<H>/d(variable) for this gate
    """
    # Guard: gates that went through the compiler provide their own shift rule.
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # Chain-rule factor d(parameter)/d(variable).
    chain_factor = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * chain_factor) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.8/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight record for one Pauli term in an intermediate Hamiltonian
    format: a scalar coefficient, the operator matrices, and the qubit
    positions those operators act on (aligned index-by-index).
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # Stored privately; exposed read-only through the properties below.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar weight of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices, aligned with `positions`."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Container for the tensors of a matrix-product operator (MPO).

    Holds one rank-4 tensor per qubit with index order
    (left bond, right bond, physical-out, physical-in); the two physical
    indices always have dimension 2. Supports setting values, growing the
    bond dimensions like a dynamic array, and SVD-based compression.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # Start with trivial (1,1) bond dimensions on every site.
        # Fix: `np.complex` was removed in NumPy 1.24 — use builtin `complex`
        # (the two were always aliases, so dtype is unchanged).
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                            for q in range(self.n_qubits) ]
    def get_dim(self):
        """Return the maximum bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: [b_l, b_r] to place a full 2x2 matrix at that bond position,
                or [b_l, b_r, p_out, p_in] to set one scalar entry.
        add_operator: 2x2 matrix (for the 2-index form) or a scalar
                (for the 4-index form).
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array and place
        `add_operator` (a 2x2 matrix) in the newly created corner.

        update_dir: e.g. [1,1,0,0] -> extend bond dimension along every axis
        with a 1; a length-2 list is padded with [0,0]. The last two
        (physical) dimensions must stay at 2 and may not be extended.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # Fix: build a new list instead of `update_dir += [0, 0]`,
                # which mutated the caller's list in place.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # Fix: builtin `complex` instead of removed `np.complex` alias.
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the freshly created corner
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compress the MPO bond dimensions with a forward and a backward SVD
        sweep, discarding singular values below EPS and distributing the
        square-rooted weights to both neighbouring tensors.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each site is a rank-3 tensor.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Forward sweep (left to right)
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1] so the right bond is last
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False is the 'econ' SVD -> no zero singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Keep only singular values above the cutoff
            num_nonzeros = len(np.argwhere(s>EPS))
            # Square roots let us split the weight between both sides
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Undo the permutation and push vh into the next site
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Backward sweep (right to left)
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s>EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Split the merged physical index back into (2, 2).
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds a (list of) matrix-product-operator representations of a qubit
    Hamiltonian. Each Pauli string of the Hamiltonian is first converted
    into an intermediate `SubOperator` format and then accumulated into
    `MPOContainer` objects, starting a new MPO whenever the bond dimension
    would exceed `maxdim`.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize an MPO builder.

        Args:
            hamiltonian: the tequila QubitHamiltonian to represent.
            backend: an optional backend (currently unused here).
            n_qubits: number of qubits; required in practice (see note below).
            name: an optional name for the MPO.
            maxdim: bond-dimension threshold at which a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class, so
            # omitting n_qubits raises AttributeError — confirm the intended
            # source of the qubit count before relying on this branch.
            self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO."""
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian to MPOs and store them in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
    def openfermion_to_intermediate(self):
        """
        Convert the Pauli strings of self.hamiltonian into a list of
        SubOperator records (coefficient, operator matrices, positions).
        """
        def get_pauli_matrix(string):
            # Fix: builtin `complex` replaces the removed NumPy alias
            # np.complex (gone since NumPy 1.24); dtype is unchanged.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first (identity) term carries no operators.
            # NOTE(review): `first` is only cleared when the first term IS the
            # identity; a non-leading identity term slips through otherwise —
            # confirm the Hamiltonian ordering guarantees this.
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate
    def build_single_mpo(self, intermediate, j):
        """
        Accumulate Pauli terms starting at index j into one MPOContainer
        until the bond dimension reaches self.maxdim or the terms run out.
        Returns the container and the index of the next unprocessed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: write 2x2 matrices directly into the (1,1) bond slots.
        # The coefficient is spread evenly over all sites via its n-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                # Fix: builtin `complex` instead of removed np.complex.
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                                   np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                                   my_operators[my_pos_index])
        # ***********************************************************************
        # Remaining terms: grow the bond dimensions (dynamic-array style) and
        # write each term into the newly created corner; compress periodically.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                            complex(my_coefficient)**(1/n_qubits)*
                                            my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                            complex(my_coefficient)**(1/n_qubits)*
                                            np.eye(2))
            # Compress every 100 terms to keep bond dimensions small.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j
    def intermediate_to_mpo(self, intermediate):
        """
        Split the full list of terms into as many MPOs as needed so that each
        stays below the maxdim bond-dimension threshold.
        """
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list
    def construct_matrix(self):
        """
        Recover the dense operator from the stored MPOs (e.g. to compare with
        the Hamiltonian matrix obtained directly from tequila). Returns a
        rank-4 tensor of shape (d,d,d,d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        # Contract over all bond indices; each container tensor has indices
        # [bond, bond, physical, physical].
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        # | | | |
        # -O--O--...--O--O-
        # | | | |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # NOTE(review): if the backend does not return a torch.Tensor,
            # H_m is left undefined and the next line raises — confirm the
            # backend is always pytorch here.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.8/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        parametrized Hamiltonian; called with the current variables to
        produce the concrete Hamiltonian for each evaluation.
    unitary:
        the parametrized circuit to evaluate.
    param_keys:
        the tuple mapping parameter keys to positions in the scipy numpy array.
    Ham_derivatives:
        analytic derivatives of the Hamiltonian (used by _GradContainer).
    samples:
        the number of samples to evaluate the expectation value with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate <H(angles)> for the parameter vector scipy proposes.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective (ordered as param_keys).
        args
        kwargs

        Returns
        -------
        complex:
            the expectation value; see the note on the return below.
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # Circuit variables stay real; all remaining (Hamiltonian) variables
        # are cast to complex before substitution.
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        # Merge in the variables that are held fixed during optimization.
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Substitute variables into the parametrized Hamiltonian, then build
        # and simulate the expectation value.
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): scipy.optimize.minimize expects a real scalar; the
        # complex() cast here presumably works around jax scalar types, but a
        # genuinely complex E would break scipy — confirm E is always real.
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy gradient at parameter vector p.

        The total derivative w.r.t. each parameter has up to two parts:
        * the circuit derivative: tq.grad of <H(vars)> w.r.t. variables that
          appear in the unitary, and
        * the Hamiltonian derivative: <dH/dtheta> for variables that appear
          in the parametrized Hamiltonian (looked up in self.Ham_derivatives).

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            complex64 gradient vector of length self.N.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUG FIX: the accumulator must be complex. Hamiltonian derivatives can
        # evaluate to complex numbers and assigning those into a float64 array
        # (numpy.zeros default) raises a TypeError; the function returns a
        # complex64 array anyway.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # circuit parameters stay as given, Hamiltonian parameters are
            # cast to complex (same convention as _EvalContainer.__call__)
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            if self.param_keys[i] in list(unitary.extract_variables()):
                # circuit contribution: compiled objective, evaluated below
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                # Hamiltonian contribution: simulate <dH/dtheta> directly
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # A plain number means the derivative was fully evaluated already;
            # otherwise it is (or contains) a compiled objective to be called.
            # BUG FIX: builtin complex is included — tq.simulate can return a
            # plain complex, which is not callable.
            if isinstance(derivative, (float, complex, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    overwrite the expectation and gradient container objects

    Variant of tequila's OptimizerSciPy that works on a (Hamiltonian, unitary)
    pair directly: the energy and gradient callables handed to scipy are the
    local _EvalContainer/_GradContainer objects defined in this module.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        objective: Objective:
            the objective being optimized.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module — this branch would raise NameError; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings/TequilaWarning are not imported in
                # this module — this branch would raise NameError; confirm.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        objective: Objective:
            the objective to optimize.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # collect all variables: those of the (parametrized) Hamiltonian plus
        # those of the circuit
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        # energy container that scipy will call; one silent warm-up evaluation
        # below populates its infostring
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective`, get_qng_combos and _QngContainer
                # are not defined in this module (the container import at the
                # top is commented out) — this QNG path would raise NameError;
                # confirm before using gradient='qng'.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # e.g. '2-point'/'3-point'/'cs': scipy does the numerics itself
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient container (circuit + Hamiltonian derivatives);
            # warmed up once, silently, to populate its infostring
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # per-iteration bookkeeping; scipy invokes this after each step
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward user callback with the latest evaluated angles
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this only binds an unused local; kept as-is since
            # it mirrors the upstream tequila implementation.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and pass down the objective construction
    down
    Parameters
    ----------
    objective: Objective :
        The tequila objective to optimize
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize user-supplied analytic gradient/hessian dictionaries
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUG FIX: the second key element was passed as the one-element
            # list [k[1]] instead of k[1] itself (compare upstream tequila),
            # which breaks custom hessian dictionaries.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.8/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param obj (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
    :param variables (list of Variable): parameter with respect to which obj should be differentiated.
        default None: total gradient.
    return: dictionary of Objectives, if called on gate, circuit, exp.value, or objective; if Variable or Transform, returns number.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            # note: *args/**kwargs are intentionally not forwarded here
            # (mirrors the upstream tequila implementation)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # elementwise gradient over a tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # objective does not depend on variable -> zero objective
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile into a gate set for which shift rules are known
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch on the structure of the (compiled) objective
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    # Chain rule over the transformation of an Objective:
    # d f(E_1,...,E_n)/dv = sum_i (df/dE_i) * (dE_i/dv)
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of repeated expectation values
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # holomorphic=True permits complex-valued transformations
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    a modified loop over __grad_objective, which gets derivatives
    all the way down to variables, return 1 or 0 when a variable is (isnt) identical to var.
    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which par should be differentiated.
    :ivar var: the string representation of variable
    '''
    assert isinstance(variable, Variable)
    # base case: d(arg)/d(variable) is 1 iff arg IS that variable
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate its abstract form,
        # then recompile with the original input arguments
        abstract = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(abstract, variable=variable)
        return compile(dE, **arg._input_args)
    # anything else is treated as a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param unitary: the unitary whose gradient should be obtained
    :param variables (list, dict, str): the variables with respect to which differentiation should be performed.
    :return: vector (as dict) of dU/dpi as Objective (without hamiltonian)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all (position, gate) pairs in the circuit that carry this variable
    param_gates = unitary._parameter_map[variable]
    # total derivative = sum of shift-rule contributions of every such gate
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for overwride in custom gate construction
    if hasattr(g, "shifted_gates"):
        # chain-rule factor d(gate parameter)/d(variable)
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        for x in shifted:
            # each entry is a (weight, shifted-gate) pair; note `g` is
            # deliberately rebound to the shifted gate inside the loop
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.6/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight intermediate record used while translating a Hamiltonian
    into MPO form: one Pauli term as a read-only triple of coefficient,
    single-qubit operator matrices and the qubit positions they act on.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # stored privately; exposed via the read-only properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions
    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
    @property
    def operators(self):
        """Single-qubit operator matrices of this term."""
        return self._operators
    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the MPO.

    Each site tensor has shape (left bond, right bond, 2, 2); the last two
    axes are the physical (row, column) indices of a single-qubit operator.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # one bond-dimension-1 tensor per qubit.
        # FIX: `np.complex` was removed in numpy >= 1.24; the builtin `complex`
        # is the documented replacement (same dtype, complex128).
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                           for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max bond dimension (first axis) over all site tensors. """
        return max((t.shape[0] for t in self.container), default=1)
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: where to put data — either [left, right] to set a full 2x2
        matrix, or [left, right, row, col] to set a single scalar entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array and append a 2x2 block
        into the freshly grown corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two dimensions are always 2x2 only. A length-2 list is padded
        with [0, 0].
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # FIX: use concatenation instead of +=, which mutated the
                # caller's list in place
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # FIX: np.complex removed in numpy >= 1.24 -> builtin complex
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compression of MPO via SVD: a left-to-right then a right-to-left sweep
        that drops singular values below EPS and distributes sqrt(s) to both
        neighbouring tensors.
        """
        n_qubits = len(self.container)
        # flatten the physical 2x2 axes for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # restore the (Dl, Dr, 2, 2) layout
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
"""
Class building up on tensornetwork FiniteMPO to handle
MPO-Hamiltonians
"""
def __init__(self,
hamiltonian: Union[tq.QubitHamiltonian, Text],
# tensors: List[Tensor],
backend: Optional[Union[AbstractBackend, Text]] = None,
n_qubits: Optional[int] = None,
name: Optional[Text] = None,
maxdim: Optional[int] = 10000) -> None:
# TODO: modifiy docstring
"""
Initialize a finite MPO object
Args:
tensors: The mpo tensors.
backend: An optional backend. Defaults to the defaulf backend
of TensorNetwork.
name: An optional name for the MPO.
"""
self.hamiltonian = hamiltonian
self.maxdim = maxdim
if n_qubits:
self._n_qubits = n_qubits
else:
self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        # read-only number of qubits the MPO acts on
        return self._n_qubits
def make_mpo_from_hamiltonian(self):
intermediate = self.openfermion_to_intermediate()
# for i in range(len(intermediate)):
# print(intermediate[i].coefficient)
# print(intermediate[i].operators)
# print(intermediate[i].positions)
self.mpo = self.intermediate_to_mpo(intermediate)
def openfermion_to_intermediate(self):
# Here, have either a QubitHamiltonian or a file with a of-operator
# Start with Qubithamiltonian
def get_pauli_matrix(string):
pauli_matrices = {
'I': np.array([[1, 0], [0, 1]], dtype=np.complex),
'Z': np.array([[1, 0], [0, -1]], dtype=np.complex),
'X': np.array([[0, 1], [1, 0]], dtype=np.complex),
'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex)
}
return pauli_matrices[string.upper()]
intermediate = []
first = True
# Store all paulistrings in intermediate format
for paulistring in self.hamiltonian.paulistrings:
coefficient = paulistring.coeff
# print(coefficient)
operators = []
positions = []
# Only first one should be identity -> distribute over all
if first and not paulistring.items():
positions += []
operators += []
first = False
elif not first and not paulistring.items():
raise Exception("Only first Pauli should be identity.")
# Get operators and where they act
for k,v in paulistring.items():
positions += [k]
operators += [get_pauli_matrix(v)]
tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
intermediate += [tmp_op]
# print("len intermediate = num Pauli strings", len(intermediate))
return intermediate
def build_single_mpo(self, intermediate, j):
    """
    Pack consecutive terms of ``intermediate``, starting at index ``j``,
    into a single MPO.

    The first term initialises every site tensor at bond position (0, 0);
    each subsequent term grows the bond dimension by one and is written into
    the newly created corner. Terms are absorbed until the list is exhausted
    or the bond dimension reaches ``self.maxdim``. The MPO is SVD-compressed
    every 100 terms and once more at the end.

    Each term's coefficient is distributed evenly over the chain as
    ``coefficient**(1/n_qubits)`` per site.

    Args:
        intermediate: list of SubOperator records.
        j: index of the first term to absorb.

    Returns:
        (mpo, j) where ``j`` is the index of the first term NOT absorbed.
    """
    n_qubits = self._n_qubits
    mpo = MPOContainer(n_qubits=n_qubits)
    # ***********************************************************************
    # First term: fill bond position (0, 0) of every site tensor.
    # BUG FIX (throughout): np.complex was removed in NumPy 1.24 — the
    # builtin complex() performs the same conversion.
    my_coefficient = intermediate[j].coefficient
    my_positions = intermediate[j].positions
    my_operators = intermediate[j].operators
    for q in range(n_qubits):
        if q not in my_positions:
            mpo.set_tensor(qubit=q, set_at=[0, 0],
                           add_operator=complex(my_coefficient)**(1 / n_qubits) *
                           np.eye(2))
        else:
            my_pos_index = my_positions.index(q)
            mpo.set_tensor(qubit=q, set_at=[0, 0],
                           add_operator=complex(my_coefficient)**(1 / n_qubits) *
                           my_operators[my_pos_index])
    # ***********************************************************************
    # Remaining terms: grow the bond dimension until maxdim is hit.
    j += 1
    while j < len(intermediate) and mpo.get_dim() < self.maxdim:
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # Edge sites only grow one bond; interior sites grow both.
            # (Every qubit index appears at most once in my_positions.)
            if q == 0:
                update_dir = [0, 1]
            elif q == n_qubits - 1:
                update_dir = [1, 0]
            else:
                update_dir = [1, 1]
            if q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=
                                     complex(my_coefficient)**(1 / n_qubits) *
                                     my_operators[my_pos_index])
            else:
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=
                                     complex(my_coefficient)**(1 / n_qubits) *
                                     np.eye(2))
        # Periodic compression keeps the bond dimension manageable.
        if not j % 100:
            mpo.compress_mpo()
        j += 1
    mpo.compress_mpo()
    return mpo, j
def intermediate_to_mpo(self, intermediate):
    """
    Consume the intermediate term list and return a list of MPOs.

    build_single_mpo absorbs as many consecutive terms as the bond-dimension
    cap allows and reports how far it got; we restart from that index until
    every term has been packed.
    """
    n_qubits = self._n_qubits  # kept from the original; not used below
    mpo_list = []
    position = 0
    while position < len(intermediate):
        current_mpo, position = self.build_single_mpo(intermediate, position)
        mpo_list.append(current_mpo)
    return mpo_list
def construct_matrix(self):
    ''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq '''
    # TODO extend to lists of MPOs
    mpo = self.mpo
    # Contract over all bond indices.
    # mpo.container has indices [bond, bond, physical, physical]
    n_qubits = self._n_qubits
    # NOTE(review): d = 2**(n/2) assumes an even qubit count so that the
    # final reshape to (d, d, d, d) is exact — confirm for odd n_qubits.
    d = int(2**(n_qubits / 2))
    first = True
    H = None
    # Network layout (O = site tensor, -- = bond edge, | = physical edge):
    # | | | |
    # -O--O--...--O--O-
    # | | | |
    for m in mpo:
        assert (n_qubits == len(m.container))
        nodes = [tn.Node(m.container[q], name=str(q))
                 for q in range(n_qubits)]
        # Connect neighbouring bond edges.
        for q in range(n_qubits - 1):
            nodes[q][1] ^ nodes[q + 1][0]
        # Collect dangling (free) edges in a fixed output order:
        # left bond, right bond, then all upper, then all lower physical edges.
        edges = []
        edges += [nodes[0].get_edge(0)]
        edges += [nodes[-1].get_edge(1)]
        for q in range(n_qubits):
            edges += [nodes[q].get_edge(2)]
        for q in range(n_qubits):
            edges += [nodes[q].get_edge(3)]
        # Contract all non-dangling edges.
        res = tn.contractors.auto(nodes, output_edge_order=edges)
        # BUG FIX: H_m was only assigned for torch tensors, so any other
        # backend (e.g. numpy) raised NameError here. Non-torch results are
        # now coerced to a numpy array explicitly.
        if isinstance(res.tensor, torch.Tensor):
            H_m = res.tensor.numpy()
        else:
            H_m = np.asarray(res.tensor)
        if first:
            H = H_m
            first = False
        else:
            H = H + H_m
    # Drop the trivial left/right bond indices and fold the physical legs
    # into a rank-4 tensor.
    return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.6/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    SciPy-facing energy evaluator (do not use outside the optimizer).

    Wraps a parametrized Hamiltonian constructor and a fixed unitary; each
    call translates the flat parameter vector supplied by scipy.optimize back
    into named tequila variables, rebuilds the Hamiltonian at those values
    and simulates the expectation value <U|H|U> on the qulacs backend.

    Attributes
    ----------
    Hamiltonian:
        callable mapping a variable dictionary to a tequila Hamiltonian.
    unitary:
        the (possibly parametrized) circuit U.
    param_keys:
        ordered variable keys matching positions in the scipy parameter array.
    N:
        number of active parameters, len(param_keys).
    passive_angles:
        variables held fixed during the optimization, merged into every call.
    samples:
        number of shots per evaluation.
    save_history / history / history_angles:
        optional bookkeeping of every evaluated energy and angle set.
    print_level:
        verbosity of the per-call printout.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None,
                 passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector ``p``.

        Returns the energy as a builtin complex (jax scalar types confuse
        the scipy optimizers).
        """
        angles = {}
        for idx in range(self.N):
            key = self.param_keys[idx]
            # Circuit variables are passed through as-is; Hamiltonian-only
            # variables are promoted to complex before substitution.
            if key in self.unitary.extract_variables():
                angles[key] = p[idx]
            else:
                angles[key] = complex(p[idx])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        substituted = format_variable_dictionary(angles)
        hamiltonian = self.Hamiltonian(substituted)
        expectation = tq.ExpectationValue(H=hamiltonian, U=self.unitary)
        energy = tq.simulate(expectation, substituted, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", expectation.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(energy), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(energy))
        if self.save_history:
            self.history.append(energy)
            self.history_angles.append(angles)
        return complex(energy)
class _GradContainer(_EvalContainer):
    """
    SciPy-facing gradient evaluator (see _EvalContainer for attributes).

    The gradient w.r.t. each active parameter has two contributions:
    (1) the derivative of the circuit (via tq.grad on the expectation value)
    when the parameter appears in the unitary, and
    (2) the expectation value of the Hamiltonian derivative when the
    parameter appears in the Hamiltonian.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector ``p``.

        Returns
        -------
        numpy.ndarray (complex64) of length self.N.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUG FIX: the accumulator was real-valued (numpy.zeros(N)), which
        # silently discarded imaginary parts before the complex64 cast on
        # return. Allocate it complex from the start.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(self.N):
            # Circuit variables stay real; Hamiltonian-only variables are
            # promoted to complex (mirrors _EvalContainer.__call__).
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution of the circuit derivative.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]),
                                             backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution of the Hamiltonian derivative.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # BUG FIX: the original only recognised float and numpy.complex64
            # scalars; any other numpy scalar (e.g. complex128) fell through
            # to the callable branch and raised TypeError.
            if isinstance(derivative, (float, complex, numpy.floating, numpy.complexfloating)):
                dE_vec[i] = derivative
            else:
                # Still a compiled tequila objective -> evaluate it now.
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that rebuilds the Hamiltonian from the current
    variable values at every evaluation, using the local _EvalContainer /
    _GradContainer wrappers instead of a precompiled objective.
    """

    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Format the optimization variables and split them into active/passive.

        Parameters
        ----------
        all_variables:
            every variable of the Hamiltonian and the unitary.
        initial_values: dict, str, callable or number:
            starting point; strings 'zero'/'random' and callables are
            expanded over all_variables; an incomplete dict is autocompleted
            with zeros.
        variables: list:
            the variables to optimize; None optimizes all.

        Returns
        -------
        tuple:
            (active_angles, passive_angles, variables).
        """
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k: 0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module — this branch would raise NameError. Confirm
                # the intended exception type.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings / TequilaWarning are not imported in
                # this module — this branch would raise NameError.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Run the scipy optimization.

        Parameters
        ----------
        Hamiltonian:
            a parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the circuit whose expectation value is minimized.
        variables: list, optional:
            the variables to optimize; None optimizes all.
        initial_values: dict, optional:
            starting point; generated if None (see initialize_variables).
        gradient / hessian: optional:
            gradient/hessian instructions (string, dict or None=analytic).
        reset_history: bool:
            whether to clear stored history before optimizing.

        Returns
        -------
        SciPyResults:
            the results of the optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value dictionary into (ordered) arrays.
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # Process and initialize scipy bounds.
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # One silent warm-up evaluation populates E.infostring.
        E = _EvalContainer(Hamiltonian=H,
                           unitary=unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        E(param_values)
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # Detect numerical/QNG gradient instructions; switch off compiling.
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective`, get_qng_combos and _QngContainer
                # are not defined in this module (the containers import is
                # commented out) — this path raises NameError. Confirm before
                # using gradient='qng'.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # Numerical gradient string ('2-point', 'cs', ...): hand the
                # string to scipy directly.
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient, dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined names as the 'qng' branch above.
                combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                                unitary=unitary,
                                Hamiltonian=H,
                                Eval=E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            dE.print_level = 0
            dE(param_values)
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # BUG FIX: the original "failsafe" assigned a throwaway local
        # (`real_iterations = range(len(E.history))`) which had no effect.
        # Methods without callback support (e.g. COBYLA) are instead handled
        # by the empty-history fallback below.
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # Some methods like "cobyla" do not support callback functions.
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # Some scipy methods return the last value, not the minimum
        # (e.g. cobyla) — pick the best evaluated point explicitly.
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history,
                            variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Convenience wrapper: build a local optimize_scipy instance and run it on
    (Hamiltonian, unitary), deferring objective construction to the optimizer.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian to minimize the expectation value of.
    unitary:
        the circuit U.
    gradient:
        '2-point', 'cs' or '3-point' for numerical gradients, a dict of
        variables -> Objective for custom gradients, 'qng', or None (analytic).
    hessian:
        same options as gradient, for the hessian.
    initial_values:
        dict of variable -> starting value; None lets the optimizer choose.
    variables:
        list of variables to optimize; None optimizes all.
    samples:
        shots per circuit evaluation (None = exact wavefunction simulation).
    maxiter:
        maximum number of iterations.
    backend / backend_options / noise / device:
        simulator configuration, passed through to the optimizer.
    method / tol / method_options / method_bounds / method_constraints:
        scipy.optimize.minimize configuration (see scipy documentation).
    silent:
        suppress printout if True.
    save_history:
        keep the optimization history.

    Returns
    -------
    SciPyResults:
        the results of the optimization.
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUG FIX: the second key element was wrapped in a list
            # (assign_variable([k[1]])), which assign_variable cannot handle;
            # pass the hashable key element itself.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # NOTE(review): the noise model is forwarded as `noise_model=` — confirm
    # the OptimizerSciPy base constructor accepts that keyword.
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.6/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which objective should be
        differentiated; default None computes the total gradient.
    :param no_compile: skip the gradient-mode circuit compilation step.
    return: dictionary of Objectives (one per variable) when variable is None;
        otherwise a single Objective (or a number for purely variable-dependent
        inputs).
    '''
    # Total gradient: recurse once per variable of the objective.
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for v in all_vars:
            assert (v is not None)
            gradients[v] = grad(objective, v, no_compile=no_compile)
        return gradients

    variable = assign_variable(variable)

    # Tensors of objectives are differentiated elementwise.
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)

    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # Compile down to gates whose shift rules are known.
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate a compiled Objective via the chain rule: for every argument,
    multiply the outer derivative of the transformation (via autograd/jax)
    with the inner derivative of the argument w.r.t. the variable.
    """
    args = objective.args
    transformation = objective.transformation
    accumulated = None
    inner_cache = {}  # avoid re-deriving identical expectation values
    for position, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # Simple special case: the outer derivative of the identity is 1.
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # Expectation value: cache its inner derivative.
            if arg not in inner_cache:
                inner_cache[arg] = __grad_inner(arg=arg, variable=variable)
            inner = inner_cache[arg]
        else:
            # Purely variable-dependent argument.
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectation values
            continue
        contribution = outer * inner
        accumulated = contribution if accumulated is None else accumulated + contribution
    if accumulated is None:
        raise TequilaException("caught None in __grad_objective")
    return accumulated
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner derivative dispatch: differentiates all the way down to plain
    variables, returning 1.0 (0.0) when a variable is (is not) identical to
    the requested one.
    :param arg: a transform, expectation value or variable to differentiate
    :param variable: the Variable with respect to which to differentiate
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # Pre-compiled expectation value: differentiate the abstract one and
        # recompile with the original input arguments.
        E = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(E, variable=variable)
        return compile(dE, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U|H|U> with respect
    to one variable: sum of shift-rule contributions over every gate of U
    that carries the variable.
    :param E: the expectation value to differentiate
    :param variable: the Variable to differentiate with respect to
    :return: Objective encoding the derivative (without extra hamiltonian)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    dO = Objective()
    # one shift-rule term per (position, gate) pair depending on the variable
    for position, gate in unitary._parameter_map[variable]:
        dO += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of one directly differentiable gate via its shift rule.
    Expects precompiled circuits.
    :param unitary: the QCircuit containing the gate to differentiate
    :param g: the parametrized gate being differentiated
    :param i: the position in unitary at which g appears
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # Custom gate constructions may override shifted_gates.
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    inner_grad = __grad_inner(g.parameter, variable)
    dOinc = Objective()
    # Each shifted replacement contributes weight * inner_grad * <Ux|H|Ux>.
    # (Loop variables renamed so the parameter g is no longer shadowed.)
    for weight, shifted_gate in g.shifted_gates():
        Ux = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
        dOinc += (weight * inner_grad) * Ex
    return dOinc
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/tn_update/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight record bundling one Pauli term in intermediate format:
    a scalar coefficient, the list of 2x2 operator matrices, and the qubit
    positions they act on. All three fields are exposed read-only.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """List of 2x2 matrices, aligned index-wise with `positions`."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
"""
Class that handles the MPO. Is able to set values at certain positions,
update containers (wannabe-equivalent to dynamic arrays) and compress the MPO
"""
def __init__(self,
             n_qubits: int,
             ):
    """
    Allocate one trivial (1, 1, 2, 2) complex site tensor per qubit.
    Index convention per site: [bond_left, bond_right, physical, physical].
    """
    self.n_qubits = n_qubits
    # BUG FIX: np.complex was removed in NumPy 1.24 — the builtin complex
    # gives the same complex128 dtype.
    self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                      for q in range(self.n_qubits)]
def get_dim(self):
""" Returns max dimension of container """
d = 1
for q in range(len(self.container)):
d = max(d, self.container[q].shape[0])
return d
def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
"""
set_at: where to put data
"""
# Set a matrix
if len(set_at) == 2:
self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
# Set specific values
elif len(set_at) == 4:
self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
add_operator
else:
raise Exception("set_at needs to be either of length 2 or 4")
def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
"""
This should mimick a dynamic array
update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1
the last two dimensions are always 2x2 only
"""
old_shape = self.container[qubit].shape
# print(old_shape)
if not len(update_dir) == 4:
if len(update_dir) == 2:
update_dir += [0, 0]
else:
raise Exception("update_dir needs to be either of length 2 or 4")
if update_dir[2] or update_dir[3]:
raise Exception("Last two dims must be zero.")
new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
new_tensor = np.zeros(new_shape, dtype=np.complex)
# Copy old values
new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
# Add new values
new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
# Overwrite container
self.container[qubit] = new_tensor
def compress_mpo(self):
"""
Compression of MPO via SVD
"""
n_qubits = len(self.container)
for q in range(n_qubits):
my_shape = self.container[q].shape
self.container[q] =\
self.container[q].reshape((my_shape[0], my_shape[1], -1))
# Go forwards
for q in range(n_qubits-1):
# Apply permutation [0 1 2] -> [0 2 1]
my_tensor = np.swapaxes(self.container[q], 1, 2)
my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
# full_matrices flag corresponds to 'econ' -> no zero-singular values
u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
# Count the non-zero singular values
num_nonzeros = len(np.argwhere(s>EPS))
# Construct matrix from square root of singular values
s = np.diag(np.sqrt(s[:num_nonzeros]))
u = u[:,:num_nonzeros]
vh = vh[:num_nonzeros,:]
# Distribute weights to left- and right singular vectors (@ = np.matmul)
u = u @ s
vh = s @ vh
# Apply permutation [0 1 2] -> [0 2 1]
u = u.reshape((self.container[q].shape[0],\
self.container[q].shape[2], -1))
self.container[q] = np.swapaxes(u, 1, 2)
self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
# Go backwards
for q in range(n_qubits-1, 0, -1):
my_tensor = self.container[q]
my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
# full_matrices flag corresponds to 'econ' -> no zero-singular values
u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
# Count the non-zero singular values
num_nonzeros = len(np.argwhere(s>EPS))
# Construct matrix from square root of singular values
s = np.diag(np.sqrt(s[:num_nonzeros]))
u = u[:,:num_nonzeros]
vh = vh[:num_nonzeros,:]
# Distribute weights to left- and right singular vectors
u = u @ s
vh = s @ vh
self.container[q] = np.reshape(vh, (num_nonzeros,
self.container[q].shape[1],
self.container[q].shape[2]))
self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
for q in range(n_qubits):
my_shape = self.container[q].shape
self.container[q] = self.container[q].reshape((my_shape[0],\
my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Hamiltonian-as-MPO helper.

    Converts a tequila QubitHamiltonian into a list of MPOContainer objects —
    a new MPO is started whenever the bond dimension would exceed ``maxdim`` —
    and can reconstruct the dense operator (summed over all MPOs) for testing.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: tequila QubitHamiltonian (or text form) to convert.
            backend: optional tensornetwork backend (currently unused here).
            n_qubits: number of qubits; if omitted, self.get_n_qubits() is
                called. NOTE(review): get_n_qubits is not defined in this
                class — confirm it exists before relying on the fallback.
            name: optional name for the MPO (currently unused here).
            maxdim: maximum allowed bond dimension before splitting into
                an additional MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Translate the Pauli-string Hamiltonian into SubOperator records."""
        def get_pauli_matrix(string):
            # Builtin `complex`: the np.complex alias was removed in NumPy 1.24.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in the intermediate format.
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first Pauli string may be the identity (no items).
            if not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Fix: clear the flag unconditionally; previously it stayed True
            # whenever the first string was non-identity, so a later identity
            # term would have been silently accepted.
            first = False
            # Get operators and the qubits they act on.
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            intermediate += [SubOperator(coefficient=coefficient,
                                         operators=operators,
                                         positions=positions)]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """Greedily pack terms ``intermediate[j:]`` into one MPO.

        Stops when the bond dimension reaches ``self.maxdim`` or the terms
        run out. Returns ``(mpo, j_next)`` with ``j_next`` the first
        unconsumed term index.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # First term: write directly into the (1,1,2,2) seed tensors. The
        # coefficient is spread evenly over all sites as its n_qubits-th
        # (complex) root — complex, since coefficients may be negative.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
            else:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
        # Remaining terms: grow the bond dimension by one per term and
        # compress periodically.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # Boundary sites only grow one bond; every qubit index
                # appears at most once in my_positions.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    op = my_operators[my_pos_index]
                else:
                    op = np.eye(2)
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=complex(my_coefficient)**(1 / n_qubits) * op)
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split all terms into as many MPOs as needed to respect maxdim."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """Recover the dense operator, reshaped to (d, d, d, d) with
        d = 2**(n_qubits/2), summing the contributions of all MPOs.

        Useful e.g. to compare with the Hamiltonian obtained from tequila.
        """
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        H = None
        # Network layout per MPO:
        #   |  |       |  |
        #  -O--O--...--O--O-
        #   |  |       |  |
        for m in self.mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect neighbouring tensors along their bond legs.
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Dangling edges: trivial left/right bonds, then all upper and
            # all lower physical legs.
            edges = [nodes[0].get_edge(0), nodes[-1].get_edge(1)]
            edges += [nodes[q].get_edge(2) for q in range(n_qubits)]
            edges += [nodes[q].get_edge(3) for q in range(n_qubits)]
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Fix: previously H_m was unbound when the result was not a
            # torch tensor (e.g. with the numpy backend).
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = np.asarray(res.tensor)
            H = H_m if H is None else H + H_m
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/tn_update/wfn_optimization.py | import numpy as np
import tequila as tq
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("jax")
import torch
import itertools
import copy
import sys
from my_mpo import *
def normalize(me, order=2):
    """Return ``me`` scaled to unit norm (vector ``order``-norm, default L2)."""
    scale = np.linalg.norm(me, ord=order)
    return me / scale
def contract_energy(H, psiL, psiR) -> float:
    """Real part of the expectation value of H over the product state
    psiL (x) psiR, via a single ncon contraction (jax backend)."""
    bra_L = np.conj(psiL)
    bra_R = np.conj(psiR)
    tensors = [psiL, psiR, H, bra_L, bra_R]
    network = [(1,), (2,), (1, 2, 3, 4), (3,), (4,)]
    value = tn.ncon(tensors, network, backend='jax')
    # Some backends hand back a torch tensor; convert before taking Re.
    if isinstance(value, torch.Tensor):
        value = value.numpy()
    return np.real(value)
def contract_energy_mpo(H, psiL, psiR, rangeL=None, rangeR=None) -> float:
    """Real part of <psiL (x) psiR| H |psiL (x) psiR> for an MPO Hamiltonian.

    :param H: object exposing ``n_qubits`` and ``mpo`` (list of MPOContainer);
        contributions of all MPOs in the list are summed.
    :param psiL, psiR: subsystem state vectors of length 2**(n_qubits//2).
    :param rangeL, rangeR: qubit indices of the left/right subsystem.
        Specify both or neither (default: first/second half).
    :raises Exception: if exactly one of rangeL/rangeR is given.
    """
    # Fix: n_qubits must be known before the default ranges are built
    # (previously it was read here before being assigned).
    n_qubits = H.n_qubits
    if rangeL is None and rangeR is None:
        rangeL = range(n_qubits // 2)
        rangeR = range(n_qubits // 2, n_qubits)
    elif rangeL is None or rangeR is None:
        raise Exception("this can't be the case, either specify both or neither")
    # Materialize: the ranges are iterated twice per MPO below, which would
    # silently exhaust one-shot iterators such as itertools.chain.
    rangeL = list(rangeL)
    rangeR = list(rangeR)
    # Reshape once into qubit-index form (one leg of dimension 2 per qubit).
    psiL = psiL.reshape([2 for _ in range(n_qubits // 2)])
    psiR = psiR.reshape([2 for _ in range(n_qubits // 2)])
    psiLdg = np.conj(psiL)
    psiRdg = np.conj(psiR)
    energy = 0
    for mpo in H.mpo:
        nodes = [tn.Node(mpo.container[q], name=str(q))
                 for q in range(n_qubits)]
        # Connect neighbouring MPO tensors along their bond legs.
        for q in range(n_qubits - 1):
            nodes[q][1] ^ nodes[q + 1][0]
        edges_upper_l = [nodes[q].get_edge(2) for q in rangeL]
        edges_lower_l = [nodes[q].get_edge(3) for q in rangeL]
        edges_upper_r = [nodes[q].get_edge(2) for q in rangeR]
        edges_lower_r = [nodes[q].get_edge(3) for q in rangeR]
        psiL_node = tn.Node(psiL)
        psiR_node = tn.Node(psiR)
        psiLdg_node = tn.Node(psiLdg)
        psiRdg_node = tn.Node(psiRdg)
        # Attach kets on top and bras on the bottom of the MPO.
        for i_e, e in enumerate(psiL_node.edges):
            e ^ edges_upper_l[i_e]
        for i_e, e in enumerate(psiR_node.edges):
            e ^ edges_upper_r[i_e]
        for i_e, e in enumerate(psiLdg_node.edges):
            e ^ edges_lower_l[i_e]
        for i_e, e in enumerate(psiRdg_node.edges):
            e ^ edges_lower_r[i_e]
        res = tn.contractors.auto(nodes + [psiL_node, psiR_node,
                                           psiLdg_node, psiRdg_node],
                                  ignore_edge_order=True)
        # Only the two trivial (dimension-1) bond edges remain dangling.
        contribution = res.tensor
        if isinstance(contribution, torch.Tensor):
            contribution = contribution.numpy()
        energy += contribution[0][0]
    return np.real(energy)
def tmp_full_to_LR_wfn(wfn_array, d, subsysL=(0, 1, 2), subsysR=(3, 4, 5)):
    """Project a full wavefunction onto two qubit subsystems and renormalize.

    For each subsystem, keeps only the amplitudes of basis states whose set
    bits all lie inside that subsystem (the all-zero state belongs to both),
    then L2-normalizes each resulting vector.

    Fixes vs. the previous version: the output keeps the input's (possibly
    complex) dtype instead of silently dropping imaginary parts, the caller's
    subsystem lists are no longer sorted in place, and the mutable default
    arguments were replaced by tuples.

    :param wfn_array: full state vector of length 2**n_qubits.
    :param d: kept for interface compatibility; not used here.
    :param subsysL, subsysR: qubit indices of the two subsystems.
    :return: tuple (psiL, psiR) of normalized subsystem vectors.
    """
    def fetch_vec_per_subsys(wfn_array, subsystem):
        # Enumerate all computational-basis indices supported on `subsystem`:
        # start from |00...0> and successively add each qubit's bit.
        index_list = [0]
        for q in sorted(subsystem):
            index_list += [index + (1 << q) for index in list(index_list)]
            index_list.sort()
        # np.array preserves the amplitude dtype (complex stays complex).
        return np.array([wfn_array[index] for index in index_list])

    psiL = fetch_vec_per_subsys(wfn_array, subsysL)
    psiL = psiL / np.linalg.norm(psiL)
    psiR = fetch_vec_per_subsys(wfn_array, subsysR)
    psiR = psiR / np.linalg.norm(psiR)
    return psiL, psiR
def update_psi(env, psi, SL):
    """One update step: psi <- normalize(conj(env) - SL * psi)."""
    shifted = np.conj(env) - SL * psi
    return normalize(shifted)
def update_psi_mpo(env_conj, psi, SL):
    """MPO variant of update_psi; ``env_conj`` is already conjugated."""
    candidate = env_conj - SL * psi
    return normalize(candidate)
def compute_environment(H, psiL, psiR, which: str = 'l'):
    """Contract H with three of the four state vectors, leaving one index open.

    :param which: 'l' leaves psiL's index open, 'r' leaves psiR's index open
        (case-insensitive).
    :raises ValueError: for any other value of ``which`` (previously this
        crashed with an UnboundLocalError at the return).
    """
    w = which.lower()
    if w == 'l':
        env = tn.ncon([psiR, H, np.conj(psiL), np.conj(psiR)],
                      [(2,), (-1, 2, 3, 4), (3,), (4,)], backend='jax')
    elif w == 'r':
        env = tn.ncon([psiL, H, np.conj(psiL), np.conj(psiR)],
                      [(1,), (1, -2, 3, 4), (3,), (4,)], backend='jax')
    else:
        raise ValueError(f"which must be 'l' or 'r', got {which!r}")
    return env
def compute_environment_mpo(H, psiL, psiR, which: str = 'l', rangeL=None, rangeR=None):
    """Contract the MPO Hamiltonian against three of the four state vectors,
    leaving the subsystem selected by ``which`` open, and return the
    conjugated environment as a flat vector of length 2**(n_qubits//2).

    :param which: 'l' or 'r' (case-insensitive); selects which subsystem's
        top legs remain open.
    :param rangeL, rangeR: qubit indices of the subsystems; specify both or
        neither (default: first/second half).
    :raises ValueError: for an invalid ``which`` (previously an unbound-local
        crash further down).
    :raises Exception: if exactly one of rangeL/rangeR is given.
    """
    # Fix: n_qubits must be known before the default ranges are built
    # (previously it was read here before being assigned).
    n_qubits = H.n_qubits
    if rangeL is None and rangeR is None:
        rangeL = range(n_qubits // 2)
        rangeR = range(n_qubits // 2, n_qubits)
    elif rangeL is None or rangeR is None:
        raise Exception("this can't be the case, either specify both or neither")
    # Materialize: the ranges are iterated several times below, which would
    # silently exhaust one-shot iterators such as itertools.chain.
    rangeL = list(rangeL)
    rangeR = list(rangeR)
    w = which.lower()
    if w not in ('l', 'r'):
        raise ValueError(f"which must be 'l' or 'r', got {which!r}")
    d = int(2**(n_qubits / 2))
    # Reshape once into qubit-index form (one leg of dimension 2 per qubit).
    psiL = psiL.reshape([2 for _ in range(n_qubits // 2)])
    psiR = psiR.reshape([2 for _ in range(n_qubits // 2)])
    psiLdg = np.conj(psiL)
    psiRdg = np.conj(psiR)
    environment = None
    for mpo in H.mpo:
        nodes = [tn.Node(mpo.container[q], name=str(q))
                 for q in range(n_qubits)]
        # Connect neighbouring MPO tensors along their bond legs.
        for q in range(n_qubits - 1):
            nodes[q][1] ^ nodes[q + 1][0]
        dummy_edges = [nodes[0].get_edge(0), nodes[-1].get_edge(1)]
        edges_upper_l = [nodes[q].get_edge(2) for q in rangeL]
        edges_lower_l = [nodes[q].get_edge(3) for q in rangeL]
        edges_upper_r = [nodes[q].get_edge(2) for q in rangeR]
        edges_lower_r = [nodes[q].get_edge(3) for q in rangeR]
        psiL_node = tn.Node(psiL)
        psiR_node = tn.Node(psiR)
        psiLdg_node = tn.Node(psiLdg)
        psiRdg_node = tn.Node(psiRdg)
        if w == 'r':
            # Right environment: contract psiL on top, leave right top edges open.
            for i_e, e in enumerate(psiL_node.edges):
                e ^ edges_upper_l[i_e]
            network = nodes + [psiL_node, psiLdg_node, psiRdg_node]
            output_edge_order = dummy_edges + edges_upper_r
        else:
            # Left environment: contract psiR on top, leave left top edges open.
            for i_e, e in enumerate(psiR_node.edges):
                e ^ edges_upper_r[i_e]
            network = nodes + [psiR_node, psiLdg_node, psiRdg_node]
            output_edge_order = dummy_edges + edges_upper_l
        # The bras are always fully contracted on the bottom.
        for i_e, e in enumerate(psiLdg_node.edges):
            e ^ edges_lower_l[i_e]
        for i_e, e in enumerate(psiRdg_node.edges):
            e ^ edges_lower_r[i_e]
        res = tn.contractors.auto(network, output_edge_order=output_edge_order)
        environment = res.tensor if environment is None else environment + res.tensor
    if isinstance(environment, torch.Tensor):
        environment = environment.numpy()
    return np.conj(environment.reshape(d))
# "Optimize" vectors
def optimize_wavefunctions(H, psiL, psiR, SL=1., TOL=1e-8, silent=True):
it = 0
energy = 0
dE = 12.7
stuck = False
while dE > TOL and not stuck:
it += 1
# L-update
envL = compute_environment(H, psiL, psiR, 'L')
psiL = update_psi(envL, psiL, SL)
# R-update
envR = compute_environment(H, psiL, psiR, 'R')
psiR = update_psi(envR, psiR, SL)
old_energy = energy
energy = contract_energy(H, psiL, psiR)
if not silent:
print("At ", it, " have energy ", energy)
else:
if not it%100:
print("At ", it, " have energy ", energy)
dE = np.abs(energy - old_energy)
if it > 500:
stuck = True
#print("\tEnergy optimization reached ", energy, " after ", it, " iterations.")
if stuck:
return None
else:
return energy, psiL, psiR
# "Optimize" vectors --- MPO Version
def optimize_wavefunctions_mpo(H, psiL, psiR, SL=1., TOL=1e-10, silent=True):
it = 0
energy = 0
dE = 12.7
rangeL, rangeR = None, None
n_qubits = H.n_qubits
'''
modified ranges!
'''
rangeL = range(1, n_qubits//2+1)
rangeR = itertools.chain([0], range(n_qubits//2+1, n_qubits))
'''
end modified ranges!
'''
while dE > TOL:
it += 1
# L-update
envL_conj = compute_environment_mpo(H, psiL, psiR, 'L', rangeL=rangeL, rangeR=rangeR)
psiL = update_psi_mpo(envL_conj, psiL, SL)
# R-update
envR_conj = compute_environment_mpo(H, psiL, psiR, 'R', rangeL=rangeL, rangeR=rangeR)
psiR = update_psi_mpo(envR_conj, psiR, SL)
old_energy = energy
energy = contract_energy_mpo(H, psiL, psiR, rangeL=rangeL, rangeR=rangeR)
if not silent:
print("At ", it, " have energy ", energy)
dE = np.abs(energy - old_energy)
#print("\tEnergy optimization reached ", energy, " after ", it, " iterations.")
return energy, psiL, psiR
# def wfvec_to_tensor(wfvec, subs_qubits: int = 3):
# shape = tuple([2 for _ in range(subs_qubits)])
# return wfvec.reshape(shape)
#
# def tensor_to_wfvec(tensor, subs_qubits: int = 3):
# d = int(2**(subs_qubits))
# return tensor.reshape(d)
def initialize_wfns_randomly(dim: int = 8, n_qubits: int = 3):
    """Draw a random complex vector of length ``dim`` and normalize it.

    Real and imaginary parts are sampled uniformly from [-1/2, 1/2).
    ``n_qubits`` is accepted for interface compatibility but not used here.
    """
    real_part = np.random.rand(dim) - 1 / 2
    imag_part = 1.j * (np.random.rand(dim) - 1 / 2)
    return normalize(real_part + imag_part)
# def initialize_wfns_randomly_mpo(dim: int = 8, n_qubits: int = 3):
# psi = np.random.rand(dim) + 1.j*(np.random.rand(dim)-1/2)
# psi = psi.reshape(tuple([2 for _ in range(n_qubits)]))
# psi = normalize_mpo(psi)
#
# return psi
def main():
    """Driver: build a water (STO-3G, psi4 backend) Hamiltonian with tequila,
    convert it to an MPO representation and optimize a random product state
    psiL (x) psiR against it, timing the optimization."""
    # First construct it, will load Hamiltonian later
    # mol = tq.Molecule(geometry='H 0.0 0.0 0.0\n H 0.0 0.0 0.7', basis_set='6-31g', active_orbitals=list(range(3)), transformation='jordan-wigner')
    # mol = tq.Molecule(geometry='H 0.0 0.0 0.0\n H 0.0 0.0 0.7', basis_set='sto-3g', transformation='jordan-wigner')
    mol = tq.Molecule(geometry='O 0.0 0.0 0.0\n H 0.0 0.755 -0.476\n H 0.0 -0.755 -0.476', basis_set='sto-3g', backend='psi4', threads=12)
    H = mol.make_hamiltonian().simplify()
    n_qubits = len(H.qubits)
    print("n_qubits:", n_qubits)
    # Hilbert-space dimension of each half of the qubit register.
    d = int(2**(n_qubits/2))
    print("d:", d)
    # I somehow thought the following might be a good idea, but apparently it does not really work ^^
    # Still keeping it here just in case that it's just because of some bug
    """
    # In a longer term, we might try to somehow translate from a tq.QubitWavefunction here...
    # For now instead of random vector, let's get the UCCD-one and separate it
    U = mol.make_upccgsd_ansatz(name='uccd')
    E = tq.ExpectationValue(H=H, U=U)
    res = tq.minimize(objective=E, method='slsqp', silent=True)
    print("Optimized energy:", res.energy)
    # tq.QubitWavefunction
    wfn = tq.simulate(objective=U, variables=res.angles)
    # print(wfn)
    # As array (here then with size 2**6 = 64)
    wfn_array = wfn.to_array()
    # print(wfn_array)
    # Just as a test
    # Now separate the wavefunction into two subsystems, where each then has size 2**3 = 8
    psiL, psiR = tmp_full_to_LR_wfn(wfn_array, d)
    # Let's see what separated version of UCCD-solution gives... we lost something, so we should expect worse than FCI
    sep_energy = contract_energy(H_mat_tq, psiL, psiR)
    print("Initial separated UCCD energy:", sep_energy)
    # Optimize wavefunctions based UCCD-solution
    energy_U, psiL_U, psiR_U = optimize_wavefunctions(H_mat_tq, psiL, psiR)
    print("Optimized wfns:", psiL_U, psiR_U)
    """
    # # Now, use Lukasz's Hamiltonian
    # H = np.loadtxt('filename.txt', dtype='complex', delimiter=',')
    # H_mat = np.reshape(H.to_matrix(), (d, d, d, d))
    # Convert the qubit Hamiltonian into (a list of) MPO(s), bond dim capped at 400.
    H_mpo = MyMPO(hamiltonian=H, n_qubits=n_qubits, maxdim=400)
    H_mpo.make_mpo_from_hamiltonian()
    # Random normalized start vectors for the two subsystems.
    psiL_rand = initialize_wfns_randomly(d, n_qubits//2)
    psiR_rand = initialize_wfns_randomly(d, n_qubits//2)
    # NOTE(review): the *_mpo vectors below are created but never used afterwards.
    psiL_rand_mpo = initialize_wfns_randomly(d, n_qubits//2)
    psiR_rand_mpo = initialize_wfns_randomly(d, n_qubits//2)
    # en = contract_energy(H_mat, psiL_rand, psiR_rand)
    en_mpo = contract_energy_mpo(H_mpo, psiL_rand, psiR_rand)
    # print("Initial random state energy:", en)
    print("Initial random state energy mpo:", en_mpo)
    # Optimize wavefunctions based on random guess
    # energy_rand, _, _ = optimize_wavefunctions(H_mat, psiL_rand, psiR_rand,
    #                                            silent=False)
    import time
    t0 = time.time()
    energy_rand, psiL_rand, psiR_rand = optimize_wavefunctions_mpo(H_mpo,
                                                                   psiL_rand,
                                                                   psiR_rand,
                                                                   silent=False)
    t1 = time.time()
    print("needed ", t1-t0, " seconds.")
# Script entry point: run the driver only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 14,270 | 37.57027 | 148 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/tn_update/qq.py | import numpy as np
import tequila as tq
import tensornetwork as tn
import itertools
import copy
def normalize(me, order=2):
    """Divide ``me`` by its ``order``-norm (default: Euclidean) and return it."""
    norm_value = np.linalg.norm(me, ord=order)
    return me / norm_value
def contract_energy(H, psiL, psiR) -> float:
    """Real part of the expectation value of H over the product state
    psiL (x) psiR, computed with a single ncon call (pytorch backend)."""
    bra_L = np.conj(psiL)
    bra_R = np.conj(psiR)
    value = tn.ncon([psiL, psiR, H, bra_L, bra_R],
                    [(1,), (2,), (1, 2, 3, 4), (3,), (4,)],
                    backend='pytorch')
    return np.real(value)
def tmp_full_to_LR_wfn(wfn_array, d, subsysL=(0, 1, 2), subsysR=(3, 4, 5)):
    """Project a full wavefunction onto two qubit subsystems and renormalize.

    For each subsystem, keeps only the amplitudes of basis states whose set
    bits all lie inside that subsystem (the all-zero state belongs to both),
    then L2-normalizes each resulting vector.

    Fixes vs. the previous version: the output keeps the input's (possibly
    complex) dtype instead of silently dropping imaginary parts, the caller's
    subsystem lists are no longer sorted in place, and the mutable default
    arguments were replaced by tuples.

    :param wfn_array: full state vector of length 2**n_qubits.
    :param d: kept for interface compatibility; not used here.
    :param subsysL, subsysR: qubit indices of the two subsystems.
    :return: tuple (psiL, psiR) of normalized subsystem vectors.
    """
    def fetch_vec_per_subsys(wfn_array, subsystem):
        # Enumerate all computational-basis indices supported on `subsystem`:
        # start from |00...0> and successively add each qubit's bit.
        index_list = [0]
        for q in sorted(subsystem):
            index_list += [index + (1 << q) for index in list(index_list)]
            index_list.sort()
        # np.array preserves the amplitude dtype (complex stays complex).
        return np.array([wfn_array[index] for index in index_list])

    psiL = fetch_vec_per_subsys(wfn_array, subsysL)
    psiL = psiL / np.linalg.norm(psiL)
    psiR = fetch_vec_per_subsys(wfn_array, subsysR)
    psiR = psiR / np.linalg.norm(psiR)
    return psiL, psiR
def update_psi(env, psi, SL):
    """One update step: psi <- normalize(conj(env) - SL * psi)."""
    candidate = np.conj(env) - SL * psi
    return normalize(candidate)
def compute_environment(H, psiL, psiR, which: str = 'l'):
    """Contract H with three of the four state vectors, leaving one index open.

    :param which: 'l' leaves psiL's index open, 'r' leaves psiR's index open
        (case-insensitive).
    :raises ValueError: for any other value of ``which`` (previously this
        crashed with an UnboundLocalError at the return).
    """
    w = which.lower()
    if w == 'l':
        env = tn.ncon([psiR, H, np.conj(psiL), np.conj(psiR)],
                      [(2,), (-1, 2, 3, 4), (3,), (4,)], backend='pytorch')
    elif w == 'r':
        env = tn.ncon([psiL, H, np.conj(psiL), np.conj(psiR)],
                      [(1,), (1, -2, 3, 4), (3,), (4,)], backend='pytorch')
    else:
        raise ValueError(f"which must be 'l' or 'r', got {which!r}")
    return env
# "Optimize" vectors
def optimize_wavefunctions(H, psiL, psiR, SL=1., TOL=1e-10, silent=True):
it = 0
energy = 0
dE = 12.7
while dE > TOL:
it += 1
# L-update
envL = compute_environment(H, psiL, psiR, 'L')
psiL = update_psi(envL, psiL, SL)
# R-update
envR = compute_environment(H, psiL, psiR, 'R')
psiR = update_psi(envR, psiR, SL)
old_energy = energy
energy = contract_energy(H, psiL, psiR)
if not silent:
print("At ", it, " have energy ", energy)
dE = np.abs(energy - old_energy)
if not silent:
print("Reached final energy of ", energy, " after ", it, " iterations.")
return energy, psiL, psiR
def main():
    """Exploratory driver: build an H2 Hamiltonian with tequila, print its
    molecular form, then (currently) stop with a deliberate exception.
    Everything below the ``raise`` is unreachable dead code kept for later."""
    n_qubits = 6
    # First construct it, will load Hamiltonian later
    mol = tq.Molecule(geometry='H 0.0 0.0 0.0\n H 0.0 0.0 0.7', basis_set='6-31g', active_orbitals=list(range(n_qubits//2)), transformation='jordan-wigner')
    H = mol.make_hamiltonian().simplify()
    # Hilbert-space dimension of each half of the qubit register.
    d = int(2**(n_qubits/2))
    print(d)
    # Reshape Hamiltonian matrix
    # TODO this is supposed to become a MPO
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    H_mol = mol.make_molecular_hamiltonian()
    print(H_mol)
    # Guess we should use this to transform into MPO
    # CHECK ORDERING OF H_mol (might be Mulliken, but likely the openfermion one!)
    # H = h_0 + h_pq a^p a_q + h_pqrs a^p a^q a_s a_r
    # h_0: identity over everything
    # rest: ~ JW
    # Deliberate hard stop while the MPO conversion above is unfinished;
    # none of the code after this line currently executes.
    raise Exception(".")
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    H_mat_tq = np.reshape(H.to_matrix(), (d, d, d, d))
    # print(H_mat)
    # I somehow thought the following might be a good idea, but apparently it does not really work ^^
    # Still keeping it here just in case that it's just because of some bug
    """
    # In a longer term, we might try to somehow translate from a tq.QubitWavefunction here...
    # For now instead of random vector, let's get the UCCD-one and separate it
    U = mol.make_upccgsd_ansatz(name='uccd')
    E = tq.ExpectationValue(H=H, U=U)
    res = tq.minimize(objective=E, method='slsqp', silent=True)
    print("Optimized energy:", res.energy)
    # tq.QubitWavefunction
    wfn = tq.simulate(objective=U, variables=res.angles)
    # print(wfn)
    # As array (here then with size 2**6 = 64)
    wfn_array = wfn.to_array()
    # print(wfn_array)
    # Just as a test
    # Now separate the wavefunction into two subsystems, where each then has size 2**3 = 8
    psiL, psiR = tmp_full_to_LR_wfn(wfn_array, d)
    # Let's see what separated version of UCCD-solution gives... we lost something, so we should expect worse than FCI
    sep_energy = contract_energy(H_mat_tq, psiL, psiR)
    print("Initial separated UCCD energy:", sep_energy)
    # Optimize wavefunctions based UCCD-solution
    energy_U, psiL_U, psiR_U = optimize_wavefunctions(H_mat_tq, psiL, psiR)
    print("Optimized wfns:", psiL_U, psiR_U)
    """
    # Now, use Lukasz's Hamiltonian
    H = np.loadtxt('filename.txt', dtype='complex', delimiter=',')
    H_mat = np.reshape(H, (d, d, d, d))
    def construct_psi_randomly(dim: int = 8):
        """Random normalized complex vector of length ``dim``."""
        # psi = np.random.rand(dim)# + 1.j*(np.random.rand(dim)-1/2)
        psi = np.random.rand(dim)-1/2 + 1.j*(np.random.rand(dim)-1/2)
        psi /= np.linalg.norm(psi, ord=2)
        return psi
    psiL_rand = construct_psi_randomly(d)
    psiR_rand = construct_psi_randomly(d)
    en = contract_energy(H_mat, psiL_rand, psiR_rand)
    print("Initial random state energy:", en)
    # Optimize wavefunctions based on random guess
    energy_rand, psiL_rand, psiR_rand = optimize_wavefunctions(H_mat, psiL_rand, psiR_rand)
    print("Optimized wfns:", psiL_rand, psiR_rand)
# Script entry point: run the driver only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 6,227 | 32.483871 | 156 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.5/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Immutable-by-convention record for a single Hamiltonian term.

    Holds one scalar coefficient together with the term's single-qubit
    operators and the qubit indices (positions) they act on; used as an
    intermediate representation during MPO construction.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        # Private storage; read access goes through the properties.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operators of this term, aligned with ``positions``."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Container for the site tensors of a matrix-product operator (MPO).

    Each qubit holds one rank-4 tensor with index order
    [left bond, right bond, physical, physical]; physical dimensions are
    always 2. Supports writing entries, growing the bond dimensions one
    step at a time (a poor man's dynamic array) and SVD-based compression.
    """

    def __init__(self, n_qubits: int):
        self.n_qubits = n_qubits
        # Trivial bond dimension 1 on every site to start with.
        # Fix: builtin `complex` — the `np.complex` alias was removed in NumPy 1.24.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximum left-bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """Write ``add_operator`` into the tensor of ``qubit``.

        :param set_at: either [i, j] to set the full 2x2 physical block at
            bond indices (i, j), or [i, j, k, l] to set a single entry.
        :param add_operator: 2x2 array (length-2 ``set_at``) or a scalar
            (length-4 ``set_at``).
        :raises Exception: if ``set_at`` has an unsupported length.
        """
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """Grow the bond dimensions of ``qubit``'s tensor and append a block.

        :param update_dir: e.g. [1, 1] or [1, 1, 0, 0] — a 1 extends the
            corresponding bond dimension by one; the trailing two (physical)
            entries must be 0.
        :param add_operator: 2x2 block stored at the new (last, last) bond slot.
        :raises Exception: for malformed ``update_dir``.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # Fix: copy before extending so the caller's list is not mutated.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)  # np.complex removed in NumPy 1.24
        # Copy old values, then place the new block in the last bond slot.
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """Compress bond dimensions with a forward + backward SVD sweep,
        dropping singular values below EPS and distributing the square roots
        of the kept singular values to both factors."""
        n_qubits = len(self.container)
        # Merge the two physical legs so each tensor is rank 3: (l, r, phys).
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Forward sweep: factor site q, push the remainder into site q+1.
        for q in range(n_qubits - 1):
            # Permute (l, r, phys) -> (l, phys, r) before flattening.
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ': no zero singular values.
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left and right singular vectors.
            u = u @ s
            vh = s @ vh
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]],
                                            [(-1, 1), (1, -2, -3)])
        # Backward sweep: factor site q, push the remainder into site q-1.
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u],
                                            [(-1, 1, -3), (1, -2)])
        # Split the merged physical leg back into two legs of dimension 2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build Matrix-Product-Operator (MPO) representations of qubit Hamiltonians.

    The Hamiltonian (a sum of weighted Pauli strings) is first converted into
    an intermediate list of ``SubOperator`` records and then folded, string by
    string, into one or more ``MPOContainer`` objects whose bond dimension is
    capped by ``maxdim``.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: The qubit Hamiltonian (or a text identifier) to encode.
            backend: An optional backend; accepted for API compatibility but not
                used in this class. Defaults to the default backend of TensorNetwork.
            n_qubits: Number of qubits; inferred when omitted.
            name: An optional name for the MPO (currently unused).
            maxdim: Upper bound on the MPO bond dimension before a new MPO
                is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class in the
            # visible source -- confirm it is provided elsewhere before relying
            # on the n_qubits=None path.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into MPO form and store it in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of the Hamiltonian into a list of
        SubOperator(coefficient, operators, positions) records.

        Raises:
            Exception: if an identity string appears anywhere but first.
        """
        def get_pauli_matrix(string):
            # np.complex was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin complex is the documented replacement.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first string may be the identity (empty item list);
            # its weight is later distributed over all sites.
            if first and not paulistring.items():
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Fold Pauli strings intermediate[j:] into one MPOContainer, stopping
        when either all strings are consumed or the bond dimension reaches
        self.maxdim.

        Args:
            intermediate: list of SubOperator records.
            j: index of the first Pauli string to consume.

        Returns:
            (mpo, j): the constructed MPOContainer and the index of the first
            unconsumed Pauli string.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices).
        # Typically, this is an identity.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        # Distribute the n-th root of the coefficient evenly over all sites
        # (builtin complex replaces the removed np.complex alias).
        coeff_root = complex(my_coefficient)**(1/n_qubits)
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=coeff_root*np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=coeff_root*my_operators[my_pos_index])
        # ***********************************************************************
        # All remaining strings: grow the MPO until maxdim is reached.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            coeff_root = complex(my_coefficient)**(1/n_qubits)
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions.
                # Boundary sites grow only one bond index, bulk sites grow both.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits-1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that; else identity.
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=coeff_root*my_operators[my_pos_index])
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=coeff_root*np.eye(2))
            # Periodically compress to keep the bond dimension in check.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the full list of Pauli strings into a list of MPOs, each
        respecting the maxdim bond-dimension cap."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator from self.mpo, e.g. to compare with the
        Hamiltonian matrix obtained from tequila.

        Returns:
            numpy array of shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        # Contract over all bond indices.
        # Each container tensor has indices [bond, bond, physical, physical].
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Network layout:
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Convert to numpy regardless of backend; previously only the
            # torch backend was handled, leaving H_m undefined otherwise.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = np.asarray(res.tensor)
            if first:
                H = H_m
                first = False
            else:
                H += H_m
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.5/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable adapter between scipy.optimize and a tequila expectation value.

    Given a flat parameter vector from scipy, it rebuilds the variable
    dictionary, evaluates the (parametrized) Hamiltonian, simulates the
    expectation value of ``unitary`` on the 'qulacs' backend, and records
    the energy/angle history.

    Attributes
    ---------
    Hamiltonian:
        callable producing a tequila Hamiltonian from a variable dictionary.
    unitary:
        the parametrized circuit whose expectation value is evaluated.
    param_keys:
        ordered keys mapping positions of the scipy parameter vector to
        tequila variables.
    passive_angles:
        variables held fixed during the optimization (merged into each call).
    samples:
        number of shots per evaluation.
    save_history / history / history_angles:
        when enabled, every evaluated energy and angle dictionary is stored.
    print_level:
        verbosity of the per-call printout.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at the scipy parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like ``self.param_keys``.
        args
        kwargs

        Returns
        -------
        complex:
            the simulated expectation value.
        """
        circuit_vars = self.unitary.extract_variables()
        angles = {}
        for key, value in zip(self.param_keys, p):
            # Circuit parameters stay as-is; Hamiltonian parameters are cast
            # to complex before evaluation.
            angles[key] = value if key in circuit_vars else complex(value)
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # Plain complex keeps jax tracer types away from the optimizer.
        return complex(E)
class _GradContainer(_EvalContainer):
    """
    Gradient counterpart of _EvalContainer for the SciPy optimizer.

    The gradient of the energy w.r.t. each active parameter is the sum of
    two contributions: (a) the circuit gradient (via tq.grad) when the
    parameter appears in the unitary, and (b) the expectation value of the
    parameter-derivative Hamiltonian when the parameter appears in the
    (parametrized) Hamiltonian.

    See _EvalContainer for the shared attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient vector at the scipy parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient entries ordered like ``self.param_keys``, as complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is a real (float64) array, but the branches
        # below can produce complex values; assigning a complex into it would
        # fail/lose the imaginary part -- confirm the derivatives are real.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # Circuit parameters stay real; Hamiltonian parameters are cast to complex.
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0  # running count of expectation values, for the infostring
        for i in range(self.N):
            derivative = 0.0
            # Contribution (a): parameter appears in the circuit.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                # derivative becomes a compiled tequila objective here;
                # it is evaluated below via derivative(variables=..., samples=...).
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution (b): parameter appears in the Hamiltonian.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # Plain numbers are stored directly; compiled objectives are
            # evaluated at the current variables.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that replaces tequila's expectation and gradient
    container objects with the local _EvalContainer/_GradContainer, so that
    parametrized Hamiltonians (and their derivatives) are supported.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        objective: Objective:
            the objective being optimized.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this file -- this branch would raise NameError.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings/TequilaWarning are not imported in
                # this file -- this branch would raise NameError.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # Split the initial values into actively optimized and fixed angles.
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        objective: Objective:
            the objective to optimize.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Collect all variables: those of the (parametrized) Hamiltonian plus
        # those of the circuit.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        # Energy container: called by scipy with the flat parameter vector.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # One silent warm-up call to populate E.infostring.
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective`, `get_qng_combos`, `_QngContainer`
                # and `TequilaException` are not defined/imported in this file
                # (the container import at the top is commented out) -- the
                # qng path would raise NameError. Confirm before using 'qng'.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # Numerical gradient instruction ('2-point', 'cs', '3-point'):
                # forwarded to scipy untouched.
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined-name issue as the qng branch above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytic gradient container (see _GradContainer above).
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # Silent warm-up call, as for E above.
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # Per-iteration callback: snapshots the most recent energy/angles
        # (and gradient/hessian history when available) after each scipy step.
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): assigns an unused local -- probably intended to
            # patch callback.real_iterations; currently has no effect.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Entry point that delegates to the local optimize_scipy class (which builds
    the objective internally from Hamiltonian and unitary).

    Parameters
    ----------
    Hamiltonian:
        the (parametrized) Hamiltonian to minimize over.
    unitary:
        the parametrized circuit preparing the trial state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # Normalize user-supplied gradient/hessian dictionaries onto Variable keys.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # Bugfix: the second key element was wrapped in a list
            # (assign_variable([k[1]])); assign the variable itself.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    # NOTE(review): the keyword is `noise_model` here while the docstring and
    # parameter are called `noise` -- confirm OptimizerSciPy accepts it.
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.5/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    """
    Differentiate tequila structures (Objectives, ExpectationValues,
    QTensors) with respect to a Variable.

    Parameters
    ----------
    objective:
        the structure to be differentiated.
    variable:
        the parameter to differentiate with respect to;
        None requests the total gradient over all variables.
    no_compile:
        skip the gradient-mode circuit compilation step if True.

    Returns
    -------
    A dictionary {variable: d objective / d variable} for the total gradient,
    otherwise a single Objective (or an elementwise-differentiated QTensor).
    """
    # Total gradient: recurse once per variable of the objective.
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for v in all_vars:
            assert (v is not None)
            gradients[v] = grad(objective, v, no_compile=no_compile)
        return gradients

    variable = assign_variable(variable)

    # QTensor inputs are differentiated elementwise.
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)

    # Fast return for objectives that do not depend on the variable.
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # Compile away gates without a known shift rule before differentiating.
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # Dispatch on the (compiled) structure type.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate an Objective by the chain rule: for every argument of the
    transformation, multiply the outer derivative (autodiff of the
    transformation w.r.t. that argument) with the inner derivative
    (recursive gradient of the argument itself) and sum the products.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None  # accumulated gradient objective; stays None if all terms vanish

    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # Outer derivative of the transformation w.r.t. argument i,
        # built with whichever autodiff backend is available.
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # save redundancies: reuse the inner gradient for repeated
            # expectation-value arguments
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner

    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Innermost derivative: differentiate an Objective argument all the way
    down to plain variables.

    Returns 1.0 when arg is the variable itself, 0.0 for other variables and
    fixed variables, and recurses into expectation values and nested
    transforms otherwise.

    :param arg: a transform or variable object to be differentiated
    :param variable: the Variable to differentiate with respect to
    """
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # Compiled expectation value: differentiate the abstract one and
        # recompile with the original input arguments.
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param E: the expectation value (Hamiltonian + unitary) to differentiate
    :param variable: the Variable with respect to which differentiation should be performed.
    :return: the derivative as an Objective, summed over all gates that carry the variable
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    # all (position, gate) pairs in the circuit that depend on the variable
    param_gates = unitary._parameter_map[variable]

    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        # shift-rule contribution of this single gate occurrence
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc

    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
       is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''

    # possibility for overwride in custom gate construction
    if hasattr(g, "shifted_gates"):
        # chain rule: derivative of the gate parameter w.r.t. the variable
        inner_grad = __grad_inner(g.parameter, variable)
        # list of (weight, shifted-gate) pairs implementing the shift rule
        shifted = g.shifted_gates()
        dOinc = Objective()
        for x in shifted:
            # NOTE: this rebinds g (the parameter above) to the shifted gate
            w, g = x
            # replace the original gate at position i with the shifted one
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.75/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Helper container pairing a Pauli-string coefficient with its
    single-qubit operators and the qubit positions they act on.
    Used as an intermediate format before MPO construction.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        # stored privately; exposed read-only through properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        # scalar prefactor of this term
        return self._coefficient

    @property
    def operators(self):
        # list of single-qubit operator matrices
        return self._operators

    @property
    def positions(self):
        # qubit indices the operators act on
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the MPO.

    Each site tensor in ``container`` has index order
    [left bond, right bond, phys_out, phys_in]; the physical legs are
    always of dimension 2 (qubits).
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        """
        :param n_qubits: number of sites; starts with one (1,1,2,2) zero tensor per site
        """
        self.n_qubits = n_qubits
        # FIX: use builtin ``complex`` (== complex128 dtype). The ``np.complex``
        # alias was deprecated in NumPy 1.20 and removed in 1.24, so the old
        # code raised AttributeError on modern NumPy.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                            for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max (left) bond dimension over all site tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write ``add_operator`` into the tensor at site ``qubit``.

        :param qubit: site index
        :param set_at: [bond_l, bond_r] to place a full 2x2 matrix, or
            [bond_l, bond_r, phys_out, phys_in] to set a single entry
        :param add_operator: 2x2 matrix or scalar, matching ``set_at``
        :raises Exception: if ``set_at`` has an unsupported length
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                 add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array: grow the bond dimensions of the
        tensor at ``qubit`` and place ``add_operator`` in the new corner.

        :param qubit: site index
        :param update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
            the last two (physical) dimensions are always 2x2
        :param add_operator: 2x2 matrix written at the new (max,max) bond position
        :raises Exception: on malformed ``update_dir``
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # FIX: ``complex`` instead of the NumPy-1.24-removed ``np.complex`` alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compression of MPO bond dimensions via SVD: a left-to-right sweep
        followed by a right-to-left sweep. Singular values below the module
        constant ``EPS`` are discarded; the surviving weights are split
        sqrt-symmetrically between the neighbouring tensors.
        """
        n_qubits = len(self.container)
        # Merge the two physical legs so each tensor is rank 3 for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Split the merged physical leg back into the two 2-dim qubit legs
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle
    MPO-Hamiltonians.

    Converts a tequila QubitHamiltonian into a list of MPOContainer
    objects (split whenever the bond dimension would exceed ``maxdim``)
    and can reconstruct the dense matrix for verification.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object
        Args:
          hamiltonian: the qubit Hamiltonian (or a text identifier) to encode.
          backend: An optional backend. Defaults to the default backend
            of TensorNetwork.
          n_qubits: number of qubits; inferred via get_n_qubits() if omitted.
          name: An optional name for the MPO.
          maxdim: bond-dimension threshold at which a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this file chunk —
            # presumably provided elsewhere; confirm before relying on it.
            self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Build self.mpo (a list of MPOContainer) from self.hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
    def openfermion_to_intermediate(self):
        """
        Convert the paulistrings of self.hamiltonian into a list of
        SubOperator (coefficient, operator matrices, qubit positions).
        Only the first Pauli string may be a pure identity.
        """
        def get_pauli_matrix(string):
            # FIX: dtype=complex; ``np.complex`` was removed in NumPy 1.24
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate
    def build_single_mpo(self, intermediate, j):
        """
        Build one MPOContainer starting at term index ``j``, adding terms
        until either all are consumed or the bond dimension reaches maxdim.
        The coefficient of each term is spread uniformly over all sites via
        its n_qubits-th root.
        Returns (mpo, next_j).
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # FIX: builtin complex() cast; ``np.complex`` removed in NumPy 1.24
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: keep absorbing terms while dimension stays small
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # Periodically compress to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j
    def intermediate_to_mpo(self, intermediate):
        """Consume all intermediate terms, producing a list of MPOContainer."""
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0 # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list
    def construct_matrix(self):
        ''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq.
        Contracts each MPO in self.mpo over its bond indices and sums them;
        returns a rank-4 tensor of shape (d,d,d,d) with d = 2**(n_qubits/2). '''
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # NOTE(review): if the backend does not return torch tensors,
            # H_m stays unset here — presumably the pytorch backend set at
            # module level guarantees it; confirm.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.75/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable energy evaluator handed to scipy.optimize.minimize.

    Unlike the stock tequila container, this one rebuilds the Hamiltonian
    from the current parameter values on every call (the Hamiltonian
    itself is parametrized) before measuring the expectation value of
    the fixed circuit.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tq Hamiltonian.
    unitary:
        the (parametrized) circuit whose expectation value is evaluated.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    Ham_derivatives:
        derivatives of the parametrized Hamiltonian, used by _GradContainer.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # overwritten on every __call__ with an expectationvalue-count summary
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at scipy's parameter vector ``p``.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered as param_keys.
        args
        kwargs
        Returns
        -------
        complex:
            measured energy (cast to complex; see note at the return).
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            # circuit parameters stay real floats; parameters belonging to the
            # Hamiltonian are cast to complex for the parametrized Hamiltonian
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            # merge in the variables that are held fixed during optimization
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # rebuild the Hamiltonian at the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient evaluator handed to scipy.optimize.minimize as ``jac``.

    Combines two contributions per parameter: the circuit gradient
    (tequila shift rules) and the explicit Hamiltonian derivative
    (for parameters that the parametrized Hamiltonian depends on).
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient vector at scipy's parameter vector ``p``.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient, ordered as param_keys.
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient of the energy w.r.t. each parameter, as complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        dE_vec = numpy.zeros(self.N)
        # per-variable gradient record appended to self.history
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            # same real/complex split as in _EvalContainer.__call__
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        # running count of expectation values, reported via infostring
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution 1: gradient through the circuit parameters
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution 2: explicit derivative of the parametrized Hamiltonian
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # if contribution 1 is present, ``derivative`` is a compiled
            # objective and must still be evaluated; otherwise it is a number
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that works on a (parametrized Hamiltonian,
    circuit) pair instead of a precompiled Objective: it overwrites the
    expectation and gradient container objects with the local
    _EvalContainer / _GradContainer.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables received in calls to optimizers.
        Parameters
        ----------
        all_variables: list:
            all variables of the Hamiltonian and circuit combined.
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this file — reaching this branch would raise NameError;
                # confirm the import situation upstream.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into variables that scipy optimizes vs. those held fixed
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            the parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the parametrized circuit to optimize.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # variables appearing in the Hamiltonian plus its derivative table
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # silent warm-up call: populates E.infostring for the printout below
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): ``objective`` is not defined in this scope and
                # _QngContainer's import is commented out at file top — the
                # qng branch would raise NameError if taken; confirm intent.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # string like '2-point' is passed straight through to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined ``objective`` issue as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient through the local container
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # records per-iteration data; scipy calls this once per iteration
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns an unused local — presumably
            # callback.real_iterations was intended; verify.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy function instead and passes the
    objective construction down
    Parameters
    ----------
    Hamiltonian:
        The parametrized Hamiltonian to optimize over
    unitary:
        The parametrized circuit to optimize
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # FIX: the second key component was wrapped in a list
            # (assign_variable([k[1]])), which assign_variable cannot
            # handle; pass the hashable key itself.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        # normalize user-supplied keys to Variable objects
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.75/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated (Objective, ExpectationValue
        or QTensor of Objectives)
    :param variable: parameter with respect to which objective should be
        differentiated. default None: total gradient, returned as a dictionary
        mapping every variable of the objective to its derivative.
    :param no_compile: if True, skip the gradient-mode compilation pass; the
        objective must then already be compiled into shift-rule-compatible gates.
    :return: dictionary of Objectives if variable is None, otherwise a single
        Objective representing d(objective)/d(variable).
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    if isinstance(objective, QTensor):
        # tensor of objectives: differentiate elementwise via numpy.vectorize
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    if variable not in objective.extract_variables():
        # derivative w.r.t. an unused variable is the (empty) zero objective
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile into gates for which parameter-shift rules are known
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        # bare expectation value: differentiate the original (uncompiled) object
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # objective wrapping a single expectation value: use the compiled one
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Apply the chain rule over the arguments of an Objective:
    d(f(a_0, ..., a_n))/d(variable) = sum_i df/da_i * da_i/d(variable).

    :param objective: the (compiled) Objective to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: the accumulated derivative as an Objective (or number)
    """
    arguments = objective.args
    trafo = objective.transformation
    inner_cache = {}
    accumulated = None

    for position, argument in enumerate(arguments):
        # outer derivative: d(transformation)/d(argument at this position)
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_df = jax.grad(trafo, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_df = jax.grad(trafo, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # shortcut: identity transformation has constant outer derivative 1
        if trafo is None or trafo == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_df)

        if hasattr(argument, "U"):
            # expectation values: cache inner derivatives to avoid recomputing
            # the same one when it appears multiple times in the arguments
            if argument not in inner_cache:
                inner_cache[argument] = __grad_inner(arg=argument, variable=variable)
            inner = inner_cache[argument]
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=argument, variable=variable)

        # don't pile up zero expectation values
        if inner == 0.0:
            continue

        term = outer * inner
        accumulated = term if accumulated is None else accumulated + term

    if accumulated is None:
        raise TequilaException("caught None in __grad_objective")
    return accumulated
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Differentiate a single argument with respect to a variable, recursing
    all the way down: returns 1.0 / 0.0 for (non-)matching Variables,
    0.0 for FixedVariables, and defers to the expectation-value or
    objective gradient otherwise.

    :param arg: a transform, variable, or expectation value to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated
    '''
    assert (isinstance(variable, Variable))

    if isinstance(arg, Variable):
        # d var_i / d var_j is the Kronecker delta
        return 1.0 if arg == variable else 0.0

    if isinstance(arg, FixedVariable):
        return 0.0

    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)

    if hasattr(arg, "abstract_expectationvalue"):
        # already-compiled expectation value: differentiate the abstract one,
        # then re-compile with the original compile arguments
        # (note: `compile` here is tequila's compile, imported at module top)
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)

    # anything else is treated as a (nested) objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U' H U> with
    respect to one variable: sums the shift-rule contribution of every
    gate in the circuit that is parametrized by that variable.

    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which to differentiate
    :return: the derivative as an Objective (0.0 if independent of variable)
    '''
    H = E.H
    U = E.U
    if not (U.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))

    # fast return if possible
    if variable not in U.extract_variables():
        return 0.0

    # one shift-rule term per gate that carries this variable
    gradient = Objective()
    for position, gate in U._parameter_map[variable]:
        gradient += __grad_shift_rule(U, gate, position, variable, H)

    assert gradient is not None
    return gradient
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of one directly differentiable gate via its shift rule.
    Expects precompiled circuits (gates must expose `shifted_gates`).

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position in `unitary` at which g appears
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian measured against `unitary`
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # chain rule: derivative of the gate parameter w.r.t. the variable
    chain = __grad_inner(g.parameter, variable)

    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace the original gate by its shifted counterpart and weigh
        # the resulting expectation value accordingly
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * chain) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_0.75/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Read-only record bundling one Pauli term in an intermediate format:
    a scalar prefactor, the single-qubit operator matrices, and the qubit
    indices they act on (aligned element-wise with the operators).
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # keep the raw data private; access goes through the read-only
        # properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar weight of the term."""
        return self._coefficient

    @property
    def operators(self):
        """List of single-qubit operator matrices."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices, aligned with ``operators``."""
        return self._positions
class MPOContainer:
    """
    Container for the tensors of a matrix-product operator (MPO).

    Holds one rank-4 tensor per qubit with index order
    [left bond, right bond, physical out, physical in]. Bond dimensions
    grow as terms are added (``update_container``, mimicking a dynamic
    array) and are reduced again by SVD compression (``compress_mpo``).

    Fix: ``np.complex`` was deprecated in numpy 1.20 and removed in 1.24;
    the builtin ``complex`` is the identical complex128 dtype alias.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # start from trivial bond dimension 1 on every site
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the largest left-bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of one qubit.

        set_at: where to put data.
            length 2 -> [bond_l, bond_r]: set the whole 2x2 physical block
            length 4 -> set a single scalar entry
        """
        if len(set_at) == 2:
            # set a full 2x2 matrix at the given bond indices
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # set one specific scalar value
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of one qubit like a dynamic array and place
        ``add_operator`` in the newly created corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
            the last two (physical) dimensions must stay 2x2.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # add new values into the fresh corner block
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO bond dimensions via two SVD sweeps (forwards then
        backwards), discarding singular values below EPS and distributing
        the square-root weights to both sides.
        """
        n_qubits = len(self.container)
        # merge the two physical legs so each site is a rank-3 tensor
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # forward sweep
        for q in range(n_qubits - 1):
            # permute [bond_l, bond_r, phys] -> [bond_l, phys, bond_r]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # keep only singular values above threshold
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            # undo the permutation and push vh into the next site
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # backward sweep
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # split the merged physical leg back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Converts a tequila QubitHamiltonian into a list of matrix-product
    operators (MPOs) with bounded bond dimension, and can contract an MPO
    list back into a dense tensor for verification.

    Fixes:
    - ``np.complex`` (removed in numpy 1.24) replaced by the builtin
      ``complex`` — the identical complex128 dtype / scalar type.
    - ``construct_matrix`` previously left ``H_m`` undefined (NameError)
      when the tensornetwork backend does not return torch tensors; a
      fallback to the raw tensor was added.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the QubitHamiltonian (or a file path) to convert.
            backend: an optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred if not given.
            name: an optional name for the MPO.
            maxdim: bond-dimension budget per single MPO; when exceeded,
                the remaining terms start a new MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class —
            # pass n_qubits explicitly unless it is provided elsewhere.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the hamiltonian's paulistrings into a list of SubOperator
        records (coefficient, 2x2 matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # 2x2 Pauli matrices; builtin complex == complex128 dtype
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # store all paulistrings in the intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # only the first term may be the identity (empty paulistring);
            # it is later distributed over all sites
            if first and not paulistring.items():
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # collect the operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            intermediate += [SubOperator(coefficient=coefficient,
                                         operators=operators,
                                         positions=positions)]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index ``j``; terms are absorbed until
        the bond dimension would exceed ``self.maxdim``.

        Returns:
            (mpo, next_j): the built MPOContainer and the index of the first
            term that was NOT absorbed.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)

        def _site_operator(term, q):
            # the term's operator at site q, or identity if it acts elsewhere
            if q in term.positions:
                return term.operators[term.positions.index(q)]
            return np.eye(2)

        # first term: distribute coefficient^(1/n_qubits) over all sites
        # as plain 2x2 blocks (bond dimension stays 1)
        term = intermediate[j]
        scale = complex(term.coefficient) ** (1 / n_qubits)
        for q in range(n_qubits):
            mpo.set_tensor(qubit=q, set_at=[0, 0],
                           add_operator=scale * _site_operator(term, q))

        # remaining terms: each grows the bond dimension by one; compress
        # periodically to keep intermediate dimensions manageable
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            term = intermediate[j]
            scale = complex(term.coefficient) ** (1 / n_qubits)
            for q in range(n_qubits):
                # edge sites only extend one bond; bulk sites extend both
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=scale * _site_operator(term, q))
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the intermediate terms into as many MPOs as maxdim requires."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense tensor represented by self.mpo (sum over all MPOs),
        e.g. to compare with the Hamiltonian obtained from tequila.

        Returns a rank-4 tensor of shape (d, d, d, d) with d = 2**(n/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # network layout per MPO:
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # connect bond indices between neighbouring sites
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # collect dangling edges (free indices): left/right bonds,
            # then all upper and all lower physical legs
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # contract all non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # bug fix: with a non-torch backend the tensor is used directly
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if first:
                H = H_m
                first = False
            else:
                H += H_m
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_0.75/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    SciPy-facing callable that evaluates the energy <H(vars)> for a given
    parameter vector and optionally records the optimization history.
    Used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable producing the (parametrized) Hamiltonian from variables.
    unitary:
        the circuit whose expectation value is measured.
    param_keys:
        ordered variable keys matching positions in the parameter vector.
    samples:
        number of samples used by the simulator.
    save_history:
        whether each __call__ records energy and angles.
    print_level:
        verbosity of printing during __call__.
    N:
        the length of param_keys.
    history / history_angles:
        if save_history, energies and angle dicts from every __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None,
                 passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.samples = samples
        self.passive_angles = passive_angles
        self.save_history = save_history
        self.print_level = print_level
        self.Eval = Eval
        self.Ham_derivatives = Ham_derivatives
        self.infostring = None
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like ``param_keys``.

        Returns
        -------
        complex:
            the simulated expectation value (cast to complex).
        """
        circuit_vars = self.unitary.extract_variables()
        # circuit parameters stay as given; everything else (Hamiltonian
        # coefficients) is promoted to a complex scalar
        angles = {}
        for key, value in zip(self.param_keys, p):
            angles[key] = value if key in circuit_vars else complex(value)
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}

        var_dict = format_variable_dictionary(angles)
        expectation = tq.ExpectationValue(H=self.Hamiltonian(var_dict), U=self.unitary)
        E = tq.simulate(expectation, var_dict, backend='qulacs', samples=self.samples)

        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", expectation.count_expectationvalues())

        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))

        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)

        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        call the wrapped qng.
        Evaluates the gradient vector at parameter vector p. Each component
        sums (a) the circuit gradient of the expectation value, when the
        variable appears in the unitary, and (b) the expectation value of
        the Hamiltonian derivative, when the variable parametrizes the
        Hamiltonian.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient components ordered like param_keys, as a numpy array.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with the default float dtype, so
        # complex derivative values assigned below are cast/truncated before
        # the final complex64 conversion on return — confirm this is intended.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        variables = {}
        # variables that enter the circuit stay real; others are promoted to complex
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # circuit-parameter contribution: analytic gradient of <H(vars)>
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian-parameter contribution: <dH/dvar> evaluated directly
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # numbers can be stored directly; compiled objectives must be evaluated
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    Variant of OptimizerSciPy that overwrites the expectation and gradient
    container objects so that both circuit parameters and Hamiltonian
    parameters can be optimized together.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            all variables (Hamiltonian and circuit) of the problem.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this file — this raise would itself fail with a NameError.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            # NOTE(review): `warnings` and `TequilaWarning` are not imported
            # in this file — confirm against the module's actual imports.
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into the variables being optimized and the fixed ones
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            the (parametrized) Hamiltonian to optimize over; converted via
            convert_PQH_to_tq_QH.
        unitary:
            the parametrized circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # collect all variables: Hamiltonian parameters plus circuit parameters
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # energy evaluation container (called once up front to fill infostring)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                # NOTE(review): this branch references `objective`, which is not
                # defined in this method, and `get_qng_combos`/`_QngContainer`,
                # whose import is commented out at the top of the file — it
                # would fail with a NameError if taken. TODO confirm/restore.
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # any other string is passed through to scipy as a numerical scheme
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                # NOTE(review): same undefined-name concern as the 'qng'
                # string branch above.
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient container (called once up front to fill infostring)
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # NOTE(review): these are class-level mutable attributes, shared
            # across all SciPyCallback instances; acceptable here only because
            # exactly one instance is created per __call__.
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                # record the most recent evaluation at each scipy iteration
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local that is never read — the
            # intended failsafe appears to have no effect. TODO confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Construct a local optimize_scipy optimizer and run it on the given
    (parameterized Hamiltonian, unitary) pair.

    Parameters
    ----------
    Hamiltonian:
        parameterized Hamiltonian (converted to a tequila QubitHamiltonian by the optimizer).
    unitary:
        the parameterized circuit whose expectation value is minimized.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None]: (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        a dictionary of variables and tequila objectives to define your own gradient,
        None for automatic construction (default).
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        a dictionary (keys: tuple of variables, values: tequila objective) to define your own hessian,
        None for automatic construction (default).
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers.
        If None they are chosen by the optimizer.
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize.
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation).
    maxiter: int: (Default value = 100):
        maximum number of iterations.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None.
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call.
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to (emulate) execution.
    method: str: (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods').
    tol: float: (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation).
    method_options: dict, optional:
        Dictionary of scipy method options.
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation).
    method_constraints: optional:
        (see scipy documentation).
    silent: bool:
        No printout if True.
    save_history: bool:
        Save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # Normalize a user-supplied gradient dictionary (values are Objectives).
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    # Normalize a user-supplied hessian dictionary; keys are (variable, variable) tuples.
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUGFIX: was assign_variable([k[1]]), which wrapped the second key
            # element in a list and produced invalid hessian keys.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # Build the optimizer with all method/backend settings.
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    # Canonicalize initial value keys to Variable objects.
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_0.75/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated (Objective or QTensor of Objectives)
    :param variable: parameter with respect to which objective should be differentiated.
                     default None: total gradient (one entry per extracted variable).
    :param no_compile: if True, skip the gradient-mode circuit compilation step.
    :return: dictionary mapping each variable to its derivative Objective when
             variable is None; otherwise a single derivative Objective.
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable.
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    # QTensor: differentiate elementwise via numpy vectorize.
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    # Derivative w.r.t. a variable the objective does not depend on is the
    # (empty) zero Objective.
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # Compile gates into a form for which shift rules are available.
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # Dispatch on the (compiled) objective type.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # Single-expectation-value objective: differentiate its last argument.
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Apply the chain rule to a transformed Objective:
    d/dv f(a_1, ..., a_n) = sum_i (df/da_i) * (da_i/dv).
    The outer derivative df/da_i is built with jax/autograd; the inner
    derivative da_i/dv is obtained recursively via __grad_inner.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None

    # Cache inner derivatives of expectation values so repeated args are
    # only differentiated once.
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # arg is an expectation value: save redundancies via the cache
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        # Should not happen: grad() already checked variable dependence.
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Differentiate a single argument (Variable, FixedVariable, expectation
    value, or nested Objective) with respect to ``variable``.
    Returns 1.0/0.0 for (non-)matching Variables and a derivative Objective
    otherwise.

    :param arg: a transform, variable, or expectation-value object to differentiate
    :param variable: the Variable with respect to which arg is differentiated
    '''
    assert isinstance(variable, Variable)

    # Base case: the derivative of a variable is 1 for itself, 0 otherwise.
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0

    # Fixed (non-trainable) values carry no dependence on any variable.
    if isinstance(arg, FixedVariable):
        return 0.0

    # Abstract expectation value: differentiate via the shift rule machinery.
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)

    # Compiled expectation value: differentiate its abstract form, then
    # recompile the resulting derivative with the original input arguments.
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)

    # Anything else is treated as a nested Objective.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Implements the analytic partial derivative of an expectation value
    <0|U* H U|0> with respect to one variable, by applying the shift rule to
    every gate in U that is parameterized by that variable.

    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which differentiation is performed
    :return: the derivative as an Objective (sum of shifted expectation values)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible: no dependence means zero derivative
    if variable not in unitary.extract_variables():
        return 0.0

    # All (position, gate) pairs in the circuit that carry this variable.
    param_gates = unitary._parameter_map[variable]

    # Product-rule sum over every occurrence of the variable.
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc

    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly differentiable gate via its shift rule.
    Expects precompiled circuits (gates must expose shifted_gates()).

    :param unitary: QCircuit: the circuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is measured
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # possibility for override in custom gate construction
    if hasattr(g, "shifted_gates"):
        # Chain rule: derivative of the gate parameter w.r.t. the variable.
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        # Each shift contributes weight * <shifted circuit | H | shifted circuit>.
        for x in shifted:
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.3/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Small read-only record used as an intermediate format while building
    MPOs: a scalar coefficient, a list of operator matrices, and the qubit
    positions those operators act on.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # Stored on private attributes; exposed read-only via properties.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """List of operator matrices belonging to this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the
    MPO via SVD.

    Each entry of ``container`` is a rank-4 tensor of shape
    (bond_left, bond_right, 2, 2); the trailing 2x2 indices are the physical
    indices of the qubit.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # One tensor per qubit; bond dimensions start at 1.
        # BUGFIX: `np.complex` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin `complex` is the documented replacement.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximum left bond dimension over all qubit tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of ``qubit``.

        set_at: [b_l, b_r] to place a whole 2x2 matrix at those bond indices,
                or [b_l, b_r, p, p'] to set a single entry.
        add_operator: 2x2 array (first form) or a scalar (second form).
        """
        if len(set_at) == 2:
            # Set a full 2x2 physical matrix
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # Set one specific value
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of ``qubit`` like a dynamic array and place
        ``add_operator`` in the newly created bottom-right bond slot.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
                    the last two (physical) dimensions are always 2x2 only.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # BUGFIX: use a copy instead of `+=` so the caller's list is
                # not mutated as a side effect.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values in the freshly grown corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO with a forward and a backward SVD sweep, truncating
        singular values below the module-level EPS threshold.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each tensor is rank 3.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))

        # Forward sweep: left-orthogonalize and push weights to the right.
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Keep only singular values above the threshold.
            num_nonzeros = len(np.argwhere(s > EPS))
            # Distribute sqrt of singular values to both sides (@ = matmul).
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            # Undo the permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next tensor.
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])

        # Backward sweep: right-orthogonalize and push weights to the left.
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous tensor.
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])

        # Split the merged physical index back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds (lists of) MPOs from a tequila QubitHamiltonian, bounding each
    MPO's bond dimension by ``maxdim``, and can reconstruct the dense matrix
    for verification.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the QubitHamiltonian (or text form) to represent.
            backend: an optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred if not given.
            name: an optional name for the MPO.
            maxdim: maximum bond dimension per single MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class —
            # confirm it exists on a subclass/mixin, otherwise this path raises.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian to the intermediate format and then to MPOs."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert self.hamiltonian's Pauli strings into a list of SubOperator
        records (coefficient, operator matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # BUGFIX: dtype=complex replaces the removed numpy alias np.complex.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty items) -> it is
            # distributed over all qubits later.
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Collect the operators and where they act.
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting from term index ``j`` of ``intermediate``,
        absorbing further terms until maxdim is reached. Returns the MPO and
        the index of the first unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)

        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices).
        # The coefficient is spread as its n-th root over all qubit tensors.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # BUGFIX: builtin complex() replaces the removed np.complex alias.
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])

        # ***********************************************************************
        # All other entries: keep absorbing terms while the compressed MPO
        # stays below the allowed maximum bond dimension.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # Boundary tensors only grow on their inner bond.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on this position, add it; else identity.
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # Periodic compression keeps intermediate bond dimensions bounded.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split all intermediate terms into a list of maxdim-bounded MPOs."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense matrix (shape (d,d,d,d) with d = 2**(n/2)), e.g. to
        compare with the Hamiltonian obtained from tequila. Sums the
        contributions of all MPOs in self.mpo.
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Network layout (contract along the horizontal bond legs):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect neighbouring bond edges.
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices): left, right, then the
            # upper and lower physical legs.
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # BUGFIX: previously H_m was left unbound for non-torch
                # backends, raising NameError below; numpy results pass through.
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.3/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tequila QubitHamiltonian.
    unitary:
        the parameterized circuit whose expectation value is evaluated.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the expectation value at parameter vector p.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        complex:
            energy value (cast to complex; the Hamiltonian may carry complex
            parameters).
        """
        # Split parameters: circuit variables stay real, the remaining
        # (Hamiltonian) parameters are cast to complex.
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Instantiate the Hamiltonian at the current parameter values and
        # simulate the resulting expectation value.
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Each component sums two contributions (when present): the derivative
        of the circuit (via tq.grad of the expectation value) and the
        derivative of the parameterized Hamiltonian (via Ham_derivatives).
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient vector, cast to numpy.complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated real but later cast to complex64
        # on return — confirm the imaginary parts are not needed earlier.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # Circuit variables stay real; Hamiltonian parameters become complex.
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution from differentiating the circuit.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution from differentiating the Hamiltonian itself.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # A plain number means the circuit contribution was absent;
            # otherwise `derivative` is a compiled objective to evaluate.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective received in calls to optimizers.
        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian and circuit variables).
        initial_values: dict or string or callable or number:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if number: that value for every variable
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over (None means all).
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        # NOTE(review): this self-assignment is a no-op kept from upstream.
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # the visible header — confirm it is in scope.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` and TequilaWarning are not imported
                # in the visible header — confirm they are in scope.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # Split into actively optimized and passive (fixed) angles.
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
def __call__(self, Hamiltonian, unitary,
             variables: typing.List[Variable] = None,
             initial_values: typing.Dict[Variable, numbers.Real] = None,
             gradient: typing.Dict[Variable, Objective] = None,
             hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
             reset_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Minimize the expectation value of a (parametrized) Hamiltonian over a
    parametrized unitary with scipy optimizers.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian object; converted via convert_PQH_to_tq_QH.
    unitary:
        parametrized circuit whose variables are optimized jointly with the
        Hamiltonian's variables.
    variables: list, optional:
        the variables to optimize. If None: optimize all.
    initial_values: dict, optional:
        a starting point from which to begin optimization. Generated if None.
    gradient: optional:
        information/object used to obtain the gradient. None: analytic.
    hessian: optional:
        information/object used to obtain the hessian. None: analytic.
    reset_history: bool: Default = True:
        whether or not to reset all history before optimizing.
    args
    kwargs

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    H = convert_PQH_to_tq_QH(Hamiltonian)
    # derivatives of the Hamiltonian w.r.t. its own variables (project API)
    Ham_variables, Ham_derivatives = H._construct_derivatives()
    # optimize over Hamiltonian variables AND circuit variables
    all_variables = copy.deepcopy(Ham_variables)
    for var in unitary.extract_variables():
        all_variables.append(var)
    infostring = "{:15} : {}\n".format("Method", self.method)
    if self.save_history and reset_history:
        self.reset_history()
    active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
    # Transform the initial value dictionary into (ordered) arrays
    param_keys, param_values = zip(*active_angles.items())
    param_values = numpy.array(param_values)
    # process and initialize scipy bounds (ordered to match param_keys)
    bounds = None
    if self.method_bounds is not None:
        bounds = {k: None for k in active_angles}
        for k, v in self.method_bounds.items():
            if k in bounds:
                bounds[k] = v
        infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
        names, bounds = zip(*bounds.items())
        assert (names == param_keys)  # make sure the bounds are not shuffled
    # evaluation container: re-assembles the expectation value for each parameter set
    E = _EvalContainer(Hamiltonian=H,
                       unitary=unitary,
                       Eval=None,
                       param_keys=param_keys,
                       samples=self.samples,
                       passive_angles=passive_angles,
                       save_history=self.save_history,
                       print_level=self.print_level)
    # one silent warm-up call so E.infostring is populated
    E.print_level = 0
    (E(param_values))
    E.print_level = self.print_level
    infostring += E.infostring
    if gradient is not None:
        infostring += "{:15} : {}\n".format("grad instr", gradient)
    if hessian is not None:
        infostring += "{:15} : {}\n".format("hess_instr", hessian)
    compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
    compile_hessian = self.method in self.hessian_based_methods
    dE = None
    ddE = None
    # detect if numerical gradients shall be used; switch off compiling if so
    if isinstance(gradient, str):
        if gradient.lower() == 'qng':
            compile_gradient = False
            if compile_hessian:
                raise TequilaException('Sorry, QNG and hessian not yet tested together.')
            # NOTE(review): `objective` is not defined in this hacked version --
            # the QNG branch would raise a NameError if taken; verify before use
            combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                    samples=self.samples, noise=self.noise)
            dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
            infostring += "{:15} : QNG {}\n".format("gradient", dE)
        else:
            # pass the string ('2-point', ...) straight through to scipy
            dE = gradient
            compile_gradient = False
            if compile_hessian:
                compile_hessian = False
                if hessian is None:
                    hessian = gradient
            infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
            infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
    if isinstance(gradient, dict):
        if gradient['method'] == 'qng':
            func = gradient['function']
            compile_gradient = False
            if compile_hessian:
                raise TequilaException('Sorry, QNG and hessian not yet tested together.')
            # NOTE(review): same undefined `objective` issue as above
            combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                    samples=self.samples, noise=self.noise)
            dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
            infostring += "{:15} : QNG {}\n".format("gradient", dE)
    if isinstance(hessian, str):
        ddE = hessian
        compile_hessian = False
    if compile_gradient:
        dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                            unitary=unitary,
                            Hamiltonian=H,
                            Eval=E,
                            param_keys=param_keys,
                            samples=self.samples,
                            passive_angles=passive_angles,
                            save_history=self.save_history,
                            print_level=self.print_level)
        # silent warm-up, as for E above
        dE.print_level = 0
        (dE(param_values))
        dE.print_level = self.print_level
        infostring += dE.infostring
    if self.print_level > 0:
        print(self)
        print(infostring)
        print("{:15} : {}\n".format("active variables", len(active_angles)))
    Es = []
    optimizer_instance = self

    class SciPyCallback:
        # records the state of the containers after each scipy iteration
        energies = []
        gradients = []
        hessians = []
        angles = []
        real_iterations = 0

        def __call__(self, *args, **kwargs):
            self.energies.append(E.history[-1])
            self.angles.append(E.history_angles[-1])
            if dE is not None and not isinstance(dE, str):
                self.gradients.append(dE.history[-1])
            if ddE is not None and not isinstance(ddE, str):
                self.hessians.append(ddE.history[-1])
            self.real_iterations += 1
            # forward to a user-supplied callback if one was given
            if 'callback' in optimizer_instance.kwargs:
                optimizer_instance.kwargs['callback'](E.history_angles[-1])

    callback = SciPyCallback()
    # Es is threaded through as extra args; the containers ignore it via *args
    res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                  args=(Es,),
                                  method=self.method, tol=self.tol,
                                  bounds=bounds,
                                  constraints=self.method_constraints,
                                  options=self.method_options,
                                  callback=callback)
    # failsafe since callback is not implemented everywhere
    if callback.real_iterations == 0:
        # NOTE(review): this local is never used afterwards -- looks like dead
        # code inherited from upstream tequila; confirm intent
        real_iterations = range(len(E.history))
    if self.save_history:
        self.history.energies = callback.energies
        self.history.energy_evaluations = E.history
        self.history.angles = callback.angles
        self.history.angles_evaluations = E.history_angles
        self.history.gradients = callback.gradients
        self.history.hessians = callback.hessians
        if dE is not None and not isinstance(dE, str):
            self.history.gradients_evaluations = dE.history
        if ddE is not None and not isinstance(ddE, str):
            self.history.hessians_evaluations = ddE.history
        # some methods like "cobyla" do not support callback functions
        if len(self.history.energies) == 0:
            self.history.energies = E.history
            self.history.angles = E.history_angles
    # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
    ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
    E_final = ea[0][0]
    angles_final = ea[0][1]
    angles_final = {**angles_final, **passive_angles}
    return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Construct a scipy optimizer and minimize <H(theta)> over the given unitary.

    Parameters
    ----------
    Hamiltonian:
        the (parametrized) Hamiltonian whose expectation value is minimized.
    unitary:
        the parametrized circuit.
    gradient: typing.Union[str, typing.Dict[Variable, Objective]], optional:
        '2-point', 'cs' or '3-point' for numerical gradients (not all scipy
        methods support them), a dict of variables -> objectives for custom
        gradients, 'qng' for the quantum natural gradient, or None for
        automatic/analytic construction.
    hessian: typing.Union[str, typing.Dict], optional:
        same conventions as `gradient`, keyed by tuples of variables.
    initial_values: dict, optional:
        starting values keyed by hashable variable names; zeros/randoms are
        generated downstream if None.
    variables: list, optional:
        the subset of variables to optimize; all if None.
    samples: int, optional:
        shots per circuit evaluation (None = exact simulation).
    maxiter: int: (Default = 100):
        maximum number of iterations.
    backend: str, optional:
        simulator backend, auto-chosen if None.
    backend_options: dict, optional:
        extra options forwarded to the compiled objective.
    noise: NoiseModel, optional:
        noise model applied to all expectation values.
    device: str, optional:
        device on which to (emulatedly) execute.
    method: str: (Default = "BFGS"):
        scipy optimization method.
    tol: float: (Default = 1.e-3):
        scipy convergence tolerance.
    method_options: dict, optional:
        scipy method options.
    method_bounds: dict, optional:
        variable bounds (scipy convention).
    method_constraints: optional:
        scipy constraints.
    silent: bool:
        no printout if True.
    save_history: bool:
        save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # normalize custom objective-valued gradients into variable-keyed dicts
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUG FIX: the second key was previously wrapped in a list,
            # assign_variable([k[1]]), which is not a valid variable name
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.3/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated
        (QCircuit, ParametrizedGateImpl, Objective, ExpectationValue, Transform, Variable, QTensor)
    :param variable: parameter with respect to which objective should be differentiated.
        default None: total gradient (a dict over all variables).
    :param no_compile: if True, skip the gradient-mode compilation pass.
    :return: dictionary of Objectives if variable is None, otherwise a single
        Objective (or number for trivial cases).
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate elementwise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # objective does not depend on variable: derivative is the empty objective
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile to gates for which shift rules are known
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # single-expectationvalue objective: differentiate its last argument
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over a transformed objective f(arg_0, ..., arg_n):
    d f/d variable = sum_i (df/darg_i) * (darg_i/d variable).

    The outer derivative df/darg_i is taken symbolically with jax/autograd;
    the inner derivative is recursed via __grad_inner.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of repeated expectation values
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # holomorphic=True because arguments may carry complex coefficients here
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # arg is an expectation value: save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Innermost step of the chain rule: differentiate `arg` with respect to
    `variable`, recursing all the way down to plain variables.

    Returns 1.0 if arg is the variable itself, 0.0 if it is a different or
    fixed variable, and otherwise delegates to the expectation-value or
    objective gradient routines.

    :param arg: a transform, expectation value or variable object to differentiate
    :param variable: the Variable with respect to which arg is differentiated
    """
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        # d(x)/d(x) = 1, d(y)/d(x) = 0
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate its abstract form,
        # then re-compile with the same backend arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <psi(theta)|H|psi(theta)>
    with respect to one variable, summed over every gate that carries it.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which differentiation is performed
    :return: an Objective representing d<E>/d(variable)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all (position, gate) pairs in U parametrized by this variable
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        # product rule: one shift-rule term per occurrence of the variable
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly-differentiable gate via its shift rule.
    Expects precompiled circuits.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian measured against unitary
    :return: an Objective whose evaluation yields d<E>/d(variable) for this gate
    '''
    # gates advertise their own shift decomposition; without it we cannot proceed
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate parameter w.r.t. the variable
    inner_grad = __grad_inner(g.parameter, variable)
    dOinc = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace the original gate by its shifted version and measure
        Ux = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
        dOinc += (weight * inner_grad) * Ex
    return dOinc
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.5/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Intermediate record for one Pauli term of a Hamiltonian: a scalar
    coefficient, the 2x2 operator matrices, and the qubit positions on
    which those operators act.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # read-only state, exposed through the properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar weight of this Pauli term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices, aligned element-wise with ``positions``."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which ``operators`` act."""
        return self._positions
class MPOContainer:
    """
    Handles the MPO tensors for one chain of qubits. Supports setting values
    at given positions, growing the bond dimensions (wannabe-equivalent to
    dynamic arrays) and SVD-based compression of the MPO.

    Each entry of ``container`` is a rank-4 tensor with index order
    (bond_left, bond_right, phys_out, phys_in); the physical dimensions are
    always 2x2.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # FIX: np.complex was removed in NumPy >= 1.24; the builtin `complex`
        # is the documented replacement and yields the same dtype (complex128)
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximum left bond dimension over all qubits."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of ``qubit``.

        set_at: where to put the data.
            length 2 -> (bond_left, bond_right): add_operator is a 2x2 matrix.
            length 4 -> full index: add_operator is a scalar.
        """
        if len(set_at) == 2:
            # Set a whole physical 2x2 block
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # Set one specific entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of ``qubit``'s tensor and place
        ``add_operator`` (a 2x2 matrix) in the new corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions must stay 2x2.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # FIX: build a new list instead of `update_dir += [0, 0]`,
                # which mutated the caller's list in place
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)  # FIX: was np.complex
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values in the freshly created corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO bond dimensions via SVD: a forward sweep followed by
        a backward sweep, truncating singular values below EPS and splitting
        the singular weights between the left and right factors.
        """
        n_qubits = len(self.container)
        # Fuse the two physical indices so each tensor is rank 3
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            # Undo the permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds matrix-product-operator (MPO) representations of a qubit
    Hamiltonian, splitting into several MPOs whenever the bond dimension
    would exceed ``maxdim``.
    """

    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 # tensors: List[Tensor],
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: a tequila QubitHamiltonian (or a path/text identifier).
            backend: an optional backend (currently unused in __init__).
            n_qubits: number of qubits; derived from the Hamiltonian if None.
            name: an optional name for the MPO (currently unused).
            maxdim: bond-dimension threshold at which a new MPO is started.
        """
        # NOTE: annotations above are string literals so defining this class
        # does not require tq/AbstractBackend to be importable at def time.
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class as
            # shown -- presumably provided elsewhere; verify before relying
            # on the n_qubits=None path
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert the Hamiltonian to intermediate form, then to a list of MPOs."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert the Hamiltonian's Pauli strings into a list of SubOperator
        records (coefficient, operator matrices, qubit positions).
        Only the first Pauli string may be the identity.
        """
        def get_pauli_matrix(string):
            # FIX: np.complex was removed in NumPy >= 1.24; use builtin complex
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term ``j`` of ``intermediate``, absorbing
        further terms until either all are consumed or the bond dimension
        reaches ``maxdim``. Returns (mpo, next_j).
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # First term: known to fit into the initial 1x1-bond tensors.
        # The coefficient is spread over all sites via its n-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # FIX: complex(...) instead of removed np.complex(...)
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               my_operators[my_pos_index])
        # All further terms: grow the bond dimension by one per term
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # Boundary tensors only grow along their single bond index
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         np.eye(2))
            # Periodically compress to keep intermediate dimensions small
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split all terms into as many MPOs as ``maxdim`` requires."""
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator by contracting all MPOs and summing them,
        e.g. to compare with the Hamiltonian obtained from tequila.
        Returns a rank-4 tensor of shape (d, d, d, d) with d = 2**(n/2)
        (assumes an even number of qubits -- TODO confirm).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # Network layout (contract along the horizontal bond line):
        #  | | | |
        # -O--O--...--O--O-
        #  | | | |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect neighbouring bond indices
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            edges += [nodes[0].get_edge(0)]     # left boundary bond
            edges += [nodes[-1].get_edge(1)]    # right boundary bond
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]  # upper physical indices
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]  # lower physical indices
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # FIX: previously H_m was left unbound with the numpy backend,
                # raising NameError on the first iteration
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.5/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable container handed to scipy.optimize.minimize; rebuilds the
    (parametrized) Hamiltonian and evaluates <H(p)> for each parameter
    vector p, keeping the optimization history.

    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable parametrized Hamiltonian: Hamiltonian(vars) -> tq Hamiltonian.
    unitary:
        the parametrized circuit measured against the Hamiltonian.
    param_keys:
        ordered variable keys matching the positions of the numpy parameter array.
    Ham_derivatives:
        precomputed Hamiltonian derivatives (used by the gradient subclass).
    samples:
        number of shots per evaluation (None would mean exact simulation).
    save_history:
        whether to record every evaluation in history/history_angles.
    print_level:
        verbosity of printing during __call__.
    N:
        the length of param_keys.
    history:
        if save_history, energies received from every __call__.
    history_angles:
        if save_history, the angle dictionaries sent to every __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the expectation value at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered to match self.param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            the (complex-cast) energy at p.
        """
        angles = {}
        for i in range(self.N):
            # circuit variables stay real; Hamiltonian coefficients are cast
            # to complex so the parametrized Hamiltonian accepts them
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # instantiate the Hamiltonian at the current coefficient values
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): returned as complex because "jax types confuses
        # optimizers"; scipy generally expects a real scalar -- confirm that
        # the imaginary part is always (numerically) zero here
        return complex(E)
class _GradContainer(_EvalContainer):
    """
    Callable gradient evaluator handed to scipy.optimize (jac argument).

    For each active parameter the derivative has two contributions:
    the usual circuit-parameter gradient (via tq.grad on the expectation
    value) and, for parameters the Hamiltonian itself depends on, the
    expectation value of the Hamiltonian derivative. See _EvalContainer
    for the shared attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the parameter vector p.

        Parameters
        ----------
        p: numpy array:
            flat vector of active parameter values, ordered like param_keys.

        Returns
        -------
        numpy.array:
            complex64 vector of partial derivatives, ordered like param_keys.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # FIX: buffer must be complex — Hamiltonian-derivative contributions
        # can be complex; the original real-valued numpy.zeros(self.N) buffer
        # silently dropped (or errored on) the imaginary part before the
        # final cast to complex64.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        circuit_vars = unitary.extract_variables()  # hoisted loop invariant
        variables = {}
        for i in range(len(self.param_keys)):
            # circuit parameters stay as given; Hamiltonian-only parameters
            # are cast to python complex (mirrors _EvalContainer.__call__)
            if self.param_keys[i] in circuit_vars:
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit depending on this parameter
            if self.param_keys[i] in list(circuit_vars):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian depending on this parameter
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # FIX: accept any numeric type. The original check
            # (float or numpy.complex64) missed e.g. numpy.complex128 results
            # from tq.simulate and then attempted to *call* a plain number.
            if isinstance(derivative, numbers.Number):
                dE_vec[i] = derivative
            else:
                # compiled tequila objective: evaluate it at the current point
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that overwrites the expectation and gradient
    container objects so that the (possibly parameterized) Hamiltonian is
    rebuilt from the current variables on every evaluation.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective received in calls to optimizers.
        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian and circuit variables).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            # NOTE(review): TequilaOptimizerException is not visibly imported
            # in this file — confirm it is in scope before this branch fires
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            # one number -> broadcast to all variables
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            # NOTE(review): `warnings` / TequilaWarning are not visibly
            # imported in this file — confirm they are in scope
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into actively optimized vs. fixed (passive) angles
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parameterized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the parameterized circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # collect all variables: Hamiltonian variables + circuit variables
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # silent warm-up call: populates E.infostring (and the history)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is undefined in this scope — this
                # branch would raise a NameError if ever taken; confirm
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # pass the string ('2-point', 'cs', ...) straight to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is undefined here as well — confirm
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up call: populates dE.infostring (and the history)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # callback that mirrors the evaluation history per scipy iteration
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a dead local (never read afterwards);
            # presumably callback.real_iterations was intended — confirm
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Entry point: builds the local optimize_scipy optimizer and runs it on the
    given parameterized Hamiltonian and unitary.
    Parameters
    ----------
    Hamiltonian:
        the parameterized Hamiltonian to optimize over.
    unitary:
        the parameterized circuit.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run (passed through to the optimizer).
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize user-supplied analytic gradient dictionaries
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # FIX: was assign_variable([k[1]]) — wrapping the second key
            # element in a list is not a valid variable key; pass it directly
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        # normalize user-given keys to Variable objects
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.5/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Differentiate tequila Objectives, ExpectationValues, Unitaries (including
    single gates) and Transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which objective should be
        differentiated; default None returns the total gradient as a
        dictionary {variable: gradient}
    :param no_compile: skip the gradient-mode compilation pass if True
    return: dictionary of Objectives when variable is None, otherwise a single
        Objective (or number for Variables/Transforms).
    '''
    # total gradient: recurse once per variable
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for v in all_vars:
            assert (v is not None)
            gradients[v] = grad(objective, v, no_compile=no_compile)
        return gradients

    variable = assign_variable(variable)

    # tensors of objectives are differentiated elementwise
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)

    # objective does not depend on the variable -> zero objective
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile into shift-rule-friendly primitives first
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule across the transformation of an Objective: for each argument,
    multiply the outer derivative of the transformation (via jax/autograd)
    with the inner derivative of the argument, and sum the contributions.
    """
    args = objective.args
    transformation = objective.transformation
    seen_expectationvalues = {}
    total = None
    for pos, arg in enumerate(args):
        # outer derivative d(transformation)/d(arg at position pos)
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=pos, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=pos)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # the identity transformation has a constant outer derivative of one
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        # inner derivative d(arg)/d(variable); cache repeated expectation values
        if hasattr(arg, "U"):
            if arg in seen_expectationvalues:
                inner = seen_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                seen_expectationvalues[arg] = inner
        else:
            # purely variable-dependent inner derivative
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        total = outer * inner if total is None else total + outer * inner
    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the chain rule: derivatives all the way down to plain
    Variables, returning 1.0 / 0.0 when a Variable matches / does not match
    `variable`, and recursing into expectation values and nested objectives.
    :param arg: a transform, expectation value or variable to differentiate
    :param variable: the Variable with respect to which to differentiate
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one and
        # recompile it with the original input arguments
        abstract = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(abstract, variable=variable)
        return compile(dE, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U|H|U> with respect
    to one variable, obtained by applying the shift rule to every gate of the
    circuit that depends on that variable.
    :param E: the expectation value to differentiate
    :param variable: the variable to differentiate with respect to
    :return: dU/d(variable) as an Objective
    '''
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    dO = Objective()
    # accumulate one shift-rule contribution per dependent gate
    for idx, gate in unitary._parameter_map[variable]:
        dO += __grad_shift_rule(unitary, gate, idx, variable, hamiltonian)
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly differentiable gate. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # custom gates may provide their own shift decomposition
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    inner_grad = __grad_inner(g.parameter, variable)
    dOinc = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace the original gate by its shifted counterpart and weight it
        Ux = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
        dOinc += (weight * inner_grad) * Ex
    return dOinc
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Helper record holding one term of a Hamiltonian in an intermediate
    format: a scalar coefficient, the single-qubit operators of the term and
    the qubit positions they act on (both lists are parallel).
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        # scalar prefactor of the term
        return self._coefficient

    @property
    def operators(self):
        # operator entries (e.g. 2x2 matrices), parallel to `positions`
        return self._operators

    @property
    def positions(self):
        # qubit indices the operators act on, parallel to `operators`
        return self._positions

    def __repr__(self):
        # added for debuggability; matrices are omitted to keep output short
        return "SubOperator(coefficient={!r}, positions={!r})".format(
            self._coefficient, self._positions)
class MPOContainer:
    """
    Container for the tensors of a matrix product operator (MPO).

    Handles setting values at given positions, growing the per-qubit tensors
    like a dynamic array, and compressing the MPO via SVD. Each tensor has
    shape (left_bond, right_bond, 2, 2); the last two (physical) dimensions
    are always 2x2.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # FIX: np.complex was removed in NumPy 1.24 — use the builtin complex
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                           for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns the maximal (left) bond dimension over all tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.
        set_at: [i,j] writes a full 2x2 matrix at bond indices (i,j);
        [i,j,k,l] writes a single scalar entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array.
        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions always stay 2x2.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # FIX: build a new list instead of mutating the caller's list
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # FIX: np.complex removed in NumPy 1.24 — use builtin complex
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the freshly grown corner
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of the MPO via SVD: a forward sweep followed by a
        backward sweep, dropping (near-)zero singular values (threshold EPS)
        and distributing sqrt-weights to both sides.
        """
        n_qubits = len(self.container)
        # flatten physical dims so each tensor is rank-3 during the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # restore the physical 2x2 dimensions
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
"""
Class building up on tensornetwork FiniteMPO to handle
MPO-Hamiltonians
"""
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 #                 tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize an MPO wrapper for a qubit Hamiltonian.
        Args:
            hamiltonian: the tequila QubitHamiltonian (or a text identifier)
                to be converted into MPO form.
            backend: an optional TensorNetwork backend.
                NOTE(review): not used by this constructor — confirm intent.
            n_qubits: number of qubits; if falsy (None or 0) it is derived
                via self.get_n_qubits().
            name: an optional name for the MPO.
                NOTE(review): not used by this constructor — confirm intent.
            maxdim: maximal bond dimension allowed when building the MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        # `if n_qubits` treats 0 as "not given"; qubit count is then derived
        # from the Hamiltonian (get_n_qubits is defined elsewhere in the class)
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        # read-only view of the qubit count fixed at construction time
        return self._n_qubits
def make_mpo_from_hamiltonian(self):
intermediate = self.openfermion_to_intermediate()
# for i in range(len(intermediate)):
# print(intermediate[i].coefficient)
# print(intermediate[i].operators)
# print(intermediate[i].positions)
self.mpo = self.intermediate_to_mpo(intermediate)
def openfermion_to_intermediate(self):
# Here, have either a QubitHamiltonian or a file with a of-operator
# Start with Qubithamiltonian
def get_pauli_matrix(string):
pauli_matrices = {
'I': np.array([[1, 0], [0, 1]], dtype=np.complex),
'Z': np.array([[1, 0], [0, -1]], dtype=np.complex),
'X': np.array([[0, 1], [1, 0]], dtype=np.complex),
'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex)
}
return pauli_matrices[string.upper()]
intermediate = []
first = True
# Store all paulistrings in intermediate format
for paulistring in self.hamiltonian.paulistrings:
coefficient = paulistring.coeff
# print(coefficient)
operators = []
positions = []
# Only first one should be identity -> distribute over all
if first and not paulistring.items():
positions += []
operators += []
first = False
elif not first and not paulistring.items():
raise Exception("Only first Pauli should be identity.")
# Get operators and where they act
for k,v in paulistring.items():
positions += [k]
operators += [get_pauli_matrix(v)]
tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
intermediate += [tmp_op]
# print("len intermediate = num Pauli strings", len(intermediate))
return intermediate
def build_single_mpo(self, intermediate, j):
# Set MPO Container
n_qubits = self._n_qubits
mpo = MPOContainer(n_qubits=n_qubits)
# ***********************************************************************
# Set first entries (of which we know that they are 2x2-matrices)
# Typically, this is an identity
my_coefficient = intermediate[j].coefficient
my_positions = intermediate[j].positions
my_operators = intermediate[j].operators
for q in range(n_qubits):
if not q in my_positions:
mpo.set_tensor(qubit=q, set_at=[0,0],
add_operator=np.complex(my_coefficient)**(1/n_qubits)*
np.eye(2))
elif q in my_positions:
my_pos_index = my_positions.index(q)
mpo.set_tensor(qubit=q, set_at=[0,0],
add_operator=np.complex(my_coefficient)**(1/n_qubits)*
my_operators[my_pos_index])
# ***********************************************************************
# All other entries
# while (j smaller than number of intermediates left) and mpo.dim() <= self.maxdim
# Re-write this based on positions keyword!
j += 1
while j < len(intermediate) and mpo.get_dim() < self.maxdim:
# """
my_coefficient = intermediate[j].coefficient
my_positions = intermediate[j].positions
my_operators = intermediate[j].operators
for q in range(n_qubits):
# It is guaranteed that every index appears only once in positions
if q == 0:
update_dir = [0,1]
elif q == n_qubits-1:
update_dir = [1,0]
else:
update_dir = [1,1]
# If there's an operator on my position, add that
if q in my_positions:
my_pos_index = my_positions.index(q)
mpo.update_container(qubit=q, update_dir=update_dir,
add_operator=
np.complex(my_coefficient)**(1/n_qubits)*
my_operators[my_pos_index])
# Else add an identity
else:
mpo.update_container(qubit=q, update_dir=update_dir,
add_operator=
np.complex(my_coefficient)**(1/n_qubits)*
np.eye(2))
if not j % 100:
mpo.compress_mpo()
#print("\t\tAt iteration ", j, " MPO has dimension ", mpo.get_dim())
j += 1
mpo.compress_mpo()
#print("\tAt final iteration ", j-1, " MPO has dimension ", mpo.get_dim())
return mpo, j
def intermediate_to_mpo(self, intermediate):
n_qubits = self._n_qubits
# TODO Change to multiple MPOs
mpo_list = []
j_global = 0
num_mpos = 0 # Start with 0, then final one is correct
while j_global < len(intermediate):
current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
mpo_list += [current_mpo]
num_mpos += 1
return mpo_list
def construct_matrix(self):
# TODO extend to lists of MPOs
''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq '''
mpo = self.mpo
# Contract over all bond indices
# mpo.container has indices [bond, bond, physical, physical]
n_qubits = self._n_qubits
d = int(2**(n_qubits/2))
first = True
H = None
#H = np.zeros((d,d,d,d), dtype='complex')
# Define network nodes
# | | | |
# -O--O--...--O--O-
# | | | |
for m in mpo:
assert(n_qubits == len(m.container))
nodes = [tn.Node(m.container[q], name=str(q))
for q in range(n_qubits)]
# Connect network (along double -- above)
for q in range(n_qubits-1):
nodes[q][1] ^ nodes[q+1][0]
# Collect dangling edges (free indices)
edges = []
# Left dangling edge
edges += [nodes[0].get_edge(0)]
# Right dangling edge
edges += [nodes[-1].get_edge(1)]
# Upper dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(2)]
# Lower dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(3)]
# Contract between all nodes along non-dangling edges
res = tn.contractors.auto(nodes, output_edge_order=edges)
# Reshape to get tensor of order 4 (get rid of left- and right open indices
# and combine top&bottom into one)
if isinstance(res.tensor, torch.Tensor):
H_m = res.tensor.numpy()
if not first:
H += H_m
else:
H = H_m
first = False
return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable energy evaluator handed to scipy.optimize.minimize.

    Unlike the stock tequila container, the Hamiltonian here is itself
    parametrized: every call rebuilds H(vars) from the current variable
    values before forming the expectation value with the unitary.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tq Hamiltonian.
    unitary:
        the (possibly parametrized) circuit U.
    param_keys:
        ordered variable keys matching positions in the scipy parameter array.
    samples:
        the number of samples (shots) per evaluation.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy <U|H(p)|U> for the scipy parameter vector p.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered like param_keys.
        args
        kwargs
        Returns
        -------
        complex:
            the sampled expectation value (complex so downstream jax types
            do not confuse the optimizer).
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            # Circuit variables are passed through as-is; Hamiltonian-only
            # variables are cast to complex — presumably required by the
            # parametrized Hamiltonian (TODO confirm).
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Rebuild the Hamiltonian at the current variable values.
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable gradient evaluator handed to scipy (as ``jac``).

    For each active parameter the derivative has up to two contributions:
    the parameter-shift gradient through the circuit (if the parameter
    appears in the unitary) and the expectation value of dH/dparam (if the
    parameter appears in the Hamiltonian).
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the scipy parameter vector p.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient, ordered like param_keys.
        args
        kwargs
        Returns
        -------
        numpy.array:
            the gradient vector (dtype numpy.complex64), ordered like param_keys.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is created real (numpy.zeros); assigning a value
        # with nonzero imaginary part below would raise — in practice the
        # derivatives appear to be real. Confirm before relying on this.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution 1: circuit derivative via tq.grad (parameter shift).
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution 2: <dH/dparam> for Hamiltonian variables.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # NOTE(review): this type check misses numpy.float64/complex128
            # scalars; a simulate result of those types would be treated as a
            # callable objective below and crash — confirm with the backends used.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                # derivative still contains a compiled objective -> evaluate it.
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
def initialize_variables(self, all_variables, initial_values, variables):
"""
Convenience function to format the variables of some objective recieved in calls to optimzers.
Parameters
----------
objective: Objective:
the objective being optimized.
initial_values: dict or string:
initial values for the variables of objective, as a dictionary.
if string: can be `zero` or `random`
if callable: custom function that initializes when keys are passed
if None: random initialization between 0 and 2pi (not recommended)
variables: list:
the variables being optimized over.
Returns
-------
tuple:
active_angles, a dict of those variables being optimized.
passive_angles, a dict of those variables NOT being optimized.
variables: formatted list of the variables being optimized.
"""
# bring into right format
variables = format_variable_list(variables)
initial_values = format_variable_dictionary(initial_values)
all_variables = all_variables
if variables is None:
variables = all_variables
if initial_values is None:
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
elif hasattr(initial_values, "lower"):
if initial_values.lower() == "zero":
initial_values = {k:0.0 for k in all_variables}
elif initial_values.lower() == "random":
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
else:
raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
elif callable(initial_values):
initial_values = {k: initial_values(k) for k in all_variables}
elif isinstance(initial_values, numbers.Number):
initial_values = {k: initial_values for k in all_variables}
else:
# autocomplete initial values, warn if you did
detected = False
for k in all_variables:
if k not in initial_values:
initial_values[k] = 0.0
detected = True
if detected and not self.silent:
warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
active_angles = {}
for v in variables:
active_angles[v] = initial_values[v]
passive_angles = {}
for k, v in initial_values.items():
if k not in active_angles.keys():
passive_angles[k] = v
return active_angles, passive_angles, variables
def __call__(self, Hamiltonian, unitary,
variables: typing.List[Variable] = None,
initial_values: typing.Dict[Variable, numbers.Real] = None,
gradient: typing.Dict[Variable, Objective] = None,
hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
reset_history: bool = True,
*args,
**kwargs) -> SciPyResults:
"""
Perform optimization using scipy optimizers.
Parameters
----------
objective: Objective:
the objective to optimize.
variables: list, optional:
the variables of objective to optimize. If None: optimize all.
initial_values: dict, optional:
a starting point from which to begin optimization. Will be generated if None.
gradient: optional:
Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
hessian: optional:
Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
reset_history: bool: Default = True:
whether or not to reset all history before optimizing.
args
kwargs
Returns
-------
ScipyReturnType:
the results of optimization.
"""
H = convert_PQH_to_tq_QH(Hamiltonian)
Ham_variables, Ham_derivatives = H._construct_derivatives()
#print("hamvars",Ham_variables)
all_variables = copy.deepcopy(Ham_variables)
#print(all_variables)
for var in unitary.extract_variables():
all_variables.append(var)
#print(all_variables)
infostring = "{:15} : {}\n".format("Method", self.method)
#infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
if self.save_history and reset_history:
self.reset_history()
active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
#print(active_angles, passive_angles, variables)
# Transform the initial value directory into (ordered) arrays
param_keys, param_values = zip(*active_angles.items())
param_values = numpy.array(param_values)
# process and initialize scipy bounds
bounds = None
if self.method_bounds is not None:
bounds = {k: None for k in active_angles}
for k, v in self.method_bounds.items():
if k in bounds:
bounds[k] = v
infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
names, bounds = zip(*bounds.items())
assert (names == param_keys) # make sure the bounds are not shuffled
#print(param_keys, param_values)
# do the compilation here to avoid costly recompilation during the optimization
#compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
E = _EvalContainer(Hamiltonian = H,
unitary = unitary,
Eval=None,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level)
E.print_level = 0
(E(param_values))
E.print_level = self.print_level
infostring += E.infostring
if gradient is not None:
infostring += "{:15} : {}\n".format("grad instr", gradient)
if hessian is not None:
infostring += "{:15} : {}\n".format("hess_instr", hessian)
compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
compile_hessian = self.method in self.hessian_based_methods
dE = None
ddE = None
# detect if numerical gradients shall be used
# switch off compiling if so
if isinstance(gradient, str):
if gradient.lower() == 'qng':
compile_gradient = False
if compile_hessian:
raise TequilaException('Sorry, QNG and hessian not yet tested together.')
combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
samples=self.samples, noise=self.noise)
dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
infostring += "{:15} : QNG {}\n".format("gradient", dE)
else:
dE = gradient
compile_gradient = False
if compile_hessian:
compile_hessian = False
if hessian is None:
hessian = gradient
infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
if isinstance(gradient,dict):
if gradient['method'] == 'qng':
func = gradient['function']
compile_gradient = False
if compile_hessian:
raise TequilaException('Sorry, QNG and hessian not yet tested together.')
combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
samples=self.samples, noise=self.noise)
dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
infostring += "{:15} : QNG {}\n".format("gradient", dE)
if isinstance(hessian, str):
ddE = hessian
compile_hessian = False
if compile_gradient:
dE =_GradContainer(Ham_derivatives = Ham_derivatives,
unitary = unitary,
Hamiltonian = H,
Eval= E,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level)
dE.print_level = 0
(dE(param_values))
dE.print_level = self.print_level
infostring += dE.infostring
if self.print_level > 0:
print(self)
print(infostring)
print("{:15} : {}\n".format("active variables", len(active_angles)))
Es = []
optimizer_instance = self
class SciPyCallback:
energies = []
gradients = []
hessians = []
angles = []
real_iterations = 0
def __call__(self, *args, **kwargs):
self.energies.append(E.history[-1])
self.angles.append(E.history_angles[-1])
if dE is not None and not isinstance(dE, str):
self.gradients.append(dE.history[-1])
if ddE is not None and not isinstance(ddE, str):
self.hessians.append(ddE.history[-1])
self.real_iterations += 1
if 'callback' in optimizer_instance.kwargs:
optimizer_instance.kwargs['callback'](E.history_angles[-1])
callback = SciPyCallback()
res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
args=(Es,),
method=self.method, tol=self.tol,
bounds=bounds,
constraints=self.method_constraints,
options=self.method_options,
callback=callback)
# failsafe since callback is not implemented everywhere
if callback.real_iterations == 0:
real_iterations = range(len(E.history))
if self.save_history:
self.history.energies = callback.energies
self.history.energy_evaluations = E.history
self.history.angles = callback.angles
self.history.angles_evaluations = E.history_angles
self.history.gradients = callback.gradients
self.history.hessians = callback.hessians
if dE is not None and not isinstance(dE, str):
self.history.gradients_evaluations = dE.history
if ddE is not None and not isinstance(ddE, str):
self.history.hessians_evaluations = ddE.history
# some methods like "cobyla" do not support callback functions
if len(self.history.energies) == 0:
self.history.energies = E.history
self.history.angles = E.history_angles
# some scipy methods always give back the last value and not the minimum (e.g. cobyla)
ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
E_final = ea[0][0]
angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
angles_final = {**angles_final, **passive_angles}
return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Minimize <U|H(theta)|U> via the local optimize_scipy class, which knows how
    to rebuild the parametrized Hamiltonian at every evaluation.

    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian (PQH) to minimize over.
    unitary:
        the (parametrized) circuit U.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default).
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation,
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default).
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers.
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize (None -> all).
    samples: int, optional:
        samples/shots per circuit run (None activates full wavefunction simulation).
    maxiter: int : (Default value = 100):
        max iterations to use.
    backend: str, optional:
        Simulator backend, chosen automatically if None.
    backend_options: dict, optional:
        Additional options passed to the compiled objective in every call.
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation).
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation).
    method_options: dict, optional:
        Dictionary of options (see scipy documentation).
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation).
    method_constraints: optional:
        (see scipy documentation).
    silent: bool:
        No printout if True.
    save_history: bool:
        Save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # fix: the second key was wrapped in a list (assign_variable([k[1]])),
            # which produced an invalid variable key for the hessian dictionary.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_1.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param objective (Objective or QTensor): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
        default None: total gradient (one entry per variable).
    :param no_compile (bool): skip the gradient-mode circuit compilation pass.
    return: dictionary of Objectives if variable is None; a single Objective for
        one variable; elementwise gradients for QTensor input.
    '''
    if variable is None:
        # None means that all components are created (total gradient)
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    # QTensor: differentiate each entry independently
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    # objective does not depend on the variable -> zero (empty) objective
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # gradient_mode compilation rewrites gates into shift-rule-compatible form
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # single expectation value: it is the last argument of the compiled objective
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Chain rule over the transformation of an Objective.

    For each argument the outer derivative d(transformation)/d(arg) is built
    via autodiff (jax/autograd) and multiplied with the inner derivative
    d(arg)/d(variable); the products are summed into one Objective.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values to avoid recomputation
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the chain rule: differentiate a single argument with
    respect to a variable.
    A Variable differentiates to 1.0 when it is the target variable and to
    0.0 otherwise; a FixedVariable always to 0.0. Expectation values and
    nested objectives are delegated to the dedicated routines.
    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        # constants never contribute
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one and
        # recompile with the same input arguments
        abstract = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(abstract, variable=variable)
        return compile(dE, **arg._input_args)
    # anything else is a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param E: the expectation value <U|H|U> whose gradient should be obtained
    :param variable: the Variable with respect to which differentiation should be performed.
    :return: sum (as Objective) of the shift-rule contributions of every gate
        in U that is parametrized by `variable`
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all (position, gate) pairs parametrized by `variable`
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        # shift-rule contribution of this single gate
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # derivative of the gate parameter w.r.t. the variable (inner chain-rule factor)
    inner_grad = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace gate i by its shifted version and measure the same Hamiltonian
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * inner_grad) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_3.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight read-only record bundling one Hamiltonian term: its scalar
    coefficient, the 2x2 operator matrices, and the qubit positions they act
    on. Serves as the intermediate format between the Hamiltonian and the
    MPO builder.
    """

    def __init__(self, coefficient, operators, positions):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """List of 2x2 operator matrices."""
        return self._operators

    @property
    def positions(self):
        """List of qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress
    the MPO via SVD.

    Each entry of ``container`` is a rank-4 tensor with index order
    (left bond, right bond, physical, physical); the physical legs are
    always 2x2 while the bond dimensions grow as terms are added and
    shrink again under compression.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # One seed tensor per qubit, trivial (1,1) bond dimensions.
        # Fix: ``np.complex`` was removed in NumPy 1.24 -- use builtin ``complex``.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                           for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max (left) bond dimension over all site tensors. """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor at ``qubit``.

        set_at: where to put data. Either [left, right] to place a full
        2x2 matrix, or [left, right, row, col] to place a single scalar.
        Raises a plain Exception for any other length.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array: grow the bond dimensions of the
        tensor at ``qubit`` and place ``add_operator`` into the new corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two dimensions are always 2x2 and must stay 0.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # Fix: build a new list instead of ``update_dir += [0, 0]``,
                # which mutated the caller's list in place.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # Fix: ``np.complex`` removed in NumPy 1.24 -- use builtin ``complex``.
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values in the freshly created corner block
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compression of MPO via SVD: a left-to-right sweep followed by a
        right-to-left sweep. Singular values below EPS are truncated and
        sqrt(s) is distributed onto both neighbouring tensors.
        """
        n_qubits = len(self.container)
        # Merge the two physical legs so each site tensor is rank 3.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb vh into the right neighbour
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb u into the left neighbour
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Split the merged physical leg back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle
    MPO-Hamiltonians: converts a qubit Hamiltonian (sum of Pauli strings)
    into a list of SVD-compressed matrix-product operators, each bounded
    in bond dimension by ``maxdim``.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.
        Args:
            hamiltonian: the Hamiltonian whose Pauli terms are converted.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork (currently unused in this class).
            n_qubits: number of qubits/sites; pass it explicitly.
            name: An optional name for the MPO (currently unused here).
            maxdim: bond-dimension threshold; when an MPO under construction
                reaches it, a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): ``get_n_qubits`` is not defined anywhere in this
            # class, so this fallback raises AttributeError at runtime --
            # always pass ``n_qubits``; TODO derive it from the Hamiltonian.
            self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO."""
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Convert ``self.hamiltonian`` into ``self.mpo`` (list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
    def openfermion_to_intermediate(self):
        """
        Translate the Hamiltonian's Pauli strings into a list of
        SubOperator records (coefficient, 2x2 matrices, positions).
        """
        def get_pauli_matrix(string):
            # Fix: ``np.complex`` was removed in NumPy 1.24 -- use builtin ``complex``.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty Pauli string);
            # its coefficient is later spread evenly over all sites.
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate
    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index ``j``. Terms are added (each
        growing the bond dimension by one; compressed every 100 terms)
        until all terms are consumed or ``self.maxdim`` is reached.
        Returns:
            (mpo, j_next): the finished MPOContainer and the index of the
            first term NOT included in it.
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices).
        # The coefficient is distributed as coeff**(1/n_qubits) per site.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: extend the bond dimensions (first/last site only
        # on one side) and write the new term into the fresh corner block.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # Periodic compression keeps intermediate bond dimensions small.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j
    def intermediate_to_mpo(self, intermediate):
        """Greedily split the term list into a list of maxdim-bounded MPOs."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list
    def construct_matrix(self):
        ''' Recover the full operator as a rank-4 tensor of shape (d,d,d,d)
        with d = 2**(n_qubits/2), e.g. to compare with the Hamiltonian matrix
        obtained from tq. Sums the full contraction of every MPO in self.mpo. '''
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Reshape to get tensor of order 4 (get rid of left- and right open
            # indices and combine top&bottom into one)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # Fix: H_m was previously unbound for non-torch backends.
                H_m = np.asarray(res.tensor)
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_3.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Energy-evaluation callable handed to scipy as the objective function.

    Overwrites the call function of tequila's stock container:
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to the Hamiltonian evaluated
        at those values (rebuilt on every __call__).
    unitary:
        the parametrized circuit whose expectation value is measured.
    param_keys:
        ordered variable keys; position i corresponds to entry i of the
        scipy parameter vector.
    Ham_derivatives:
        precomputed Hamiltonian derivatives (consumed by _GradContainer).
    Eval:
        slot for an energy container; unused in this class itself.
    passive_angles:
        fixed (non-optimized) variables merged into every evaluation.
    samples:
        the number of samples to evaluate objective with.
        NOTE(review): defaults to 1024 shots, i.e. not exact simulation --
        confirm this default is intended.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector ``p``.

        Rebuilds the Hamiltonian at the current variable values, forms the
        expectation value with ``self.unitary`` and simulates on qulacs.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        complex:
            the simulated energy cast to complex (see note on the return line).
        """
        angles = {}  # circuit variables stay real; Hamiltonian-only variables are cast to complex
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            # fixed variables are always supplied in addition to the active ones
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # evaluate the parametrized Hamiltonian at the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient callable handed to scipy (jac).

    Overwrites the call function of tequila's stock container: for every
    active parameter the total derivative has up to two contributions,
    (1) the derivative of the circuit (tq.grad of the expectation value of
    the current Hamiltonian) and (2) the expectation value of the
    precomputed Hamiltonian derivative ``Ham_derivatives``.
    This class is used by the SciPy optimizer and should not be used
    elsewhere; see _EvalContainer for the shared attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector ``p``.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient.
        args
        kwargs
        Returns
        -------
        numpy.array:
            complex64 array holding dE/dp_i for every active parameter.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # Fix: use a complex accumulator. The original real-dtype
        # ``numpy.zeros(self.N)`` silently discarded the imaginary part of
        # complex derivatives on assignment (ComplexWarning).
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # Mirror _EvalContainer.__call__: circuit parameters stay real,
            # Hamiltonian-only parameters are cast to complex.
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # (1) circuit contribution: differentiate the expectation value
            # of the Hamiltonian evaluated at the current variables.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # (2) Hamiltonian contribution: measure the precomputed
            # derivative operator in the current state.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # ``derivative`` is either a plain number or a compiled tequila
            # objective that still has to be evaluated.
            # Fix: ``numbers.Number`` covers float, complex and numpy scalars;
            # the original only accepted float / numpy.complex64 and would
            # have tried to *call* any other numeric type.
            if isinstance(derivative, numbers.Number):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant for a parametrized Hamiltonian (PQH).

    Overwrites the expectation- and gradient-container objects of
    tequila's OptimizerSciPy: the Hamiltonian is rebuilt from the current
    variable values on every energy/gradient evaluation, so variables may
    appear both in the circuit and in the Hamiltonian.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables received in calls to the optimizer.
        Parameters
        ----------
        all_variables: list:
            all variables of Hamiltonian and circuit combined.
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over. If None: all of them.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        import warnings  # local import; only needed for the autocomplete warning below
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k: 0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # fix: the original raised the undefined name
                # TequilaOptimizerException (a NameError at runtime).
                raise tq.TequilaException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # fix: ``warnings`` / ``TequilaWarning`` were never imported
                # here; use the stdlib module and a plain UserWarning.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", UserWarning)
        active_angles = {v: initial_values[v] for v in variables}
        passive_angles = {k: v for k, v in initial_values.items()
                          if k not in active_angles}
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (PQH) defining the energy.
        unitary:
            parametrized circuit preparing the trial state.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        # Full variable set = Hamiltonian variables + circuit variables.
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value dictionary into (ordered) arrays.
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # Silent warm-up call so that E.infostring is populated.
        E.print_level = 0
        E(param_values)
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # Numerical gradient instructions ('2-point', 'cs', '3-point') are
        # passed through to scipy untouched.
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                # fix: the original qng branch referenced names whose import
                # is commented out at the top of this file (get_qng_combos,
                # _QngContainer) plus an undefined `objective` and would die
                # with a NameError -- fail loudly instead.
                raise tq.TequilaException('QNG gradients are not supported by this optimizer.')
            dE = gradient
            compile_gradient = False
            if compile_hessian:
                compile_hessian = False
                if hessian is None:
                    hessian = gradient
            infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
            infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient, dict):
            # fix: the original indexed gradient['method'] unconditionally,
            # which raises KeyError for plain {variable: objective} dicts.
            if gradient.get('method') == 'qng':
                raise tq.TequilaException('QNG gradients are not supported by this optimizer.')
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE = _GradContainer(Ham_derivatives = Ham_derivatives,
                                unitary = unitary,
                                Hamiltonian = H,
                                Eval= E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            # Silent warm-up call so that dE.infostring is populated.
            dE.print_level = 0
            dE(param_values)
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            """Per-iteration recorder; pulls the newest container history entries."""
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # NOTE: not every scipy method supports callbacks; when none fired
        # (callback.real_iterations == 0) the E.history fallback below is
        # used. (The original assigned an unused local here.)
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy class (instead of tequila's) and passes the
    objective construction down to it, so that the Hamiltonian may itself be
    parametrized.
    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian (PQH) defining the energy.
    unitary:
        parametrized circuit preparing the trial state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation,
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # fix: the original wrapped k[1] in a list -- assign_variable([k[1]])
            # -- which breaks the second component of every hessian key.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               # NOTE(review): base OptimizerSciPy may expect
                               # `noise=` rather than `noise_model=` -- confirm.
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_3.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper for differentiating Objectives, ExpectationValues, Unitaries
    (including single gates) and Transforms.

    :param objective: structure to be differentiated (Objective, QTensor,
        ExpectationValue, ...).
    :param variable: parameter with respect to which the objective should be
        differentiated; default None computes the total gradient.
    :param no_compile: skip the gradient-mode compilation pass if True.
    :return: dictionary of Objectives when variable is None, otherwise a
        single Objective (or number for purely variable-dependent input).
    '''
    # Total gradient: one recursive call per variable, collected in a dict.
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for var in all_vars:
            assert (var is not None)
            gradients[var] = grad(objective, var, no_compile=no_compile)
        return gradients

    variable = assign_variable(variable)

    # Tensors of objectives: differentiate element-wise.
    if isinstance(objective, QTensor):
        scalar_grad = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        return vectorize(scalar_grad)(objective)

    # Objective does not depend on the variable at all -> zero objective.
    if variable not in objective.extract_variables():
        return Objective()

    # Compile into shiftable gates unless the caller opted out.
    if no_compile:
        compiled = objective
    else:
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # Dispatch on the (original / compiled) structure.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate a (pre-compiled) Objective with respect to a single variable.

    Applies the chain rule argument by argument: the outer derivative is the
    automatic derivative (jax/autograd) of the objective's transformation with
    respect to argument i, and the inner derivative recurses via __grad_inner
    (which handles Variables, FixedVariables and expectation values).

    :param objective: the Objective to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: an Objective encoding d(objective)/d(variable)
    :raises TequilaException: if no autodiff backend is available, or if the
        accumulated derivative ends up being None
    """
    args = objective.args
    transformation = objective.transformation
    # dO accumulates the total derivative; stays None until a nonzero term is found
    dO = None
    # cache inner derivatives of expectation values so repeated args are
    # differentiated only once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # outer derivative d(transformation)/d(arg_i) via the autodiff backend
        # (note: df is constructed even when the transformation is the identity,
        # in which case it is unused below)
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        # arguments with a .U attribute are expectation values
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        # chain rule: accumulate outer * inner
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the recursive differentiation: dispatch on the type of
    the argument and return its derivative with respect to `variable`.
    Returns 1.0 / 0.0 for a Variable that is / is not identical to `variable`,
    0.0 for FixedVariables, and recurses for expectation values and nested
    transforms.
    :param arg: a transform, variable or expectation-value object to differentiate
    :param variable: the Variable with respect to which arg is differentiated
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        # derivative of a variable w.r.t. itself is 1, w.r.t. anything else 0
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one and
        # re-compile with the original compilation arguments
        dE = __grad_expectationvalue(arg.abstract_expectationvalue, variable=variable)
        return compile(dE, **arg._input_args)
    # otherwise arg is a nested Objective/transform
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <H>_U with respect to
    a single variable, assembled via the shift rule at every gate carrying
    that variable. See the paper.
    :param E: the expectation value to be differentiated
    :param variable: the Variable with respect to which to differentiate
    :return: Objective encoding dE/d(variable) (0.0 if E does not depend on it)
    '''
    ham = E.H
    circuit = E.U
    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))
    # fast return if the circuit does not depend on the variable at all
    if variable not in circuit.extract_variables():
        return 0.0
    # sum the shift-rule contribution of every gate parametrized by `variable`
    total = Objective()
    for position, gate in circuit._parameter_map[variable]:
        total += __grad_shift_rule(circuit, gate, position, variable, ham)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient contribution of one directly-differentiable gate inside a
    precompiled circuit, using the gate's own shift rule.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is measured
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # custom gate constructions may provide their own shift decomposition;
    # gates that went through the compiler always have one
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # derivative of the gate parameter w.r.t. the variable (inner chain-rule factor)
    inner_grad = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace the original gate with its shifted counterpart and measure
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expval = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * inner_grad) * expval
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight record for one Pauli term of a Hamiltonian.

    Bundles the term's scalar coefficient with the list of single-qubit
    operator matrices and the qubit indices they act on; serves as the
    intermediate format between the Hamiltonian and the MPO builder.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # stored privately, exposed read-only through the properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar weight of this Pauli term."""
        return self._coefficient

    @property
    def operators(self):
        """Single-qubit operator matrices of this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on (same order as `operators`)."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the MPO.

    Each qubit owns one rank-4 tensor with index order
    (left bond, right bond, physical row, physical column); the physical
    dimension is fixed to 2 (qubits).
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # Start every site with trivial bond dimension 1.
        # Use the builtin `complex`: the alias np.complex was deprecated in
        # NumPy 1.20 and removed in 1.24 (the old code raised AttributeError).
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                            for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns the maximal (left) bond dimension over all site tensors. """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: where to put data. Length 2 -> write a full 2x2 operator at
        the given (left, right) bond indices; length 4 -> write one scalar entry.

        Raises:
            Exception: if set_at has neither length 2 nor 4.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set a single value
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                    add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Mimicks a dynamic array: grow the bond dimensions of the tensor of
        `qubit` and place `add_operator` in the newly created corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and must stay zero.
        NOTE: a length-2 `update_dir` list is extended in place (caller's list
        is mutated) -- kept for backward compatibility.

        Raises:
            Exception: on wrong length or an attempt to grow the physical dims.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # builtin complex instead of the removed np.complex alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values in the bottom-right corner of the bond indices
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of the MPO bond dimensions via SVD: one forward and one
        backward sweep, dropping singular values below EPS and distributing
        sqrt of the singular weights to both sides.
        """
        n_qubits = len(self.container)
        # Fuse the two physical legs into one for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                    self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Split the fused physical leg back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building on tensornetwork to handle MPO-Hamiltonians: converts a
    tequila QubitHamiltonian into a list of MPOContainer objects (a sum of
    MPOs, split whenever the bond dimension would exceed `maxdim`) and can
    reconstruct the dense operator for verification.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the Hamiltonian to encode (tq.QubitHamiltonian or text).
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork. (Currently unused.)
            n_qubits: number of qubits; should always be passed explicitly.
            name: An optional name for the MPO. (Currently unused.)
            maxdim: bond-dimension cap; terms beyond it start a new MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits() is not defined on this class, so
            # this fallback raises AttributeError -- pass n_qubits explicitly.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build self.mpo (a list of MPOContainer) from self.hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert self.hamiltonian (a tq.QubitHamiltonian) into a list of
        SubOperator records (coefficient, operator matrices, positions).

        Raises:
            Exception: if an identity Pauli string occurs anywhere but first.
        """
        def get_pauli_matrix(string):
            # builtin complex: np.complex was removed in NumPy 1.24
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first string may be the identity (empty item list);
            # its coefficient is then distributed over all sites later
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Absorb Pauli terms intermediate[j:] into one MPOContainer until the
        bond dimension would exceed self.maxdim.

        Returns:
            (mpo, j): the filled container and the index of the first term
            that was NOT absorbed.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: written directly into the (1,1,2,2) seed tensors.
        # The coefficient is distributed evenly over all sites via its
        # n_qubits-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other terms: grow the bond dimensions (dynamic-array style)
        # until either all terms are absorbed or maxdim is reached.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions.
                # Edge sites only grow one bond, bulk sites grow both.
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on this position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # Compress periodically so intermediate bond dims stay manageable
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as self.maxdim requires."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator from self.mpo, e.g. to compare with the
        Hamiltonian matrix obtained from tequila.

        Returns:
            numpy array of shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Network layout (O = site tensor, | = physical legs):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect bond indices between neighbouring sites
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # The pytorch backend yields a torch.Tensor, other backends an
            # ndarray. The original code left H_m undefined (NameError) in
            # the non-torch case; handle both here.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
"""
Overwrite the call function
Container Class to access scipy and keep the optimization history.
This class is used by the SciPy optimizer and should not be used elsewhere.
Attributes
---------
objective:
the objective to evaluate.
param_keys:
the dictionary mapping parameter keys to positions in a numpy array.
samples:
the number of samples to evaluate objective with.
save_history:
whether or not to save, in a history, information about each time __call__ occurs.
print_level
dictates the verbosity of printing during call.
N:
the length of param_keys.
history:
if save_history, a list of energies received from every __call__
history_angles:
if save_history, a list of angles sent to __call__.
"""
def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
print_level: int = 3):
self.Hamiltonian = Hamiltonian
self.unitary = unitary
self.samples = samples
self.param_keys = param_keys
self.N = len(param_keys)
self.save_history = save_history
self.print_level = print_level
self.passive_angles = passive_angles
self.Eval = Eval
self.infostring = None
self.Ham_derivatives = Ham_derivatives
if save_history:
self.history = []
self.history_angles = []
def __call__(self, p, *args, **kwargs):
"""
call a wrapped objective.
Parameters
----------
p: numpy array:
Parameters with which to call the objective.
args
kwargs
Returns
-------
numpy.array:
value of self.objective with p translated into variables, as a numpy array.
"""
angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
for i in range(self.N):
if self.param_keys[i] in self.unitary.extract_variables():
angles[self.param_keys[i]] = p[i]
else:
angles[self.param_keys[i]] = complex(p[i])
if self.passive_angles is not None:
angles = {**angles, **self.passive_angles}
vars = format_variable_dictionary(angles)
Hamiltonian = self.Hamiltonian(vars)
#print(Hamiltonian)
#print(self.unitary)
#print(vars)
Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
#print(Expval)
E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
if self.print_level > 2:
print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
elif self.print_level > 1:
print("E={:+2.8f}".format(E))
if self.save_history:
self.history.append(E)
self.history_angles.append(angles)
return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable gradient wrapper handed to scipy.optimize as `jac`.

    Overwrites the call function of _EvalContainer: for each active parameter
    the derivative is assembled from (a) the analytic circuit gradient
    (tq.grad) when the parameter appears in the unitary and (b) the
    expectation value of the Hamiltonian derivative when the parameter
    appears in the Hamiltonian.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            parameters, in the order given by self.param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            the gradient vector as complex64 (one entry per active parameter).
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is real-valued (numpy.zeros default dtype), so
        # any imaginary part of a derivative is dropped before the final cast
        # to complex64 below -- confirm this is intended.
        dE_vec = numpy.zeros(self.N)
        # per-call record of {variable: derivative} appended to the history
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            # circuit parameters stay real; Hamiltonian parameters are cast to
            # complex (mirrors _EvalContainer.__call__)
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        # count expectation values evaluated, for the info string
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # (a) circuit contribution: analytic gradient of <H>_U w.r.t. the
            # circuit parameter (left as a compiled objective for now)
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # (b) Hamiltonian contribution: <dH/d(theta)>_U, simulated directly
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # if only (b) contributed, `derivative` is already a plain number;
            # otherwise it is a compiled objective that must still be evaluated
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer variant that optimizes circuit and Hamiltonian
    parameters together.

    Overwrites the expectation-value and gradient container objects of the
    base class (_EvalContainer / _GradContainer defined in this module) so
    that the Hamiltonian is rebuilt from its parameters at every evaluation.
    """

    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian + circuit).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        # no-op self-assignment kept from the original implementation
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module -- this branch raises NameError; confirm import.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            # NOTE(review): `warnings` and TequilaWarning are not imported in
            # this module -- this warning path raises NameError; confirm.
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the circuit whose expectation value is minimized.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # collect all variables: Hamiltonian parameters plus circuit parameters
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        # energy evaluation container handed to scipy as the objective function
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # warm-up call (silenced) to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this scope --
                # the QNG path raises NameError; confirm intended argument.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # a plain string like '2-point' is passed through to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this scope either
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient container (this module's _GradContainer)
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # warm-up call (silenced) to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            # per-iteration records; `self.real_iterations += 1` below creates
            # an instance attribute shadowing this class attribute
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                # record the most recent evaluation of each container
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward to a user-supplied callback if one was given
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this local assignment is never used afterwards
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Minimize the expectation value of `Hamiltonian` over `unitary` by
    delegating to the local `optimize_scipy` class (which handles the
    parametrized-Hamiltonian objective construction).

    Parameters
    ----------
    Hamiltonian:
        the (possibly parametrized) Hamiltonian defining the objective.
    unitary:
        the tequila circuit whose variables are optimized.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation
        (does not work in combination with all optimizers),
        dictionary of variables and tequila objectives to define your own gradient,
        None for automatic construction (default).
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        same options as gradient, with tuples of variables as dictionary keys.
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        initial values as a dictionary of variable keys and numbers; randomized if None.
    variables: typing.List[typing.Hashable], optional:
        list of variables to optimize (all if None).
    samples: int, optional:
        samples/shots per circuit evaluation (None = exact wavefunction simulation).
    maxiter: int : (Default value = 100):
        maximal number of iterations.
    backend: str, optional:
        simulator backend, automatically chosen if None.
    backend_options: dict, optional:
        additional options passed to the compiled objective in every call.
    noise: NoiseModel, optional:
        a NoiseModel applied to all expectation values of the objective.
    device: str, optional:
        device on which to run/emulate the circuits.
    method: str : (Default = "BFGS"):
        optimization method (see scipy documentation).
    tol: float : (Default = 1.e-3):
        convergence tolerance (see scipy documentation).
    method_options: dict, optional:
        method options (see scipy documentation).
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation).
    method_constraints: optional:
        constraints (see scipy documentation).
    silent: bool :
        no printout if True.
    save_history: bool:
        save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # Normalize user-supplied analytic gradients: keys become Variable objects.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    # Normalize user-supplied analytic hessians: keys are pairs of variables.
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # bugfix: the second key element was previously wrapped in a list
            # (assign_variable([k[1]])), producing malformed Variable keys that
            # could never match the gradient variables; mirror the k[0] handling.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults and build the optimizer instance
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated
        (QCircuit, ParametrizedGateImpl, Objective, ExpectationValue, Transform, Variable)
    :param variable: parameter with respect to which objective should be differentiated.
        default None: total gradient — a dict mapping every variable of the
        objective to its partial derivative.
    :param no_compile: if True, skip the gate-compilation pass (the objective
        is assumed to already be compiled in gradient mode)
    :return: dict of Objectives (variable=None) or a single Objective; an empty
        Objective if the objective does not depend on the variable.
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    if isinstance(objective, QTensor):
        # elementwise differentiation of a tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    if variable not in objective.extract_variables():
        # objective independent of variable -> zero-like empty Objective
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile every gate into a shift-rule-differentiable form
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # dispatch order matters: raw expectation values are differentiated directly,
    # single-expectationvalue objectives via their compiled inner argument,
    # everything else through the generic objective chain rule
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over an Objective: d/dv f(args) = sum_i (df/darg_i) * (darg_i/dv).

    The outer factor df/darg_i is obtained by auto-differentiating the
    objective's transformation with jax/autograd; the inner factor is
    delegated to __grad_inner. Returns the accumulated Objective.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None

    # cache inner derivatives so the same expectationvalue is not differentiated twice
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # holomorphic=True: arguments may be complex-valued here
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # arg is an expectationvalue: save redundancies via the cache
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner

    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost derivative dispatch: differentiate `arg` with respect to
    `variable`, recursing until plain variables are reached. A variable
    contributes 1.0 when identical to `variable` and 0.0 otherwise.

    :param arg: a Variable, FixedVariable, ExpectationValue (raw or compiled),
        or a nested transform/Objective to be differentiated
    :param variable: the Variable with respect to which arg is differentiated
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectationvalue: differentiate the abstract one,
        # then recompile with the original input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # anything else is treated as a nested Objective/transform
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <H>_U with respect
    to a single variable, assembled via the shift rule over every
    parametrized gate of U that depends on that variable.

    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which differentiation is performed
    :return: an Objective encoding d<H>_U / d(variable), or 0.0 if U is independent
    '''
    ham = E.H
    circuit = E.U
    if not (circuit.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))

    # fast return if possible
    if variable not in circuit.extract_variables():
        return 0.0

    total = Objective()
    # every (position, gate) pair depending on the variable contributes a shift term
    for position, gate in circuit._parameter_map[variable]:
        total += __grad_shift_rule(circuit, gate, position, variable, ham)

    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Differentiate one directly-shiftable gate inside a precompiled circuit.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position in `unitary` at which g appears
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian measured against `unitary`
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # chain-rule factor from the gate parameter down to the variable
    chain_factor = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        shifted_expval = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * chain_factor) * shifted_expval
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.75/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Immutable-by-convention record bundling one Pauli term: its coefficient,
    the 2x2 operator matrices, and the qubit positions they act on.
    Serves as the intermediate format between a Hamiltonian and the MPO builder.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """List of 2x2 operator matrices, aligned with `positions`."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on, aligned with `operators`."""
        return self._positions
class MPOContainer:
    """
    Container for the tensors of a matrix-product operator (MPO).

    Holds one rank-4 tensor per qubit with index layout
    (left bond, right bond, physical out, physical in). Supports setting
    entries, growing the bond dimensions like a dynamic array, and
    SVD-based compression of the bond dimensions.
    """

    def __init__(self, n_qubits: int):
        """
        :param n_qubits: number of sites; each is initialized with a
            (1, 1, 2, 2) zero tensor.
        """
        self.n_qubits = n_qubits
        # bugfix: np.complex was removed in NumPy >= 1.24; the builtin
        # `complex` is the documented replacement (dtype complex128)
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal (left) bond dimension over all site tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        :param set_at: [i, j] to set the whole 2x2 physical block at bond
            indices (i, j), or [i, j, k, l] to set a single entry.
        :param add_operator: a 2x2 array when len(set_at) == 2, a scalar
            when len(set_at) == 4.
        """
        # Set a full 2x2 physical block
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set one specific entry
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` (dynamic-array style) and place a new
        2x2 block at the freshly opened corner.

        :param update_dir: [dl, dr] or [dl, dr, 0, 0]; a 1 extends that bond
            dimension by one. The trailing physical dimensions stay 2x2.
        :param add_operator: 2x2 block written at the new (last, last) corner.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # bugfix: builtin `complex` instead of the removed np.complex alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values at the grown corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO bond dimensions with two SVD sweeps
        (left-to-right, then right-to-left), discarding singular values
        below EPS and splitting sqrt(s) between the left/right factors.
        """
        n_qubits = len(self.container)
        # flatten the physical legs: (l, r, 2, 2) -> (l, r, 4)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Forward sweep
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1] so the right bond is last
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Undo permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb vh into the neighbour to the right
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Backward sweep
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb u into the neighbour to the left
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # restore the physical legs: (l, r, 4) -> (l, r, 2, 2)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build matrix-product-operator (MPO) representations of a qubit Hamiltonian.

    The Hamiltonian's Pauli strings are converted to an intermediate list of
    SubOperator records and then assembled into one or more MPOContainer
    objects, each kept below `maxdim` bond dimension.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: tequila QubitHamiltonian (or text form) to encode.
            backend: optional TensorNetwork backend (not used in this class body).
            n_qubits: number of qubits; inferred via get_n_qubits if not given.
            name: optional name for the MPO (not used in this class body).
            maxdim: maximal bond dimension before a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class in this
            # file — calling with n_qubits=None would raise AttributeError
            # unless it is provided elsewhere; verify.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        # number of qubits/sites of the MPO
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian to the intermediate format and build
        self.mpo, a list of MPOContainer objects."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Hamiltonian's Pauli strings into a list of SubOperator
        records (coefficient, 2x2 operator matrices, qubit positions).
        Only the first Pauli string may be a pure identity.
        """
        # Here, have either a QubitHamiltonian or a file with an openfermion
        # operator; this implementation handles the QubitHamiltonian path.
        def get_pauli_matrix(string):
            # NOTE(review): np.complex was removed in NumPy >= 1.24; the
            # builtin `complex` is the replacement — verify the numpy pin.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=np.complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=np.complex),
                'X': np.array([[0, 1], [1, 0]], dtype=np.complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distributed over all sites later
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Assemble one MPOContainer starting at intermediate[j], adding terms
        until either all are consumed or the bond dimension reaches maxdim.

        Returns:
            (mpo, j): the container and the index of the first unconsumed term.
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity. The coefficient is distributed as
        # coeff**(1/n_qubits) over all sites.
        # NOTE(review): np.complex below is removed in NumPy >= 1.24 (use the
        # builtin complex); verify the numpy version pin.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=np.complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=np.complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimensions one term at a time,
        # while (terms remain) and mpo dimension < self.maxdim
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions.
                # Boundary sites only grow one bond; bulk sites grow both.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         np.complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         np.complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # periodic compression keeps intermediate bond dimensions in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the intermediate term list into a list of MPOContainers,
        each built up to the maxdim bond-dimension cap."""
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Contract the stored MPO(s) back into a dense tensor of shape
        (d, d, d, d) with d = 2**(n_qubits/2), e.g. to compare with the
        Hamiltonian matrix obtained from tequila. Sums over all MPOs in self.mpo.
        """
        mpo = self.mpo
        # Contract over all bond indices.
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # NOTE(review): assumes the pytorch backend set at module import;
            # non-torch results are not converted/accumulated here — verify.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
                if not first:
                    H += H_m
                else:
                    H = H_m
                    first = False
        # Reshape to get a tensor of order 4 (drop the trivial left/right
        # bond indices; top/bottom legs combined into d-dimensional indices)
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.75/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable objective wrapper handed to scipy.optimize; also keeps the
    optimization history.

    Unlike the upstream tequila container, this one holds a parametrized
    Hamiltonian and a unitary separately and rebuilds the expectation value
    at every call (the Hamiltonian itself may depend on variables).

    Attributes
    ----------
    Hamiltonian:
        parametrized Hamiltonian; called with a variable dict to produce a
        concrete tq Hamiltonian.
    unitary:
        the circuit whose expectation value is evaluated.
    param_keys:
        ordered variable keys mapping scipy's parameter vector positions
        to variables.
    samples:
        number of shots per evaluation (None would mean exact simulation,
        default here is 1024).
    passive_angles:
        variables held fixed during the optimization, merged into every call.
    save_history:
        whether each call's energy and angles are recorded.
    print_level:
        verbosity of the per-call printout.
    N:
        number of active parameters (len(param_keys)).
    history, history_angles:
        recorded energies/angles when save_history is True.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # info string describing the last evaluated objective (set in __call__)
        self.infostring = None
        # dict of Hamiltonian-parameter derivatives, used by _GradContainer
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            parameters, ordered as in self.param_keys.

        Returns
        -------
        complex:
            the evaluated expectation value (returned as a plain complex so
            jax types do not confuse the scipy optimizers).
        """
        angles = {}
        for i in range(self.N):
            # circuit variables stay real; Hamiltonian-only variables are
            # passed through as complex values
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # rebuild the (variable-dependent) Hamiltonian and expectation value
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable gradient wrapper handed to scipy.optimize; keeps a history of
    per-variable gradients. Overwrites _EvalContainer.__call__.

    The total derivative per variable is the sum of two contributions:
    the circuit part (analytic tequila gradient of <H>_U w.r.t. circuit
    variables) and the Hamiltonian part (expectation value of dH/dvariable).
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            parameters, ordered as in self.param_keys.

        Returns
        -------
        numpy.array:
            gradient vector (dtype complex64), one entry per active parameter.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with a real dtype but receives
        # complex values below (imaginary parts are discarded with a
        # ComplexWarning); verify this is intended.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # circuit variables stay real; Hamiltonian-only variables complex
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0  # running count of expectationvalues, reported in infostring
        for i in range(self.N):
            derivative = 0.0
            # circuit contribution: analytic gradient of <H(vars)>_U
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian contribution: <dH/dvariable>_U evaluated directly
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # derivative is either already a number (Hamiltonian part only)
            # or a compiled objective that still needs to be evaluated
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective received
        in calls to optimizers.

        Parameters
        ----------
        all_variables: list:
            every variable of the problem (circuit + Hamiltonian variables).
        initial_values: dict or string or callable or number:
            initial values for the variables, as a dictionary;
            if string: can be `zero` or `random`;
            if callable: custom function applied to each key;
            if number: that value for every variable;
            if None: random initialization between 0 and 2pi (not recommended).
        variables: list:
            the variables being optimized over (None means all).

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized;
            passive_angles, a dict of those variables NOT being optimized;
            variables, the formatted list of variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables  # no-op; kept for parity with the upstream tequila code
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k: 0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not among this
                # file's visible imports — presumably provided via
                # `from vqe_utils import *`; verify, otherwise this raises NameError.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` and TequilaWarning are likewise not
                # in the visible imports — presumably from the vqe_utils star
                # import; confirm.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into variables that scipy optimizes and fixed (passive) ones
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization of <U|H(angles)|U> using scipy optimizers.

        In contrast to the stock tequila SciPy optimizer, this variant receives
        a parametrized Hamiltonian and a unitary separately; the Hamiltonian is
        re-assembled from the current variable values on every energy/gradient
        evaluation (see _EvalContainer / _GradContainer).

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian; converted via convert_PQH_to_tq_QH.
        unitary:
            the parametrized circuit U.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # Hamiltonian variables and the derivative operators of H w.r.t. them
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        # optimize over Hamiltonian variables and circuit variables together
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # energy evaluator handed to scipy; rebuilds H(angles) on each call
        E = _EvalContainer(Hamiltonian=H,
                           unitary=unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent evaluation to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): 'objective' is not defined in this method; this
                # QNG path would raise a NameError if taken -- confirm intent.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # scipy's numerical-gradient instruction ('2-point', 'cs', '3-point') is passed through
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient, dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined 'objective' issue as in the QNG branch above.
                combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            # numerical hessian instruction is handed to scipy untouched
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient: circuit shift rules plus Hamiltonian derivative operators
            dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                                unitary=unitary,
                                Hamiltonian=H,
                                Eval=E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            # one silent evaluation to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            # records convergence information once per scipy iteration
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward to a user-supplied callback if one was given
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is never read afterwards; it probably was
            # meant to repair callback.real_iterations -- confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Convenience wrapper: build the local optimize_scipy optimizer and run it on
    the (Hamiltonian, unitary) pair.

    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian to optimize over (converted inside the optimizer).
    unitary:
        the parametrized circuit U.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default).
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers.
        If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run (backend dependent).
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # Normalize user-supplied analytic gradients given as {variable: Objective}.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    # Same for the hessian, whose keys are pairs of variables.
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUGFIX: k[1] used to be wrapped in a list ([k[1]]) before being
            # handed to assign_variable, which produced invalid hessian keys.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.75/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated
        (QCircuit, ParametrizedGateImpl, Objective, ExpectationValue, Transform, Variable)
    :param variable: parameter with respect to which objective should be differentiated.
        default None: total gradient (a dictionary over all variables is returned).
    :param no_compile: if True, assume objective is already compiled for gradients.
    return: dictionary of Objectives if variable is None; otherwise the derivative
        Objective with respect to the single given variable.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate elementwise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # derivative of a constant (w.r.t. this variable) is the empty Objective
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile gates into shift-rule-differentiable form first
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch on the structure of the (compiled) objective
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over an Objective: d/dv f(args) = sum_i (df/darg_i) * (darg_i/dv).

    The outer derivative df/darg_i is taken with autograd/jax over the
    objective's transformation; the inner derivative is delegated to
    __grad_inner. Returns the accumulated derivative Objective.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            # note: the autograd module is imported under the name 'jax' here
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # arg is an expectation value: cache its inner derivative to
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner derivative darg/dvariable for the chain rule in __grad_objective.

    Recurses all the way down to plain variables and returns 1.0 or 0.0 when a
    variable is (isn't) identical to `variable`; dispatches to the expectation
    value / objective differentiators for composite arguments.

    :param arg: a transform, expectation value or variable object to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        # d var / d var = 1, d other / d var = 0
        if arg == variable:
            return 1.0
        else:
            return 0.0
    elif isinstance(arg, FixedVariable):
        return 0.0
    elif isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    elif hasattr(arg, "abstract_expectationvalue"):
        # already-compiled expectation value: differentiate its abstract form
        # and recompile with the same input arguments
        E = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(E, variable=variable)
        return compile(dE, **arg._input_args)
    else:
        return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U|H|U> with respect to
    one variable, via parameter-shift rules applied to every gate that depends
    on it.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which differentiation is performed.
    :return: the derivative as an Objective (0.0 if E does not depend on variable)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all (position, gate) pairs in the circuit that carry this variable
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient contribution of a single directly-differentiable gate via its
    shift rule. Expects precompiled circuits.

    :param unitary: QCircuit: the circuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable w.r.t. which g is differentiated
    :param hamiltonian: the hamiltonian measured against unitary in the
        enclosing ExpectationValue
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # possibility for overwride in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain-rule factor: derivative of the gate's parameter w.r.t. variable
    chain_factor = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, replacement in g.shifted_gates():
        # substitute the shifted gate at position i and measure the same hamiltonian
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * chain_factor) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.25/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight intermediate record for a single Pauli term.

    Bundles the term's scalar coefficient with the single-qubit operator
    matrices and the qubit positions they act on, for later MPO assembly.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # Raw data is kept private; read access goes through the properties.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def positions(self):
        """Qubit indices on which the stored operators act."""
        return self._positions

    @property
    def operators(self):
        """Single-qubit operator matrices, aligned with ``positions``."""
        return self._operators

    @property
    def coefficient(self):
        """Scalar prefactor of the term."""
        return self._coefficient
class MPOContainer:
    """
    Class that handles the MPO tensors. Is able to set values at certain
    positions, grow the per-qubit tensors (wannabe-equivalent to dynamic
    arrays) and compress the MPO via SVD.

    Each entry of ``container`` is a rank-4 tensor with index order
    [bond-left, bond-right, physical, physical].
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # BUGFIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin complex is the documented replacement.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of ``qubit``.

        set_at: where to put data. Length 2 -> place a full 2x2 matrix at
        bond indices (set_at[0], set_at[1]); length 4 -> set a single entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of ``qubit`` (mimicking a dynamic array) and place
        ``add_operator`` in the newly created corner.

        update_dir: e.g. [1,1,0,0] -> extend the bond dimension along where
        there's a 1; the last two (physical) dimensions are always 2x2 only.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # BUGFIX: build a new list instead of mutating the caller's
                # argument in place.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values in the newly created corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of the MPO via a forward and a backward SVD sweep,
        discarding singular values below EPS and distributing the square
        roots of the kept singular values to both factors.
        """
        n_qubits = len(self.container)
        # fuse the two physical legs for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # split the fused physical legs again
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds MPO representations of qubit Hamiltonians.

    The Hamiltonian's Pauli strings are first translated into an intermediate
    list of SubOperator records and then assembled into one or more
    MPOContainer objects, each kept below ``maxdim`` bond dimension.
    """

    def __init__(self,
                 hamiltonian: Union["tq.QubitHamiltonian", Text],
                 backend: Optional[Union["AbstractBackend", Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian (or a file name with an
                openfermion operator) to be converted.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork. (Currently unused here.)
            n_qubits: number of qubits; derived from the Hamiltonian if None.
            name: An optional name for the MPO. (Currently unused here.)
            maxdim: maximal bond dimension per MPO before a new one is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class as far
            # as visible here -- confirm it exists before relying on
            # automatic qubit counting.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Hamiltonian's Pauli strings into SubOperator records.

        Only the first Pauli string may be a pure identity term.
        """

        def get_pauli_matrix(string):
            # BUGFIX: np.complex was removed in NumPy 1.24; use builtin complex.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO from intermediate[j:] until either all terms are
        consumed or the bond dimension exceeds self.maxdim.

        Returns the MPOContainer and the index of the first unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices).
        # The coefficient is distributed evenly as coeff**(1/n_qubits) per site.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimension by one block per term
        # until the size cap is reached, compressing periodically.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # compress every 100 terms to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Assemble all intermediate terms into a list of size-capped MPOs."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator from self.mpo, e.g. to compare with the
        Hamiltonian obtained from tequila. Returns a rank-4 tensor of shape
        (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        # Contract over all bond indices;
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # BUGFIX: previously H_m was left undefined for non-torch
                # backends, causing a NameError.
                H_m = np.asarray(res.tensor)
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.25/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Energy evaluator handed to scipy; overwrites the call function of the
    stock tequila container so the Hamiltonian is rebuilt from the current
    parameters on every evaluation.

    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        parametrized Hamiltonian (callable on a variable dictionary).
    unitary:
        the parametrized circuit U.
    param_keys:
        ordered variable keys matching positions in the scipy parameter array.
    samples:
        the number of samples/shots to evaluate with (None -> exact).
    passive_angles:
        variables held fixed during the optimization.
    save_history:
        whether or not to record each evaluation in history/history_angles.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.

        The Hamiltonian is re-assembled from the current variable values, then
        <U|H|U> is simulated on the qulacs backend.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to evaluate, ordered like param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            the energy; returned as a builtin complex because jax types
            confuse the scipy optimizers.
        """
        angles = {}
        for i in range(self.N):
            # circuit variables stay real; Hamiltonian variables are cast to
            # complex before being fed back into the Hamiltonian
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # rebuild the Hamiltonian at the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient of the energy at the parameter point ``p``.

        Two contributions are summed per parameter (product/chain rule):
        the derivative with respect to circuit parameters (via tq.grad on the
        expectation value) and, for parameters that also appear in the
        Hamiltonian, the expectation value of the explicitly supplied
        Hamiltonian derivative.

        Parameters
        ----------
        p: numpy array:
            flat parameter vector, ordered like ``self.param_keys``.
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient vector as a numpy array of dtype complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is a real float64 buffer, but the final return
        # casts to complex64 — a genuinely complex derivative component would
        # fail (or lose its imaginary part) on assignment below. Confirm that
        # all derivative contributions are real at this point.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # same convention as _EvalContainer.__call__: circuit parameters stay
        # real, Hamiltonian parameters are promoted to complex
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0  # running count of expectation values, for the infostring
        for i in range(self.N):
            derivative = 0.0
            # contribution 1: parameter appears in the circuit -> differentiate
            # the expectation value with respect to the circuit parameter
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                # 0.0 + compiled objective yields an (uncalled) objective
                derivative += temp_derivative
            # contribution 2: parameter appears in the Hamiltonian -> measure
            # the supplied derivative Hamiltonian directly
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # plain number -> store directly; otherwise `derivative` is a
            # compiled objective that still needs to be evaluated here
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer that overwrites the expectation and gradient
    container objects with the hacked variants above, so that a parametrized
    Hamiltonian (with explicit Hamiltonian derivatives) can be optimized
    together with the circuit parameters.
    """

    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables received in calls to optimizers.
        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian and circuit).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into variables that scipy optimizes (active) and fixed ones (passive)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            the parametrized Hamiltonian to optimize (converted internally to
            a tequila QubitHamiltonian via convert_PQH_to_tq_QH).
        unitary:
            the parametrized circuit that prepares the trial state.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # convert the parametrized Hamiltonian and collect ALL problem
        # variables: Hamiltonian parameters first, then circuit parameters
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # prime the container once (silently) so E.infostring is populated
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined anywhere in this
                # method (the original tequila code passed the objective in);
                # this branch raises a NameError if gradient='qng' is used.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # numerical gradient instruction ('2-point', '3-point', 'cs', ...)
                # is passed straight through to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` problem as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient via the hacked container (circuit derivatives
            # plus explicit Hamiltonian derivatives)
            dE = _GradContainer(Ham_derivatives = Ham_derivatives,
                                unitary = unitary,
                                Hamiltonian = H,
                                Eval= E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            # prime once (silently) so dE.infostring is populated
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        # extra positional argument for scipy's objective call; absorbed by
        # *args in _EvalContainer.__call__ and otherwise unused
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            # records one snapshot per *accepted* scipy iteration (the plain
            # histories on E/dE record every evaluation)
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward to a user-supplied callback if one was given
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this local assignment is dead code — `real_iterations`
        # is never read afterwards.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]  #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy function instead and passes the
    objective construction down
    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian to optimize
    unitary:
        the parametrized circuit preparing the trial state
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize a user-supplied analytic gradient dictionary
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    # normalize a user-supplied analytic hessian dictionary; keys are pairs of
    # variables. BUG FIX: the second key component was previously wrapped in a
    # list -- assign_variable([k[1]]) -- producing invalid hessian keys; it
    # must be assign_variable(k[1]) as in the gradient case above.
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    # make sure all initial-value keys are proper Variable objects
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.25/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    """Differentiate tequila structures.

    Works on Objectives, ExpectationValues, unitaries (including single gates)
    and transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which the objective should be
        differentiated; default None computes the total gradient.
    :return: a dictionary mapping each variable to its derivative Objective
        when variable is None, otherwise the derivative for the single
        requested variable.
    """
    if variable is None:
        # total gradient: recurse once per variable of the objective
        all_variables = objective.extract_variables()
        if len(all_variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for candidate in all_variables:
            assert (candidate is not None)
            gradients[candidate] = grad(objective, candidate, no_compile=no_compile)
        return gradients
    variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate elementwise over the tensor of objectives
        elementwise = vectorize(lambda x: grad(objective=x, variable=variable, *args, **kwargs))
        return elementwise(objective)
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        gradient_compiler = CircuitCompiler(multitarget=True,
                                            trotterized=True,
                                            hadamard_power=True,
                                            power=True,
                                            controlled_phase=True,
                                            controlled_rotation=True,
                                            gradient_mode=True)
        compiled = gradient_compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Differentiate an Objective via the chain rule.

    For every argument of the objective's transformation the outer derivative
    is built with the autodiff backend and multiplied by the inner derivative
    of the argument itself; the products are summed up.
    """
    arguments = objective.args
    transform = objective.transformation
    # cache inner derivatives of expectation values to avoid recomputation
    inner_cache = {}
    total = None
    for position, argument in enumerate(arguments):
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_fn = jax.grad(transform, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_fn = jax.grad(transform, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # the outer derivative is const=1 for the trivial transformation
        if transform is None or transform == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_fn)
        if hasattr(argument, "U") and argument in inner_cache:
            inner = inner_cache[argument]
        else:
            inner = __grad_inner(arg=argument, variable=variable)
            if hasattr(argument, "U"):
                inner_cache[argument] = inner
        # don't pile up zero expectationvalues
        if inner == 0.0:
            continue
        total = outer * inner if total is None else total + outer * inner
    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """Dispatch the inner derivative of a single objective argument.

    Returns 1.0 (0.0) when the argument is the (another) plain variable,
    recurses into expectation values and nested transforms otherwise.

    :param arg: a transform, variable or expectation value to differentiate
    :param variable: the Variable with respect to which to differentiate
    """
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """Analytic partial derivative of an expectation value.

    Sums the shift-rule contributions of every gate in the circuit that is
    parametrized by the given variable.

    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which to differentiate
    :return: dU/dvariable as an Objective
    """
    hamiltonian = E.H
    circuit = E.U
    if not (circuit.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))
    # fast return if possible
    if variable not in circuit.extract_variables():
        return 0.0
    accumulated = Objective()
    for position, gate in circuit._parameter_map[variable]:
        accumulated += __grad_shift_rule(circuit, gate, position, variable, hamiltonian)
    assert accumulated is not None
    return accumulated
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """Gradient of a single directly differentiable gate via shift rules.

    Expects precompiled circuits: the gate must provide `shifted_gates`.

    :param unitary: the QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: the position in `unitary` at which `g` appears
    :param variable: the variable with respect to which to differentiate
    :param hamiltonian: the hamiltonian measured against the circuit
    :return: an Objective whose evaluation yields d<H>/dvariable for this gate
    """
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate parameter w.r.t. the variable
    chain_factor = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * chain_factor) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
mt3 | mt3-main/setup.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install mt3."""
import os
import sys
import setuptools
# To enable importing version.py directly, we add its path to sys.path.
version_path = os.path.join(os.path.dirname(__file__), 'mt3')
sys.path.append(version_path)
from version import __version__  # pylint: disable=g-import-not-at-top

setuptools.setup(
    name='mt3',
    version=__version__,
    description='Multi-Task Multitrack Music Transcription',
    author='Google Inc.',
    author_email='no-reply@google.com',
    url='http://github.com/magenta/mt3',
    license='Apache 2.0',
    packages=setuptools.find_packages(),
    package_data={
        # ship gin configuration files alongside the Python packages
        '': ['*.gin'],
    },
    scripts=[],
    # several dependencies are pinned to git HEAD rather than PyPI releases
    install_requires=[
        'absl-py',
        'flax @ git+https://github.com/google/flax#egg=flax',
        'gin-config',
        'immutabledict',
        'librosa',
        'mir_eval',
        'note_seq',
        'numpy',
        'pretty_midi',
        'scikit-learn',
        'scipy',
        'seqio @ git+https://github.com/google/seqio#egg=seqio',
        't5',
        't5x @ git+https://github.com/google-research/t5x#egg=t5x',
        'tensorflow',
        'tensorflow-datasets',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
    tests_require=['pytest'],
    setup_requires=['pytest-runner'],
    keywords='music transcription machinelearning audio',
)
| 2,153 | 30.676471 | 74 | py |
mt3 | mt3-main/mt3/network.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T5.1.1 Transformer model."""
from typing import Any, Sequence
from flax import linen as nn
from flax import struct
import jax.numpy as jnp
from mt3 import layers
@struct.dataclass
class T5Config:
  """Global hyperparameters used to minimize obnoxious kwarg plumbing."""
  # Size of the token vocabulary used by the decoder's embedding and output
  # projection (the encoder consumes continuous inputs, see Encoder).
  vocab_size: int
  # Activation dtypes.
  dtype: Any = jnp.float32
  # Width of the embedding / residual stream.
  emb_dim: int = 512
  num_heads: int = 8
  num_encoder_layers: int = 6
  num_decoder_layers: int = 6
  # Dimension of each attention head.
  head_dim: int = 64
  # Hidden width of the feed-forward (MLP) blocks.
  mlp_dim: int = 2048
  # Activation functions are retrieved from Flax.
  mlp_activations: Sequence[str] = ('relu',)
  dropout_rate: float = 0.1
  # If `True`, the embedding weights are used in the decoder output layer.
  logits_via_embedding: bool = False
class EncoderLayer(nn.Module):
  """Transformer encoder layer."""
  config: T5Config

  @nn.compact
  def __call__(self, inputs, encoder_mask=None, deterministic=False):
    """Applies one pre-norm self-attention + MLP block with residuals.

    Args:
      inputs: activations of shape [batch, length, emb_dim].
      encoder_mask: optional attention mask for the self-attention.
      deterministic: if True, disables dropout.

    Returns:
      Activations of the same shape as `inputs`.
    """
    cfg = self.config

    # Attention block.
    assert inputs.ndim == 3
    x = layers.LayerNorm(
        dtype=cfg.dtype, name='pre_attention_layer_norm')(
            inputs)
    # [batch, length, emb_dim] -> [batch, length, emb_dim]
    x = layers.MultiHeadDotProductAttention(
        num_heads=cfg.num_heads,
        dtype=cfg.dtype,
        head_dim=cfg.head_dim,
        dropout_rate=cfg.dropout_rate,
        name='attention')(
            x, x, encoder_mask, deterministic=deterministic)
    x = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            x, deterministic=deterministic)
    # residual connection around the attention block
    x = x + inputs

    # MLP block.
    y = layers.LayerNorm(dtype=cfg.dtype, name='pre_mlp_layer_norm')(x)
    # [batch, length, emb_dim] -> [batch, length, emb_dim]
    y = layers.MlpBlock(
        intermediate_dim=cfg.mlp_dim,
        activations=cfg.mlp_activations,
        intermediate_dropout_rate=cfg.dropout_rate,
        dtype=cfg.dtype,
        name='mlp',
    )(y, deterministic=deterministic)
    y = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            y, deterministic=deterministic)
    # residual connection around the MLP block
    y = y + x
    return y
class DecoderLayer(nn.Module):
  """Transformer decoder layer that attends to the encoder."""
  config: T5Config

  @nn.compact
  def __call__(self,
               inputs,
               encoded,
               decoder_mask=None,
               encoder_decoder_mask=None,
               deterministic=False,
               decode=False,
               max_decode_length=None):
    """Applies self-attention, cross-attention and MLP blocks with residuals.

    Args:
      inputs: embedded inputs to the decoder with shape
        [batch, length, emb_dim].
      encoded: encoder outputs to cross-attend to.
      decoder_mask: optional (e.g. causal) mask for the self-attention.
      encoder_decoder_mask: optional mask for the cross-attention.
      deterministic: if True, disables dropout.
      decode: if True, self-attention uses its autoregressive cache.
      max_decode_length: accepted for interface parity with other layers;
        unused in this body.

    Returns:
      Activations of the same shape as `inputs`.
    """
    cfg = self.config

    # inputs: embedded inputs to the decoder with shape [batch, length, emb_dim]
    x = layers.LayerNorm(
        dtype=cfg.dtype, name='pre_self_attention_layer_norm')(
            inputs)

    # Self-attention block
    x = layers.MultiHeadDotProductAttention(
        num_heads=cfg.num_heads,
        dtype=cfg.dtype,
        head_dim=cfg.head_dim,
        dropout_rate=cfg.dropout_rate,
        name='self_attention')(
            x,
            x,
            decoder_mask,
            deterministic=deterministic,
            decode=decode)
    x = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            x, deterministic=deterministic)
    # residual connection around the self-attention block
    x = x + inputs

    # Encoder-Decoder block.
    y = layers.LayerNorm(
        dtype=cfg.dtype, name='pre_cross_attention_layer_norm')(
            x)
    y = layers.MultiHeadDotProductAttention(
        num_heads=cfg.num_heads,
        dtype=cfg.dtype,
        head_dim=cfg.head_dim,
        dropout_rate=cfg.dropout_rate,
        name='encoder_decoder_attention')(
            y, encoded, encoder_decoder_mask, deterministic=deterministic)
    y = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            y, deterministic=deterministic)
    # residual connection around the cross-attention block
    y = y + x

    # MLP block.
    z = layers.LayerNorm(dtype=cfg.dtype, name='pre_mlp_layer_norm')(y)
    z = layers.MlpBlock(
        intermediate_dim=cfg.mlp_dim,
        activations=cfg.mlp_activations,
        intermediate_dropout_rate=cfg.dropout_rate,
        dtype=cfg.dtype,
        name='mlp',
    )(z, deterministic=deterministic)
    z = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            z, deterministic=deterministic)
    # residual connection around the MLP block
    z = z + y
    return z
class Encoder(nn.Module):
  """A stack of encoder layers."""
  config: T5Config

  @nn.compact
  def __call__(self,
               encoder_input_tokens,
               encoder_mask=None,
               deterministic=False):
    """Encodes a sequence of continuous input frames.

    Args:
      encoder_input_tokens: despite the name, these are continuous inputs of
        shape [batch, length, depth] (presumably audio feature frames — TODO
        confirm against the data pipeline); they are linearly projected
        rather than embedded.
      encoder_mask: optional attention mask for all encoder layers.
      deterministic: if True, disables dropout.

    Returns:
      Encoded activations of shape [batch, length, emb_dim].
    """
    cfg = self.config
    assert encoder_input_tokens.ndim == 3  # [batch, length, depth]

    seq_length = encoder_input_tokens.shape[-2]
    inputs_positions = jnp.arange(seq_length)[None, :]

    # [batch, length, depth] -> [batch, length, emb_dim]
    x = layers.DenseGeneral(  # pytype: disable=wrong-arg-types  # jax-types
        cfg.emb_dim,
        dtype=cfg.dtype,
        kernel_init=nn.linear.default_kernel_init,
        kernel_axes=('vocab', 'embed'),
        name='continuous_inputs_projection')(encoder_input_tokens)

    # add fixed (non-learned) positional embeddings; see layers.FixedEmbed
    x = x + layers.FixedEmbed(features=cfg.emb_dim)(inputs_positions)
    x = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            x, deterministic=deterministic)
    x = x.astype(cfg.dtype)

    for lyr in range(cfg.num_encoder_layers):
      # [batch, length, emb_dim] -> [batch, length, emb_dim]
      x = EncoderLayer(
          config=cfg,
          name=f'layers_{lyr}')(x, encoder_mask, deterministic)

    x = layers.LayerNorm(dtype=cfg.dtype, name='encoder_norm')(x)
    return nn.Dropout(rate=cfg.dropout_rate)(x, deterministic=deterministic)
class Decoder(nn.Module):
  """A stack of decoder layers as a part of an encoder-decoder architecture."""
  config: T5Config

  @nn.compact
  def __call__(self,
               encoded,
               decoder_input_tokens,
               decoder_positions=None,
               decoder_mask=None,
               encoder_decoder_mask=None,
               deterministic=False,
               decode=False,
               max_decode_length=None):
    """Decodes target tokens conditioned on the encoder output.

    Args:
      encoded: encoder activations of shape [batch, enc_length, emb_dim].
      decoder_input_tokens: integer token ids of shape [batch, length].
      decoder_positions: accepted for interface parity; NOTE it is
        unconditionally overwritten by a fresh arange below.
      decoder_mask: optional (e.g. causal) self-attention mask.
      encoder_decoder_mask: optional cross-attention mask.
      deterministic: if True, disables dropout.
      decode: if True, self-attention layers use their autoregressive cache.
      max_decode_length: forwarded to the decoder layers.

    Returns:
      Logits of shape [batch, length, vocab_size].
    """
    cfg = self.config
    assert decoder_input_tokens.ndim == 2  # [batch, len]

    seq_length = decoder_input_tokens.shape[-1]
    decoder_positions = jnp.arange(seq_length)[None, :]

    # Keep a handle on the embedder module so its transposed table can be
    # reused as the output projection when cfg.logits_via_embedding is set.
    token_embedder = layers.Embed(  # pytype: disable=wrong-arg-types  # jax-types
        num_embeddings=cfg.vocab_size,
        features=cfg.emb_dim,
        dtype=cfg.dtype,
        attend_dtype=jnp.float32,  # for logit training stability
        embedding_init=nn.initializers.normal(stddev=1.0),
        one_hot=True,
        name='token_embedder')
    # [batch, length] -> [batch, length, emb_dim]
    y = token_embedder(decoder_input_tokens.astype('int32'))
    y = y + layers.FixedEmbed(features=cfg.emb_dim)(
        decoder_positions, decode=decode)
    y = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            y, deterministic=deterministic)
    y = y.astype(cfg.dtype)

    for lyr in range(cfg.num_decoder_layers):
      # [batch, length, emb_dim] -> [batch, length, emb_dim]
      y = DecoderLayer(
          config=cfg, name=f'layers_{lyr}')(
              y,
              encoded,
              decoder_mask=decoder_mask,
              encoder_decoder_mask=encoder_decoder_mask,
              deterministic=deterministic,
              decode=decode,
              max_decode_length=max_decode_length)

    y = layers.LayerNorm(dtype=cfg.dtype, name='decoder_norm')(y)
    y = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            y, deterministic=deterministic)

    # [batch, length, emb_dim] -> [batch, length, vocab_size]
    if cfg.logits_via_embedding:
      # Use the transpose of embedding matrix for logit transform.
      # BUG FIX: this previously read `self.shared_embedding`, an attribute
      # this Module never declares (only `config`), so the branch raised
      # AttributeError whenever logits_via_embedding=True. Use the local
      # token embedder instead; the parameter name ('token_embedder') is
      # unchanged, so existing checkpoints still load.
      logits = token_embedder.attend(y)
      # Correctly normalize pre-softmax logits for this shared case.
      logits = logits / jnp.sqrt(y.shape[-1])
    else:
      logits = layers.DenseGeneral(
          cfg.vocab_size,
          dtype=jnp.float32,  # Use float32 for stability.
          kernel_axes=('embed', 'vocab'),
          name='logits_dense')(
              y)
    return logits
class Transformer(nn.Module):
  """An encoder-decoder Transformer model."""
  config: T5Config
  def setup(self):
    # Encoder and decoder share the same config; no parameter sharing here.
    cfg = self.config
    self.encoder = Encoder(config=cfg)
    self.decoder = Decoder(config=cfg)
  def encode(self,
             encoder_input_tokens,
             encoder_segment_ids=None,
             enable_dropout=True):
    """Applies Transformer encoder-branch on the inputs.

    Args:
      encoder_input_tokens: <float>[batch, length, depth] continuous inputs.
      encoder_segment_ids: encoder segmentation info for packed examples.
      enable_dropout: enables dropout if set to True.

    Returns:
      Encoded activations of shape [batch, length, emb_dim].
    """
    cfg = self.config
    assert encoder_input_tokens.ndim == 3  # (batch, length, depth)
    # Make padding attention mask; we don't actually mask out any input
    # positions, letting the model potentially attend to the zero vector used as
    # padding.
    encoder_mask = layers.make_attention_mask(
        jnp.ones(encoder_input_tokens.shape[:-1]),
        jnp.ones(encoder_input_tokens.shape[:-1]),
        dtype=cfg.dtype)
    # Add segmentation block-diagonal attention mask if using segmented data.
    if encoder_segment_ids is not None:
      encoder_mask = layers.combine_masks(
          encoder_mask,
          layers.make_attention_mask(
              encoder_segment_ids,
              encoder_segment_ids,
              jnp.equal,
              dtype=cfg.dtype))
    return self.encoder(
        encoder_input_tokens, encoder_mask, deterministic=not enable_dropout)
  def decode(
      self,
      encoded,
      encoder_input_tokens,  # only needed for masks
      decoder_input_tokens,
      decoder_target_tokens,
      encoder_segment_ids=None,
      decoder_segment_ids=None,
      decoder_positions=None,
      enable_dropout=True,
      decode=False,
      max_decode_length=None):
    """Applies Transformer decoder-branch on encoded-input and target.

    Note: `decoder_positions` is forwarded to the Decoder, which currently
    recomputes positions internally — presumably it is ignored; verify
    against `Decoder.__call__`.
    """
    cfg = self.config
    # Make padding attention masks.
    if decode:
      # Do not mask decoder attention based on targets padding at
      # decoding/inference time.
      decoder_mask = None
      encoder_decoder_mask = layers.make_attention_mask(
          jnp.ones_like(decoder_target_tokens),
          jnp.ones(encoder_input_tokens.shape[:-1]),
          dtype=cfg.dtype)
    else:
      decoder_mask = layers.make_decoder_mask(
          decoder_target_tokens=decoder_target_tokens,
          dtype=cfg.dtype,
          decoder_segment_ids=decoder_segment_ids)
      encoder_decoder_mask = layers.make_attention_mask(
          decoder_target_tokens > 0,
          jnp.ones(encoder_input_tokens.shape[:-1]),
          dtype=cfg.dtype)
    # Add segmentation block-diagonal attention masks if using segmented data.
    if encoder_segment_ids is not None:
      if decode:
        raise ValueError(
            'During decoding, packing should not be used but '
            '`encoder_segment_ids` was passed to `Transformer.decode`.')
      encoder_decoder_mask = layers.combine_masks(
          encoder_decoder_mask,
          layers.make_attention_mask(
              decoder_segment_ids,
              encoder_segment_ids,
              jnp.equal,
              dtype=cfg.dtype))
    logits = self.decoder(
        encoded,
        decoder_input_tokens=decoder_input_tokens,
        decoder_positions=decoder_positions,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        deterministic=not enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length)
    return logits.astype(self.config.dtype)
  def __call__(self,
               encoder_input_tokens,
               decoder_input_tokens,
               decoder_target_tokens,
               encoder_segment_ids=None,
               decoder_segment_ids=None,
               encoder_positions=None,
               decoder_positions=None,
               *,
               enable_dropout: bool = True,
               decode: bool = False):
    """Applies Transformer model on the inputs.

    This method requires both decoder_target_tokens and decoder_input_tokens,
    which is a shifted version of the former. For a packed dataset, it usually
    has additional processing applied. For example, the first element of each
    sequence has id 0 instead of the shifted EOS id from the previous sequence.

    Args:
      encoder_input_tokens: input data to the encoder.
      decoder_input_tokens: input token to the decoder.
      decoder_target_tokens: target token to the decoder.
      encoder_segment_ids: encoder segmentation info for packed examples.
      decoder_segment_ids: decoder segmentation info for packed examples.
      encoder_positions: encoder subsequence positions for packed examples.
      decoder_positions: decoder subsequence positions for packed examples.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.

    Returns:
      logits array from full transformer.
    """
    encoded = self.encode(
        encoder_input_tokens,
        encoder_segment_ids=encoder_segment_ids,
        enable_dropout=enable_dropout)
    return self.decode(
        encoded,
        encoder_input_tokens,  # only used for masks
        decoder_input_tokens,
        decoder_target_tokens,
        encoder_segment_ids=encoder_segment_ids,
        decoder_segment_ids=decoder_segment_ids,
        decoder_positions=decoder_positions,
        enable_dropout=enable_dropout,
        decode=decode)
| 13,894 | 32.890244 | 80 | py |
mt3 | mt3-main/mt3/layers.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dense attention classes and mask/weighting functions."""
# pylint: disable=attribute-defined-outside-init,g-bare-generic
import dataclasses
import functools
import operator
from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union
from flax import linen as nn
from flax.linen import partitioning as nn_partitioning
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
import numpy as np
# from flax.linen.partitioning import param_with_axes, with_sharding_constraint
# Partitioning-aware aliases for parameter creation / sharding annotation.
param_with_axes = nn_partitioning.param_with_axes
with_sharding_constraint = nn_partitioning.with_sharding_constraint
# Type annotations
Array = jnp.ndarray
DType = jnp.dtype
PRNGKey = jnp.ndarray
Shape = Sequence[int]
Activation = Callable[..., Array]
# Parameter initializers.
Initializer = Callable[[PRNGKey, Shape, DType], Array]
# Default embedding init: fan-in variance scaling along the output axis 0.
default_embed_init = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'normal', out_axis=0)
def sinusoidal(min_scale: float = 1.0,
               max_scale: float = 10000.0,
               dtype: DType = jnp.float32) -> Initializer:
  """Builds an initializer producing fixed 1D sinusoidal position embeddings.

  Args:
    min_scale: Minimum frequency-scale in sine grating.
    max_scale: Maximum frequency-scale in sine grating.
    dtype: The DType of the returned values.

  Returns:
    An initializer mapping (key, (max_len, features)) to the embedding table.
  """
  def init(key: PRNGKey, shape: Shape, dtype: DType = dtype) -> Array:
    """Sinusoidal init."""
    del key  # Deterministic — the PRNG key is never used.
    if dtype != np.float32:
      raise ValueError('The sinusoidal initializer only supports float32.')
    if len(list(shape)) != 2:
      raise ValueError(
          f'Expected a 2D shape (max_len, features), but got {shape}.')
    max_len, features = shape
    half = features // 2
    # Geometric progression of frequencies from min_scale to max_scale.
    log_step = -np.log(max_scale / min_scale) / (half - 1)
    frequencies = min_scale * np.exp(np.arange(0, half) * log_step)
    positions = np.arange(0, max_len)[:, np.newaxis]
    angles = positions * frequencies
    # First half of the features carries sin, second half carries cos; a
    # possible last odd column stays zero.
    table = np.zeros((max_len, features), dtype=dtype)
    table[:, :half] = np.sin(angles)
    table[:, half:2 * half] = np.cos(angles)
    return jnp.array(table)
  return init
def dot_product_attention(query: Array,
                          key: Array,
                          value: Array,
                          bias: Optional[Array] = None,
                          dropout_rng: Optional[PRNGKey] = None,
                          dropout_rate: float = 0.,
                          deterministic: bool = False,
                          dtype: DType = jnp.float32,
                          float32_logits: bool = False):
  """Core scaled dot-product attention (https://arxiv.org/abs/1706.03762).

  Computes attention weights from `query` and `key`, then combines `value`
  rows with those weights.  NOTE: no explicit 1/sqrt(depth) rescaling is done
  here; callers fold it into their projections.

  Args:
    query: `[batch, q_length, num_heads, qk_depth_per_head]` queries.
    key: `[batch, kv_length, num_heads, qk_depth_per_head]` keys.
    value: `[batch, kv_length, num_heads, v_depth_per_head]` values.
    bias: optional additive bias broadcastable to
      `[batch, num_heads, q_length, kv_length]` (causal masks, padding masks,
      proximity bias, ...).
    dropout_rng: JAX PRNGKey used for attention dropout.
    dropout_rate: dropout probability for attention weights.
    deterministic: if True, dropout is skipped.
    dtype: computation dtype (default float32).
    float32_logits: compute the logits in float32 to avoid bfloat16 issues.

  Returns:
    Array of shape `[batch, length, num_heads, v_depth_per_head]`.
  """
  assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
  assert query.shape[:-3] == key.shape[:-3] == value.shape[:-3], (
      'q, k, v batch dims must match.')
  assert query.shape[-2] == key.shape[-2] == value.shape[-2], (
      'q, k, v num_heads must match.')
  assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.'
  assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'
  # Optionally promote the logit computation to float32 for stability.
  if float32_logits:
    query = query.astype(jnp.float32)
    key = key.astype(jnp.float32)
  # `scores`: [batch, num_heads, q_length, kv_length]
  scores = jnp.einsum('bqhd,bkhd->bhqk', query, key)
  if bias is not None:
    scores = scores + bias.astype(scores.dtype)
  # Normalize across the kv_length dimension.
  probs = jax.nn.softmax(scores).astype(dtype)
  # Attention dropout, broadcast along the query-length dimension
  # (mirroring T5's broadcast along "length").
  if not deterministic and dropout_rate > 0.:
    keep_prob = 1.0 - dropout_rate
    drop_shape = list(probs.shape)
    drop_shape[-2] = 1
    keep = jnp.broadcast_to(
        random.bernoulli(dropout_rng, keep_prob, drop_shape), probs.shape)
    probs = probs * (
        keep.astype(probs.dtype) / jnp.asarray(keep_prob, dtype=dtype))
  # Weighted sum over values.
  return jnp.einsum('bhqk,bkhd->bqhd', probs, value)
# Batched dynamic slice: vmaps lax.dynamic_slice_in_dim over per-example start
# indices, so each batch row can be sliced at a different position with the
# same (size, axis).  Used to pick the current decoding step out of a bias.
dynamic_vector_slice_in_dim = jax.vmap(
    lax.dynamic_slice_in_dim, in_axes=(None, 0, None, None))
class MultiHeadDotProductAttention(nn.Module):
  """Multi-head dot-product attention.

  Attributes:
    num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
      should be divisible by the number of heads.
    head_dim: dimension of each head.
    dtype: the dtype of the computation.
    dropout_rate: dropout rate
    kernel_init: initializer for the kernel of the Dense layers.
    float32_logits: bool, if True then compute logits in float32 to avoid
      numerical issues with bfloat16.
  """
  num_heads: int
  head_dim: int
  dtype: DType = jnp.float32
  dropout_rate: float = 0.
  kernel_init: Initializer = nn.initializers.variance_scaling(
      1.0, 'fan_in', 'normal')
  float32_logits: bool = False  # computes logits in float32 for stability.

  @nn.compact
  def __call__(self,
               inputs_q: Array,
               inputs_kv: Array,
               mask: Optional[Array] = None,
               bias: Optional[Array] = None,
               *,
               decode: bool = False,
               deterministic: bool = False) -> Array:
    """Applies multi-head dot product attention on the input data.

    Projects the inputs into multi-headed query, key, and value vectors,
    applies dot-product attention and project the results to an output vector.

    There are two modes: decoding and non-decoding (e.g., training). The mode is
    determined by `decode` argument. For decoding, this method is called twice,
    first to initialize the cache and then for an actual decoding process. The
    two calls are differentiated by the presence of 'cached_key' in the variable
    dict. In the cache initialization stage, the cache variables are initialized
    as zeros and will be filled in the subsequent decoding process.

    In the cache initialization call, `inputs_q` has a shape [batch, length,
    q_features] and `inputs_kv`: [batch, length, kv_features]. During the
    incremental decoding stage, query, key and value all have the shape [batch,
    1, qkv_features] corresponding to a single step.

    Args:
      inputs_q: input queries of shape `[batch, q_length, q_features]`.
      inputs_kv: key/values of shape `[batch, kv_length, kv_features]`.
      mask: attention mask of shape `[batch, num_heads, q_length, kv_length]`.
      bias: attention bias of shape `[batch, num_heads, q_length, kv_length]`.
      decode: Whether to prepare and use an autoregressive cache.
      deterministic: Disables dropout if set to True.

    Returns:
      output of shape `[batch, length, q_features]`.
    """
    # All three projections share shape/partitioning; only init/name differ.
    projection = functools.partial(
        DenseGeneral,
        axis=-1,
        features=(self.num_heads, self.head_dim),
        kernel_axes=('embed', 'joined_kv'),
        dtype=self.dtype)

    # NOTE: T5 does not explicitly rescale the attention logits by
    #       1/sqrt(depth_kq)!  This is folded into the initializers of the
    #       linear transformations, which is equivalent under Adafactor.
    depth_scaling = jnp.sqrt(self.head_dim).astype(self.dtype)
    query_init = lambda *args: self.kernel_init(*args) / depth_scaling

    # Project inputs_q to multi-headed q/k/v
    # dimensions are then [batch, length, num_heads, head_dim]
    query = projection(kernel_init=query_init, name='query')(inputs_q)
    key = projection(kernel_init=self.kernel_init, name='key')(inputs_kv)
    value = projection(kernel_init=self.kernel_init, name='value')(inputs_kv)

    query = with_sharding_constraint(query, ('batch', 'length', 'heads', 'kv'))
    key = with_sharding_constraint(key, ('batch', 'length', 'heads', 'kv'))
    value = with_sharding_constraint(value, ('batch', 'length', 'heads', 'kv'))

    if decode:
      # Detect if we're initializing by absence of existing cache data.
      is_initialized = self.has_variable('cache', 'cached_key')
      # The key and value have dimension [batch, length, num_heads, head_dim],
      # but we cache them as [batch, num_heads, head_dim, length] as a TPU
      # fusion optimization. This also enables the "scatter via one-hot
      # broadcast" trick, which means we do a one-hot broadcast instead of a
      # scatter/gather operations, resulting in a 3-4x speedup in practice.
      swap_dims = lambda x: x[:-3] + tuple(x[i] for i in [-2, -1, -3])
      cached_key = self.variable('cache', 'cached_key', jnp.zeros,
                                 swap_dims(key.shape), key.dtype)
      cached_value = self.variable('cache', 'cached_value', jnp.zeros,
                                   swap_dims(value.shape), value.dtype)
      cache_index = self.variable('cache', 'cache_index',
                                  lambda: jnp.array(0, dtype=jnp.int32))
      if is_initialized:
        batch, num_heads, head_dim, length = (cached_key.value.shape)
        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        # Sanity shape check of cached key against input query.
        expected_shape = (batch, 1, num_heads, head_dim)
        if expected_shape != query.shape:
          raise ValueError('Autoregressive cache shape error, '
                           'expected query shape %s instead got %s.' %
                           (expected_shape, query.shape))
        # Create a OHE of the current index. NOTE: the index is increased below.
        cur_index = cache_index.value
        one_hot_indices = jax.nn.one_hot(cur_index, length, dtype=key.dtype)
        # In order to update the key, value caches with the current key and
        # value, we move the length axis to the back, similar to what we did for
        # the cached ones above.
        # Note these are currently the key and value of a single position, since
        # we feed one position at a time.
        one_token_key = jnp.moveaxis(key, -3, -1)
        one_token_value = jnp.moveaxis(value, -3, -1)
        # Update key, value caches with our new 1d spatial slices.
        # We implement an efficient scatter into the cache via one-hot
        # broadcast and addition.
        key = cached_key.value + one_token_key * one_hot_indices
        value = cached_value.value + one_token_value * one_hot_indices
        cached_key.value = key
        cached_value.value = value
        cache_index.value = cache_index.value + 1
        # Move the keys and values back to their original shapes.
        key = jnp.moveaxis(key, -1, -3)
        value = jnp.moveaxis(value, -1, -3)
        # Causal mask for cached decoder self-attention: our single query
        # position should only attend to those key positions that have already
        # been generated and cached, not the remaining zero elements.
        mask = combine_masks(
            mask,
            jnp.broadcast_to(
                jnp.arange(length) <= cur_index,
                # (1, 1, length) represent (head dim, query length, key length)
                # query length is 1 because during decoding we deal with one
                # index.
                # The same mask is applied to all batch elements and heads.
                (batch, 1, 1, length)))
        # Grab the correct relative attention bias during decoding. This is
        # only required during single step decoding.
        if bias is not None:
          # The bias is a full attention matrix, but during decoding we only
          # have to take a slice of it.
          # This is equivalent to bias[..., cur_index:cur_index+1, :].
          bias = dynamic_vector_slice_in_dim(
              jnp.squeeze(bias, axis=0), jnp.reshape(cur_index, (-1)), 1, -2)

    # Convert the boolean attention mask to an attention bias: 0 where
    # attention is allowed, a large negative number where it is masked.
    if mask is not None:
      # attention mask in the form of attention bias
      attention_bias = lax.select(
          mask > 0,
          jnp.full(mask.shape, 0.).astype(self.dtype),
          jnp.full(mask.shape, -1e10).astype(self.dtype))
    else:
      attention_bias = None

    # Add provided bias term (e.g. relative position embedding).
    if bias is not None:
      attention_bias = combine_biases(attention_bias, bias)

    dropout_rng = None
    if not deterministic and self.dropout_rate > 0.:
      dropout_rng = self.make_rng('dropout')

    # Apply attention.
    x = dot_product_attention(
        query,
        key,
        value,
        bias=attention_bias,
        dropout_rng=dropout_rng,
        dropout_rate=self.dropout_rate,
        deterministic=deterministic,
        dtype=self.dtype,
        float32_logits=self.float32_logits)

    # Back to the original inputs dimensions.
    out = DenseGeneral(
        features=inputs_q.shape[-1],  # output dim is set to the input dim.
        axis=(-2, -1),
        kernel_init=self.kernel_init,
        kernel_axes=('joined_kv', 'embed'),
        dtype=self.dtype,
        name='out')(
            x)
    return out
def _normalize_axes(axes: Iterable[int], ndim: int) -> Tuple[int]:
# A tuple by convention. len(axes_tuple) then also gives the rank efficiently.
return tuple([ax if ax >= 0 else ndim + ax for ax in axes])
def _canonicalize_tuple(x):
if isinstance(x, Iterable):
return tuple(x)
else:
return (x,)
#------------------------------------------------------------------------------
# DenseGeneral for attention layers.
#------------------------------------------------------------------------------
class DenseGeneral(nn.Module):
  """A linear transformation (without bias) with flexible axes.

  Attributes:
    features: tuple with numbers of output features.
    axis: tuple with axes to apply the transformation on.
    dtype: the dtype of the computation (default: float32).
    kernel_init: initializer function for the weight matrix.
    kernel_axes: logical axis names for partitioned parameter creation.
  """
  features: Union[Iterable[int], int]
  axis: Union[Iterable[int], int] = -1
  dtype: DType = jnp.float32
  kernel_init: Initializer = nn.initializers.variance_scaling(
      1.0, 'fan_in', 'truncated_normal')
  kernel_axes: Tuple[str, ...] = ()

  @nn.compact
  def __call__(self, inputs: Array) -> Array:
    """Applies a linear transformation to the inputs along multiple dimensions.

    Args:
      inputs: The nd-array to be transformed.

    Returns:
      The transformed input.
    """
    features = _canonicalize_tuple(self.features)
    axis = _canonicalize_tuple(self.axis)
    inputs = jnp.asarray(inputs, self.dtype)
    axis = _normalize_axes(axis, inputs.ndim)
    kernel_shape = tuple([inputs.shape[ax] for ax in axis]) + features
    # The parameter is stored 2D (flattened input dims x flattened output
    # dims) so partitioning reasons about two logical axes; it is reshaped to
    # the full kernel shape before the contraction below.
    kernel_param_shape = (np.prod([inputs.shape[ax] for ax in axis]),
                          np.prod(features))
    kernel = param_with_axes(
        'kernel',
        self.kernel_init,
        kernel_param_shape,
        jnp.float32,
        axes=self.kernel_axes)
    kernel = jnp.asarray(kernel, self.dtype)
    kernel = jnp.reshape(kernel, kernel_shape)
    # Contract the selected input axes against the leading kernel axes.
    contract_ind = tuple(range(0, len(axis)))
    return lax.dot_general(inputs, kernel, ((axis, contract_ind), ((), ())))
def _convert_to_activation_function(
fn_or_string: Union[str, Callable]) -> Callable:
"""Convert a string to an activation function."""
if fn_or_string == 'linear':
return lambda x: x
elif isinstance(fn_or_string, str):
return getattr(nn, fn_or_string)
elif callable(fn_or_string):
return fn_or_string
else:
raise ValueError("don't know how to convert %s to an activation function" %
(fn_or_string,))
class MlpBlock(nn.Module):
  """Transformer MLP / feed-forward block.

  Attributes:
    intermediate_dim: Shared dimension of hidden layers.
    activations: Type of activations for each layer.  Each element is either
      'linear', a string function name in flax.linen, or a function.  More
      than one element yields a gated unit (elementwise product of branches).
    kernel_init: Kernel function, passed to the dense layers.
    deterministic: Whether the dropout layers should be deterministic.
    intermediate_dropout_rate: Dropout rate used after the intermediate layers.
    dtype: Type for the dense layer.
  """
  intermediate_dim: int = 2048
  activations: Sequence[Union[str, Callable]] = ('relu',)
  kernel_init: Initializer = nn.initializers.variance_scaling(
      1.0, 'fan_in', 'truncated_normal')
  intermediate_dropout_rate: float = 0.1
  dtype: Any = jnp.float32

  @nn.compact
  def __call__(self, inputs, decode: bool = False, deterministic: bool = False):
    """Applies Transformer MlpBlock module."""
    # Iterate over specified MLP input activation functions.
    # e.g. ('relu',) or ('gelu', 'linear') for gated-gelu.
    activations = []
    for idx, act_fn in enumerate(self.activations):
      # Single-branch MLPs keep the historical 'wi' name for checkpoint
      # compatibility; gated variants get 'wi_0', 'wi_1', ...
      dense_name = 'wi' if len(self.activations) == 1 else f'wi_{idx}'
      x = DenseGeneral(
          self.intermediate_dim,
          dtype=self.dtype,
          kernel_init=self.kernel_init,
          kernel_axes=('embed', 'mlp'),
          name=dense_name)(
              inputs)
      x = _convert_to_activation_function(act_fn)(x)
      activations.append(x)
    # Take elementwise product of above intermediate activations.
    x = functools.reduce(operator.mul, activations)
    # Apply dropout and final dense output projection.
    x = nn.Dropout(
        rate=self.intermediate_dropout_rate, broadcast_dims=(-2,))(
            x, deterministic=deterministic)  # Broadcast along length.
    x = with_sharding_constraint(x, ('batch', 'length', 'mlp'))
    output = DenseGeneral(
        inputs.shape[-1],
        dtype=self.dtype,
        kernel_init=self.kernel_init,
        kernel_axes=('mlp', 'embed'),
        name='wo')(
            x)
    return output
class Embed(nn.Module):
  """A parameterized function from integers [0, n) to d-dimensional vectors.

  Attributes:
    num_embeddings: number of embeddings.
    features: number of feature dimensions for each embedding.
    cast_input_dtype: optional dtype to cast inputs to before the lookup.
    dtype: the dtype of the embedding vectors (default: float32).
    attend_dtype: optional dtype used by `attend` (e.g. float32 for stability).
    embedding_init: embedding initializer.
    one_hot: performs the gather with a one-hot contraction rather than a true
      gather. This is currently needed for SPMD partitioning.
  """
  num_embeddings: int
  features: int
  cast_input_dtype: Optional[DType] = None
  dtype: DType = jnp.float32
  attend_dtype: Optional[DType] = None
  embedding_init: Initializer = default_embed_init
  one_hot: bool = False
  embedding: Array = dataclasses.field(init=False)

  def setup(self):
    self.embedding = param_with_axes(
        'embedding',
        self.embedding_init, (self.num_embeddings, self.features),
        jnp.float32,
        axes=('vocab', 'embed'))

  def __call__(self, inputs: Array) -> Array:
    """Embeds the inputs along the last dimension.

    Args:
      inputs: input data, all dimensions are considered batch dimensions.

    Returns:
      Output which is embedded input data.  The output shape follows the input,
      with an additional `features` dimension appended.
    """
    if self.cast_input_dtype:
      inputs = inputs.astype(self.cast_input_dtype)
    if not jnp.issubdtype(inputs.dtype, jnp.integer):
      raise ValueError('Input type must be an integer or unsigned integer.')
    if self.one_hot:
      # One-hot matmul lookup: equivalent to a gather but partitionable.
      iota = lax.iota(jnp.int32, self.num_embeddings)
      one_hot = jnp.array(inputs[..., jnp.newaxis] == iota, dtype=self.dtype)
      output = jnp.dot(one_hot, jnp.asarray(self.embedding, self.dtype))
    else:
      output = jnp.asarray(self.embedding, self.dtype)[inputs]
      output = with_sharding_constraint(output, ('batch', 'length', 'embed'))
    return output

  def attend(self, query: Array) -> Array:
    """Attend over the embedding using a query array.

    Args:
      query: array with last dimension equal the feature depth `features` of the
        embedding.

    Returns:
      An array with final dim `num_embeddings` corresponding to the batched
      inner-product of the array of query vectors against each embedding.
      Commonly used for weight-sharing between embeddings and logit transform
      in NLP models.
    """
    dtype = self.attend_dtype if self.attend_dtype is not None else self.dtype
    return jnp.dot(query, jnp.asarray(self.embedding, dtype).T)
class FixedEmbed(nn.Module):
  """Fixed (not learnable) embeddings specified by the initializer function.

  Attributes:
    features: number of feature dimensions per position.
    max_length: The maximum supported length.
    embedding_init: The initializer function that defines the embeddings.
    dtype: The DType to use for the embeddings.
  """
  features: int
  max_length: int = 2048
  embedding_init: Initializer = sinusoidal()
  dtype: jnp.dtype = jnp.float32

  def setup(self):
    # The key is set to None because sinusoid init is deterministic.
    shape = (self.max_length, self.features)
    self.embedding = self.embedding_init(None, shape, self.dtype)  # pylint: disable=too-many-function-args  # pytype: disable=wrong-arg-types  # jax-ndarray

  @nn.compact
  def __call__(self,
               inputs,
               *,
               decode: bool = False):
    """Returns the fixed position embeddings specified by the initializer.

    Args:
      inputs: <int>[batch_size, seq_len] input position indices.
      decode: True if running in single-position autoregressive decode mode.

    Returns:
      The fixed position embeddings <float32>[batch_size, seq_len, features].
    """
    # We use a cache position index for tracking decoding position.
    if decode:
      # NOTE(review): the index starts at uint32(-1); the first (cache-init)
      # call presumably has its output discarded, so real decode steps see
      # indices 0, 1, ... — confirm against the decoding driver.
      position_embedder_index = self.variable(
          'cache', 'position_embedder_index',
          lambda: jnp.array(-1, dtype=jnp.uint32))
      i = position_embedder_index.value
      position_embedder_index.value = i + 1
      return jax.lax.dynamic_slice(self.embedding, jnp.array((i, 0)),
                                   np.array((1, self.features)))
    return jnp.take(self.embedding, inputs, axis=0)
#------------------------------------------------------------------------------
# T5 Layernorm - no subtraction of mean or bias.
#------------------------------------------------------------------------------
class LayerNorm(nn.Module):
  """T5 Layer normalization operating on the last axis of the input data."""
  epsilon: float = 1e-6
  dtype: Any = jnp.float32
  scale_init: Initializer = nn.initializers.ones

  @nn.compact
  def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
    """Applies layer normalization on the input."""
    # RMS-style norm: divides by sqrt(mean(x^2)) with no mean subtraction and
    # no bias, following T5.  Statistics are computed in float32 for
    # stability, then cast back to `self.dtype`.
    x = jnp.asarray(x, jnp.float32)
    features = x.shape[-1]
    mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)
    y = jnp.asarray(x * lax.rsqrt(mean2 + self.epsilon), self.dtype)
    scale = param_with_axes(
        'scale', self.scale_init, (features,), jnp.float32, axes=('embed',))
    scale = jnp.asarray(scale, self.dtype)
    return y * scale
#------------------------------------------------------------------------------
# Mask-making utility functions.
#------------------------------------------------------------------------------
def make_attention_mask(query_input: Array,
                        key_input: Array,
                        pairwise_fn: Callable = jnp.multiply,
                        extra_batch_dims: int = 0,
                        dtype: DType = jnp.float32) -> Array:
  """Mask-making helper for attention weights.

  For 1d inputs (`[batch, len_q]`, `[batch, len_kv]`) where attention weights
  are `[batch, heads, len_q, len_kv]`, this produces `[batch, 1, len_q,
  len_kv]`.

  Args:
    query_input: a batched, flat input of query_length size
    key_input: a batched, flat input of key_length size
    pairwise_fn: broadcasting elementwise comparison function
    extra_batch_dims: number of extra batch dims to add singleton axes for,
      none by default
    dtype: mask return dtype

  Returns:
    A `[batch, 1, len_q, len_kv]` shaped mask for 1d attention.
  """
  # [batch, len_q, 1] (op) [batch, 1, len_kv] -> [batch, len_q, len_kv]
  query_col = jnp.expand_dims(query_input, axis=-1)
  key_row = jnp.expand_dims(key_input, axis=-2)
  mask = pairwise_fn(query_col, key_row)
  # Insert the singleton heads dimension: [batch, 1, len_q, len_kv].
  mask = jnp.expand_dims(mask, axis=-3)
  # Optionally prepend extra singleton batch dimensions.
  mask = jnp.expand_dims(mask, axis=tuple(range(extra_batch_dims)))
  return mask.astype(dtype)
def make_causal_mask(x: Array,
                     extra_batch_dims: int = 0,
                     dtype: DType = jnp.float32) -> Array:
  """Make a causal mask for self-attention.

  For `[batch, len]` inputs (self-attention weights `[batch, heads, len,
  len]`) this produces a causal mask of shape `[batch, 1, len, len]`.  The
  mask depends only on x's shape, never its values; padding positions are not
  treated specially here.

  Args:
    x: input array of shape `[batch, len]`
    extra_batch_dims: number of batch dims to add singleton axes for, none by
      default
    dtype: mask return dtype

  Returns:
    A `[batch, 1, len, len]` shaped causal mask for 1d attention.
  """
  # Position indices broadcast to x's shape; query i may attend to keys j<=i.
  positions = jnp.broadcast_to(
      jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape)
  return make_attention_mask(
      positions,
      positions,
      jnp.greater_equal,
      extra_batch_dims=extra_batch_dims,
      dtype=dtype)
def combine_masks(*masks: Optional[Array], dtype: DType = jnp.float32):
  """Combines attention masks by logical AND, ignoring `None` entries.

  Args:
    *masks: set of attention mask arguments to combine, some can be None.
    dtype: final mask dtype

  Returns:
    Combined mask, reduced by logical and; None if no masks were given.
  """
  present = [m for m in masks if m is not None]
  if not present:
    return None
  assert all(map(lambda x: x.ndim == present[0].ndim, present)), (
      f'masks must have same rank: {tuple(map(lambda x: x.ndim, present))}')
  combined, *rest = present
  for extra in rest:
    combined = jnp.logical_and(combined, extra)
  return combined.astype(dtype)
def combine_biases(*masks: Optional[Array]):
  """Combines attention biases by summation, ignoring `None` entries.

  Args:
    *masks: set of attention bias arguments to combine, some can be None.

  Returns:
    Combined bias, reduced by summation; None if no biases were given.
  """
  present = [m for m in masks if m is not None]
  if not present:
    return None
  assert all(map(lambda x: x.ndim == present[0].ndim, present)), (
      f'masks must have same rank: {tuple(map(lambda x: x.ndim, present))}')
  total, *rest = present
  for extra in rest:
    total = total + extra
  return total
def make_decoder_mask(decoder_target_tokens: Array,
                      dtype: DType,
                      decoder_causal_attention: Optional[Array] = None,
                      decoder_segment_ids: Optional[Array] = None) -> Array:
  """Compute the self-attention mask for a decoder.

  The decoder mask is the AND of a causal mask, a padding mask and an
  optional packing mask. When `decoder_causal_attention` is given, positions
  marked 1 there attend bidirectionally (prefix-LM behavior): in a dataset
  with concatenated "inputs" and "targets", the "inputs" tokens may attend
  to later "inputs" tokens, and `decoder_causal_attention` is the binary
  mask marking which positions belong to that "inputs" portion.

  Example (packed, two original examples):

    decoder_target_tokens    = [[6, 7, 8, 3, 4, 5, 0]]
    decoder_segment_ids      = [[1, 1, 1, 2, 2, 2, 0]]
    decoder_causal_attention = [[1, 1, 0, 1, 1, 0, 0]]

    mask = [[[[1, 1, 0, 0, 0, 0, 0],
              [1, 1, 0, 0, 0, 0, 0],
              [1, 1, 1, 0, 0, 0, 0],
              [0, 0, 0, 1, 1, 0, 0],
              [0, 0, 0, 1, 1, 0, 0],
              [0, 0, 0, 1, 1, 1, 0],
              [0, 0, 0, 0, 0, 0, 0]]]]

  `mask[b, 1, i, j] = 1` means the query at position i may attend to the
  key at position j; the mask is square `[query length, key length]`.

  Args:
    decoder_target_tokens: decoder output tokens, `[batch, length]`.
    dtype: dtype of the output mask.
    decoder_causal_attention: binary mask; positions with value 1 attend
      bidirectionally, others only causally. `[batch, length]`.
    decoder_segment_ids: segmentation info for packed examples.
      `[batch, length]`.

  Returns:
    The combined decoder mask.
  """
  # One shared mask for all heads: head dim is 1 and broadcasts.
  # [batch, 1, length, length]
  causal = make_causal_mask(decoder_target_tokens, dtype=dtype)
  # Positions with value 1 in `decoder_causal_attention` can attend
  # bidirectionally.
  if decoder_causal_attention is not None:
    # [batch, 1, length, length]
    bidirectional = make_attention_mask(
        decoder_causal_attention,
        decoder_causal_attention,
        jnp.logical_and,
        dtype=dtype)
    base = jnp.logical_or(causal, bidirectional).astype(dtype)
  else:
    base = causal
  # Padding mask.
  padding = make_attention_mask(
      decoder_target_tokens > 0, decoder_target_tokens > 0, dtype=dtype)
  parts = [base, padding]
  # Packing mask.
  if decoder_segment_ids is not None:
    parts.append(
        make_attention_mask(
            decoder_segment_ids, decoder_segment_ids, jnp.equal, dtype=dtype))
  return combine_masks(*parts, dtype=dtype)  # pytype: disable=bad-return-type  # jax-ndarray
| 32,586 | 38.2142 | 157 | py |
mt3 | mt3-main/mt3/layers_test.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for attention classes."""
import dataclasses
from typing import Optional
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
from flax.core import freeze
from flax.linen import partitioning as nn_partitioning
import jax
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
from mt3 import layers
import numpy as np
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
# Module-level aliases used throughout the tests below.
Array = jnp.ndarray
AxisMetadata = nn_partitioning.AxisMetadata  # pylint: disable=invalid-name
class SelfAttention(layers.MultiHeadDotProductAttention):
  """Self-attention special case of multi-head dot-product attention."""
  @nn.compact
  def __call__(self,
               inputs_q: Array,
               mask: Optional[Array] = None,
               bias: Optional[Array] = None,
               deterministic: bool = False):
    # Self-attention: the same tensor is used as both the query input and
    # the key/value input of the parent multi-head attention layer.
    return super().__call__(
        inputs_q, inputs_q, mask, bias, deterministic=deterministic)
@dataclasses.dataclass(frozen=True)
class SelfAttentionArgs:
  """Bundles constructor and call arguments for SelfAttention tests."""
  num_heads: int = 1
  batch_size: int = 2
  # qkv_features: int = 3
  head_dim: int = 3
  # out_features: int = 4
  q_len: int = 5
  features: int = 6
  dropout_rate: float = 0.1
  deterministic: bool = False
  decode: bool = False
  float32_logits: bool = False
  def __post_init__(self):
    # Autoregressive decoding feeds one position at a time, so the query
    # length must be 1 whenever decode is enabled.
    assert not self.decode or self.q_len == 1
  def init_args(self):
    """Keyword arguments for constructing the attention module."""
    return {
        'num_heads': self.num_heads,
        'head_dim': self.head_dim,
        'dropout_rate': self.dropout_rate,
        'float32_logits': self.float32_logits,
    }
  def apply_args(self):
    """Keyword arguments for calling the attention module."""
    q_shape = (self.batch_size, self.q_len, self.features)
    attn_shape = (self.batch_size, self.num_heads, self.q_len, self.q_len)
    return {
        'inputs_q': jnp.ones(q_shape),
        'mask': jnp.ones(attn_shape),
        'bias': jnp.ones(attn_shape),
        'deterministic': self.deterministic,
    }
class AttentionTest(parameterized.TestCase):
  """Unit tests for dot-product attention and the mask/bias helpers."""
  def test_dot_product_attention_shape(self):
    # This test only checks for shape but tries to make sure all code paths are
    # reached.
    dropout_rng = random.PRNGKey(0)
    batch_size, num_heads, q_len, kv_len, qk_depth, v_depth = 1, 2, 3, 4, 5, 6
    query = jnp.ones((batch_size, q_len, num_heads, qk_depth))
    key = jnp.ones((batch_size, kv_len, num_heads, qk_depth))
    value = jnp.ones((batch_size, kv_len, num_heads, v_depth))
    bias = jnp.ones((batch_size, num_heads, q_len, kv_len))
    args = dict(
        query=query,
        key=key,
        value=value,
        bias=bias,
        dropout_rng=dropout_rng,
        dropout_rate=0.5,
        deterministic=False,
    )
    output = layers.dot_product_attention(**args)
    self.assertEqual(output.shape, (batch_size, q_len, num_heads, v_depth))
  def test_make_attention_mask_multiply_pairwise_fn(self):
    # Default pairwise_fn (multiply) with a padding predicate as input.
    decoder_target_tokens = jnp.array([[7, 0, 0], [8, 5, 0]])
    attention_mask = layers.make_attention_mask(
        decoder_target_tokens > 0, decoder_target_tokens > 0, dtype=jnp.int32)
    expected0 = jnp.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
    expected1 = jnp.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]])
    self.assertEqual(attention_mask.shape, (2, 1, 3, 3))
    np.testing.assert_array_equal(attention_mask[0, 0], expected0)
    np.testing.assert_array_equal(attention_mask[1, 0], expected1)
  def test_make_attention_mask_equal_pairwise_fn(self):
    segment_ids = jnp.array([[1, 1, 2, 2, 2, 0], [1, 1, 1, 2, 0, 0]])
    attention_mask = layers.make_attention_mask(
        segment_ids, segment_ids, pairwise_fn=jnp.equal, dtype=jnp.int32)
    # Padding is not treated in a special way. So they need to be zeroed out
    # separately.
    expected0 = jnp.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
                           [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0],
                           [0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1]])
    expected1 = jnp.array([[1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0],
                           [1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1]])
    self.assertEqual(attention_mask.shape, (2, 1, 6, 6))
    np.testing.assert_array_equal(attention_mask[0, 0], expected0)
    np.testing.assert_array_equal(attention_mask[1, 0], expected1)
  def test_make_causal_mask_with_padding(self):
    x = jnp.array([[7, 0, 0], [8, 5, 0]])
    y = layers.make_causal_mask(x)
    self.assertEqual(y.shape, (2, 1, 3, 3))
    # Padding is not treated in a special way. So they need to be zeroed out
    # separately.
    expected_y = jnp.array([[[1., 0., 0.], [1., 1., 0.], [1., 1., 1.]]],
                           jnp.float32)
    np.testing.assert_allclose(y[0], expected_y)
    np.testing.assert_allclose(y[1], expected_y)
  def test_make_causal_mask_extra_batch_dims(self):
    x = jnp.ones((3, 3, 5))
    y = layers.make_causal_mask(x, extra_batch_dims=2)
    self.assertEqual(y.shape, (1, 1, 3, 3, 1, 5, 5))
  def test_make_causal_mask(self):
    x = jnp.ones((1, 3))
    y = layers.make_causal_mask(x)
    self.assertEqual(y.shape, (1, 1, 3, 3))
    expected_y = jnp.array([[[[1., 0., 0.], [1., 1., 0.], [1., 1., 1.]]]],
                           jnp.float32)
    np.testing.assert_allclose(y, expected_y)
  def test_combine_masks(self):
    # None entries are skipped; remaining masks are AND-reduced.
    masks = [
        jnp.array([0, 1, 0, 1], jnp.float32), None,
        jnp.array([1, 1, 1, 1], jnp.float32),
        jnp.array([1, 1, 1, 0], jnp.float32)
    ]
    y = layers.combine_masks(*masks)
    np.testing.assert_allclose(y, jnp.array([0, 1, 0, 0], jnp.float32))
  def test_combine_biases(self):
    # None entries are skipped; remaining biases are summed.
    masks = [
        jnp.array([0, 1, 0, 1], jnp.float32), None,
        jnp.array([0, 1, 1, 1], jnp.float32),
        jnp.array([0, 1, 1, 0], jnp.float32)
    ]
    y = layers.combine_biases(*masks)
    np.testing.assert_allclose(y, jnp.array([0, 3, 2, 2], jnp.float32))
  def test_make_decoder_mask_lm_unpacked(self):
    decoder_target_tokens = jnp.array([6, 7, 3, 0])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens, dtype=jnp.float32)
    expected_mask = jnp.array([[[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0],
                                [0, 0, 0, 0]]])
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_lm_packed(self):
    decoder_target_tokens = jnp.array([[6, 7, 3, 4, 5, 0]])
    decoder_segment_ids = jnp.array([[1, 1, 1, 2, 2, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_segment_ids=decoder_segment_ids)
    expected_mask = jnp.array([[[[1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
                                 [1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0],
                                 [0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0]]]])
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_prefix_lm_unpacked(self):
    decoder_target_tokens = jnp.array([[5, 6, 7, 3, 4, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 1, 0, 0, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention)
    expected_mask = jnp.array(
        [[[[1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0],
           [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0]]]],
        dtype=jnp.float32)
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_prefix_lm_packed(self):
    decoder_target_tokens = jnp.array([[5, 6, 7, 8, 3, 4, 0]])
    decoder_segment_ids = jnp.array([[1, 1, 1, 2, 2, 2, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 1, 1, 0, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention,
        decoder_segment_ids=decoder_segment_ids)
    expected_mask = jnp.array([[[[1, 1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0],
                                 [1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0],
                                 [0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0],
                                 [0, 0, 0, 0, 0, 0, 0]]]])
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_prefix_lm_unpacked_multiple_elements(self):
    decoder_target_tokens = jnp.array([[6, 7, 3, 0], [4, 5, 0, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 0], [1, 0, 0, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention)
    expected_mask0 = jnp.array([[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0],
                                [0, 0, 0, 0]])
    expected_mask1 = jnp.array([[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0],
                                [0, 0, 0, 0]])
    self.assertEqual(mask.shape, (2, 1, 4, 4))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)
    np.testing.assert_array_equal(mask[1, 0], expected_mask1)
  def test_make_decoder_mask_composite_causal_attention(self):
    decoder_target_tokens = jnp.array([[6, 7, 3, 4, 8, 9, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 0, 1, 1, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention)
    expected_mask0 = jnp.array([[1, 1, 0, 0, 1, 1, 0], [1, 1, 0, 0, 1, 1, 0],
                                [1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0],
                                [1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0]])
    self.assertEqual(mask.shape, (1, 1, 7, 7))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)
  def test_make_decoder_mask_composite_causal_attention_packed(self):
    decoder_target_tokens = jnp.array([[6, 7, 3, 4, 8, 9, 2, 3, 4]])
    decoder_segment_ids = jnp.array([[1, 1, 1, 1, 1, 1, 2, 2, 2]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 0, 1, 1, 1, 1, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention,
        decoder_segment_ids=decoder_segment_ids)
    expected_mask0 = jnp.array([[1, 1, 0, 0, 1, 1, 0, 0, 0],
                                [1, 1, 0, 0, 1, 1, 0, 0, 0],
                                [1, 1, 1, 0, 0, 0, 0, 0, 0],
                                [1, 1, 1, 1, 0, 0, 0, 0, 0],
                                [1, 1, 1, 1, 1, 1, 0, 0, 0],
                                [1, 1, 1, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 1, 1, 1]])
    self.assertEqual(mask.shape, (1, 1, 9, 9))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)
  @parameterized.parameters({'f': 20}, {'f': 22})
  def test_multihead_dot_product_attention(self, f):
    # b: batch, f: emb_dim, q: q_len, k: kv_len, h: num_head, d: head_dim
    b, q, h, d, k = 2, 3, 4, 5, 6
    base_args = SelfAttentionArgs(num_heads=h, head_dim=d, dropout_rate=0)
    args = base_args.init_args()
    np.random.seed(0)
    inputs_q = np.random.randn(b, q, f)
    inputs_kv = np.random.randn(b, k, f)
    # Projection: [b, q, f] -> [b, q, h, d]
    # So the kernels have to be [f, h, d]
    query_kernel = np.random.randn(f, h, d)
    key_kernel = np.random.randn(f, h, d)
    value_kernel = np.random.randn(f, h, d)
    # `out` calculation: [b, q, h, d] -> [b, q, f]
    # So kernel has to be [h, d, f]
    out_kernel = np.random.randn(h, d, f)
    params = {
        'query': {
            'kernel': query_kernel.reshape(f, -1)
        },
        'key': {
            'kernel': key_kernel.reshape(f, -1)
        },
        'value': {
            'kernel': value_kernel.reshape(f, -1)
        },
        'out': {
            'kernel': out_kernel.reshape(-1, f)
        }
    }
    y = layers.MultiHeadDotProductAttention(**args).apply(
        {'params': freeze(params)}, inputs_q, inputs_kv)
    # Reference computation with plain numpy einsums.
    query = np.einsum('bqf,fhd->bqhd', inputs_q, query_kernel)
    key = np.einsum('bkf,fhd->bkhd', inputs_kv, key_kernel)
    value = np.einsum('bkf,fhd->bkhd', inputs_kv, value_kernel)
    logits = np.einsum('bqhd,bkhd->bhqk', query, key)
    weights = nn.softmax(logits, axis=-1)
    combined_value = np.einsum('bhqk,bkhd->bqhd', weights, value)
    y_expected = np.einsum('bqhd,hdf->bqf', combined_value, out_kernel)
    np.testing.assert_allclose(y, y_expected, rtol=1e-5, atol=1e-5)
  def test_multihead_dot_product_attention_caching(self):
    # b: batch, f: qkv_features, k: kv_len, h: num_head, d: head_dim
    b, h, d, k = 2, 3, 4, 5
    f = h * d
    base_args = SelfAttentionArgs(num_heads=h, head_dim=d, dropout_rate=0)
    args = base_args.init_args()
    cache = {
        'cached_key': np.zeros((b, h, d, k)),
        'cached_value': np.zeros((b, h, d, k)),
        'cache_index': np.array(0)
    }
    inputs_q = np.random.randn(b, 1, f)
    inputs_kv = np.random.randn(b, 1, f)
    # Mock dense general such that q, k, v projections are replaced by simple
    # reshaping.
    def mock_dense_general(self, x, **kwargs):  # pylint: disable=unused-argument
      return x.reshape(b, -1, h, d)
    with mock.patch.object(
        layers.DenseGeneral, '__call__', new=mock_dense_general):
      _, mutated = layers.MultiHeadDotProductAttention(**args).apply(
          {'cache': freeze(cache)},
          inputs_q,
          inputs_kv,
          decode=True,
          mutable=['cache'])
      updated_cache = mutated['cache']
    # Perform the same mocked projection to generate the expected cache.
    # (key|value): [b, 1, h, d]
    key = mock_dense_general(None, inputs_kv)
    value = mock_dense_general(None, inputs_kv)
    # cached_(key|value): [b, h, d, k]
    cache['cached_key'][:, :, :, 0] = key[:, 0, :, :]
    cache['cached_value'][:, :, :, 0] = value[:, 0, :, :]
    cache['cache_index'] = np.array(1)
    for name, array in cache.items():
      np.testing.assert_allclose(array, updated_cache[name])
  def test_dot_product_attention(self):
    # b: batch, f: emb_dim, q: q_len, k: kv_len, h: num_head, d: head_dim
    b, q, h, d, k = 2, 3, 4, 5, 6
    np.random.seed(0)
    query = np.random.randn(b, q, h, d)
    key = np.random.randn(b, k, h, d)
    value = np.random.randn(b, k, h, d)
    bias = np.random.randn(b, h, q, k)
    attn_out = layers.dot_product_attention(query, key, value, bias=bias)
    # Reference computation with plain numpy einsums.
    logits = np.einsum('bqhd,bkhd->bhqk', query, key)
    weights = jax.nn.softmax(logits + bias, axis=-1)
    expected = np.einsum('bhqk,bkhd->bqhd', weights, value)
    np.testing.assert_allclose(attn_out, expected, atol=1e-6)
class EmbeddingTest(parameterized.TestCase):
  """Unit tests for the layers.Embed module."""
  def test_embedder_raises_exception_for_incorrect_input_type(self):
    """Tests that inputs are integers and that an exception is raised if not."""
    embed = layers.Embed(num_embeddings=10, features=5)
    inputs = np.expand_dims(np.arange(5, dtype=np.int64), 1)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    bad_inputs = inputs.astype(np.float32)
    with self.assertRaisesRegex(
        ValueError, 'Input type must be an integer or unsigned integer.'):
      _ = embed.apply(variables, bad_inputs)
  @parameterized.named_parameters(
      {
          'testcase_name': 'with_ones',
          'init_fn': jax.nn.initializers.ones,
          'num_embeddings': 10,
          'features': 5,
          'matrix_sum': 5 * 10,
      }, {
          'testcase_name': 'with_zeros',
          'init_fn': jax.nn.initializers.zeros,
          'num_embeddings': 10,
          'features': 5,
          'matrix_sum': 0,
      })
  def test_embedding_initializes_correctly(self, init_fn, num_embeddings,
                                           features, matrix_sum):
    """Tests if the Embed class initializes with the requested initializer."""
    embed = layers.Embed(
        num_embeddings=num_embeddings,
        features=features,
        embedding_init=init_fn)
    inputs = np.expand_dims(np.arange(5, dtype=np.int64), 1)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    embedding_matrix = variables['params']['embedding']
    # The sum over the whole matrix identifies which initializer ran.
    self.assertEqual(int(np.sum(embedding_matrix)), matrix_sum)
  def test_embedding_matrix_shape(self):
    """Tests that the embedding matrix has the right shape."""
    num_embeddings = 10
    features = 5
    embed = layers.Embed(num_embeddings=num_embeddings, features=features)
    inputs = np.expand_dims(np.arange(features, dtype=np.int64), 1)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    embedding_matrix = variables['params']['embedding']
    self.assertEqual((num_embeddings, features), embedding_matrix.shape)
  def test_embedding_attend(self):
    """Tests that attending with ones returns sum of embedding vectors."""
    features = 5
    embed = layers.Embed(num_embeddings=10, features=features)
    inputs = np.array([[1]], dtype=np.int64)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    query = np.ones(features, dtype=np.float32)
    result = embed.apply(variables, query, method=embed.attend)
    expected = np.sum(variables['params']['embedding'], -1)
    np.testing.assert_array_almost_equal(result, expected)
class DenseTest(parameterized.TestCase):
  """Unit tests for layers.DenseGeneral and layers.MlpBlock."""
  def test_dense_general_no_bias(self):
    rng = random.PRNGKey(0)
    x = jnp.ones((1, 3))
    model = layers.DenseGeneral(
        features=4,
        kernel_init=initializers.ones,
    )
    y, _ = model.init_with_output(rng, x)
    self.assertEqual(y.shape, (1, 4))
    # With an all-ones kernel each output is the sum of the 3 inputs.
    np.testing.assert_allclose(y, np.full((1, 4), 3.))
  def test_dense_general_two_features(self):
    rng = random.PRNGKey(0)
    x = jnp.ones((1, 3))
    model = layers.DenseGeneral(
        features=(2, 2),
        kernel_init=initializers.ones,
    )
    y, _ = model.init_with_output(rng, x)
    # We transform the last input dimension to two output dimensions (2, 2).
    np.testing.assert_allclose(y, np.full((1, 2, 2), 3.))
  def test_dense_general_two_axes(self):
    rng = random.PRNGKey(0)
    x = jnp.ones((1, 2, 2))
    model = layers.DenseGeneral(
        features=3,
        axis=(-2, 2),  # Note: this is the same as (1, 2).
        kernel_init=initializers.ones,
    )
    y, _ = model.init_with_output(rng, x)
    # We transform the last two input dimensions (2, 2) to one output dimension.
    np.testing.assert_allclose(y, np.full((1, 3), 4.))
  def test_mlp_same_out_dim(self):
    # Golden-value test: exact initial params and outputs for PRNGKey(0).
    module = layers.MlpBlock(
        intermediate_dim=4,
        activations=('relu',),
        kernel_init=nn.initializers.xavier_uniform(),
        dtype=jnp.float32,
    )
    inputs = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32)
    params = module.init(random.PRNGKey(0), inputs, deterministic=True)
    self.assertEqual(
        jax.tree_map(lambda a: a.tolist(), params), {
            'params': {
                'wi': {
                    'kernel': [[
                        -0.8675811290740967, 0.08417510986328125,
                        0.022586345672607422, -0.9124102592468262
                    ],
                               [
                                   -0.19464373588562012, 0.49809837341308594,
                                   0.7808468341827393, 0.9267289638519287
                               ]],
                },
                'wo': {
                    'kernel': [[0.01154780387878418, 0.1397249698638916],
                               [0.974980354309082, 0.5903260707855225],
                               [-0.05997943878173828, 0.616570234298706],
                               [0.2934272289276123, 0.8181164264678955]],
                },
            },
            'params_axes': {
                'wi': {
                    'kernel_axes': AxisMetadata(names=('embed', 'mlp')),
                },
                'wo': {
                    'kernel_axes': AxisMetadata(names=('mlp', 'embed')),
                },
            },
        })
    result = module.apply(params, inputs, deterministic=True)
    np.testing.assert_allclose(
        result.tolist(),
        [[[0.5237172245979309, 0.8508185744285583],
          [0.5237172245979309, 0.8508185744285583],
          [1.2344461679458618, 2.3844780921936035]],
         [[1.0474344491958618, 1.7016371488571167],
          [0.6809444427490234, 0.9663378596305847],
          [1.0474344491958618, 1.7016371488571167]]],
        rtol=1e-6,
    )
# Run the tests through absl's runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
| 21,675 | 38.699634 | 81 | py |
FairAC | FairAC-main/src/utils.py | #%%
import numpy as np
import scipy.sparse as sp
import torch
import os
import pandas as pd
import dgl
def encode_onehot(labels):
    """One-hot encode a 1-D sequence of class labels.

    Classes are sorted before being assigned columns so the label->column
    mapping is deterministic across runs: iterating a raw ``set`` of strings
    depends on hash randomization and previously produced an arbitrary
    column order.

    Args:
        labels: iterable of hashable, orderable class labels (e.g. strings).

    Returns:
        np.ndarray of shape (len(labels), num_classes) with dtype int32,
        one row per input label.
    """
    classes = sorted(set(labels))
    identity = np.identity(len(classes))
    # Map each class to its one-hot row of the identity matrix.
    class_rows = {c: identity[i, :] for i, c in enumerate(classes)}
    return np.array([class_rows[label] for label in labels], dtype=np.int32)
#%%
#%%
def load_data(path="../dataset/cora/", dataset="cora"):
    """Load a citation network dataset (cora only for now).

    Reads "<path><dataset>.content" (node id, features..., label) and
    "<path><dataset>.cites" (citation edges), builds a symmetric,
    row-normalized adjacency matrix with self-loops, and returns fixed
    train/val/test index splits.

    Args:
        path: directory containing the dataset files.
        dataset: dataset name used as the file prefix.

    Returns:
        Tuple (adj, features, labels, idx_train, idx_val, idx_test): adj is
        a torch sparse tensor, features a dense FloatTensor, labels a
        LongTensor of class indices, and idx_* are LongTensors.
    """
    print('Loading {} dataset...'.format(dataset))
    idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
                                        dtype=np.dtype(str))
    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
    labels = encode_onehot(idx_features_labels[:, -1])
    # NOTE: a leftover debug `print(labels)` that dumped the entire one-hot
    # label matrix to stdout was removed here.
    # build graph: map raw paper ids onto contiguous 0..N-1 indices
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
                                    dtype=np.int32)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)
    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    # features = normalize(features)
    adj = normalize(adj + sp.eye(adj.shape[0]))
    # Fixed Planetoid-style splits for cora.
    idx_train = range(140)
    idx_val = range(200, 500)
    idx_test = range(500, 1500)
    features = torch.FloatTensor(np.array(features.todense()))
    # Convert one-hot labels back to class indices.
    labels = torch.LongTensor(np.where(labels)[1])
    adj = sparse_mx_to_torch_sparse_tensor(adj)
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    return adj, features, labels, idx_train, idx_val, idx_test
def load_pokec(dataset,sens_attr,predict_attr, path="../dataset/pokec/", label_number=1000,sens_number=500,seed=19,test_idx=False):
    """Load a Pokec-style CSV dataset with a sensitive attribute.

    Reads "<path>/<dataset>.csv" (node features) and
    "<path>/<dataset>_relationship.txt" (edges), builds a symmetric
    adjacency matrix with self-loops, and produces seeded random
    train/val/test splits plus a subset of nodes whose sensitive
    attribute is used for training.

    Args:
        dataset: file prefix of the dataset.
        sens_attr: name of the sensitive-attribute column.
        predict_attr: name of the label column.
        path: directory containing the dataset files.
        label_number: maximum number of labeled training nodes.
        sens_number: number of nodes used for sensitive-attribute training.
        seed: RNG seed for the shuffles below; the exact statement order of
            the seed/shuffle calls determines the splits, so it must not
            be reordered.
        test_idx: if True, the test set is everything past label_number and
            validation is aliased to it.

    Returns:
        (adj, features, labels, idx_train, idx_val, idx_test, sens,
        idx_sens_train). adj stays a scipy sparse matrix here (not a torch
        tensor); labels may contain negative values meaning "unlabeled".
    """
    print('Loading {} dataset from {}'.format(dataset,path))
    idx_features_labels = pd.read_csv(os.path.join(path,"{}.csv".format(dataset)))
    header = list(idx_features_labels.columns)
    # Exclude the id, sensitive and target columns from the feature matrix.
    header.remove("user_id")
    header.remove(sens_attr)
    header.remove(predict_attr)
    features = sp.csr_matrix(idx_features_labels[header], dtype=np.float32)
    labels = idx_features_labels[predict_attr].values
    # build graph
    idx = np.array(idx_features_labels["user_id"], dtype=int)
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt(os.path.join(path,"{}_relationship.txt".format(dataset)), dtype=int)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=int).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)
    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    # features = normalize(features)
    adj = adj + sp.eye(adj.shape[0])
    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(labels)
    # adj = sparse_mx_to_torch_sparse_tensor(adj)
    import random
    random.seed(seed)
    # Only nodes with a non-negative label are split; order of these
    # seed/shuffle calls is part of the reproducible split definition.
    label_idx = np.where(labels>=0)[0]
    random.shuffle(label_idx)
    idx_train = label_idx[:min(int(0.5 * len(label_idx)),label_number)]
    idx_val = label_idx[int(0.5 * len(label_idx)):int(0.75 * len(label_idx))]
    if test_idx:
        idx_test = label_idx[label_number:]
        idx_val = idx_test
    else:
        idx_test = label_idx[int(0.75 * len(label_idx)):]
    sens = idx_features_labels[sens_attr].values
    # Keep only test nodes whose sensitive attribute is known (>= 0).
    sens_idx = set(np.where(sens >= 0)[0])
    idx_test = np.asarray(list(sens_idx & set(idx_test)))
    sens = torch.FloatTensor(sens)
    idx_sens_train = list(sens_idx - set(idx_val) - set(idx_test))
    random.seed(seed)
    random.shuffle(idx_sens_train)
    idx_sens_train = torch.LongTensor(idx_sens_train[:sens_number])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    # random.shuffle(sens_idx)
    return adj, features, labels, idx_train, idx_val, idx_test, sens,idx_sens_train
def normalize(mx):
    """Row-normalize a sparse matrix so each non-empty row sums to 1."""
    row_sums = np.array(mx.sum(1))
    inv = np.power(row_sums, -1).flatten()
    # Rows that sum to zero produce inf; leave those rows all-zero.
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(mx)
def feature_norm(features):
    """Scale each feature column of a 2-D tensor linearly into [-1, 1]."""
    col_min = features.min(axis=0)[0]
    col_max = features.max(axis=0)[0]
    # Map [col_min, col_max] -> [0, 1] per column, then shift to [-1, 1].
    unit = (features - col_min) / (col_max - col_min)
    return 2 * unit - 1
def accuracy(output, labels):
    """Binary accuracy for raw scores: score > 0 predicts class 1."""
    preds = (output.squeeze() > 0).type_as(labels)
    n_correct = preds.eq(labels).double().sum()
    return n_correct / len(labels)
def accuracy_softmax(output, labels):
    """Multi-class accuracy: predicted class is the argmax along dim 1."""
    _, predicted = output.max(1)
    predicted = predicted.type_as(labels)
    hits = (predicted == labels).double()
    return hits.sum() / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse COO tensor.

    Args:
        sparse_mx: any scipy.sparse matrix.

    Returns:
        A torch tensor with sparse COO layout and float32 values.
    """
    coo = sparse_mx.tocoo().astype(np.float32)
    # Stack row/col into the 2 x nnz index matrix expected by COO tensors.
    indices = torch.from_numpy(
        np.vstack((coo.row, coo.col)).astype(np.int64))
    values = torch.from_numpy(coo.data)
    # torch.sparse.FloatTensor(...) is deprecated; torch.sparse_coo_tensor
    # is the supported constructor and builds an equivalent tensor.
    return torch.sparse_coo_tensor(indices, values, torch.Size(coo.shape))
#%%
#%%
def load_pokec_emb(dataset,sens_attr,predict_attr, path="../dataset/pokec/", label_number=1000,sens_number=500,seed=19,test_idx=False):
    """Load a Pokec-style dataset with precomputed graph embeddings.

    Like load_pokec, but additionally reads "<path>/<dataset>.embedding"
    (node2vec/DeepWalk-style, first column is the user_id) and left-joins
    the embedding columns onto the node features, filling missing rows
    with 0.

    Args:
        dataset: file prefix of the dataset.
        sens_attr: name of the sensitive-attribute column.
        predict_attr: name of the label column.
        path: directory containing the dataset files.
        label_number: maximum number of labeled training nodes.
        sens_number: number of nodes used for sensitive-attribute training.
        seed: RNG seed; the exact order of seed/shuffle calls below defines
            the reproducible splits and must not be reordered.
        test_idx: if True, the test set is everything past label_number.

    Returns:
        (adj, features, labels, idx_train, idx_val, idx_test, sens,
        idx_sens_train), with adj as a scipy sparse matrix.
    """
    print('Loading {} dataset from {}'.format(dataset,path))
    graph_embedding = np.genfromtxt(
        os.path.join(path,"{}.embedding".format(dataset)),
        skip_header=1,
        dtype=float
    )
    embedding_df = pd.DataFrame(graph_embedding)
    embedding_df[0] = embedding_df[0].astype(int)
    embedding_df = embedding_df.rename(index=int, columns={0:"user_id"})
    idx_features_labels = pd.read_csv(os.path.join(path,"{}.csv".format(dataset)))
    # Attach embedding columns to each node; nodes without an embedding get 0s.
    idx_features_labels = pd.merge(idx_features_labels,embedding_df,how="left",on="user_id")
    idx_features_labels = idx_features_labels.fillna(0)
    # Exclude the id, sensitive and target columns from the feature matrix.
    header = list(idx_features_labels.columns)
    header.remove("user_id")
    header.remove(sens_attr)
    header.remove(predict_attr)
    features = sp.csr_matrix(idx_features_labels[header], dtype=np.float32)
    labels = idx_features_labels[predict_attr].values
    # build graph
    idx = np.array(idx_features_labels["user_id"], dtype=int)
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt(os.path.join(path,"{}_relationship.txt".format(dataset)), dtype=int)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)
    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    adj = adj + sp.eye(adj.shape[0])
    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(labels)
    import random
    random.seed(seed)
    label_idx = np.where(labels>=0)[0]
    random.shuffle(label_idx)
    idx_train = label_idx[:min(int(0.5 * len(label_idx)),label_number)]
    idx_val = label_idx[int(0.5 * len(label_idx)):int(0.75 * len(label_idx))]
    if test_idx:
        idx_test = label_idx[label_number:]
    else:
        idx_test = label_idx[int(0.75 * len(label_idx)):]
    sens = idx_features_labels[sens_attr].values
    # Keep only test nodes whose sensitive attribute is known (>= 0).
    sens_idx = set(np.where(sens >= 0)[0])
    idx_test = np.asarray(list(sens_idx & set(idx_test)))
    sens = torch.FloatTensor(sens)
    idx_sens_train = list(sens_idx - set(idx_val) - set(idx_test))
    random.seed(seed)
    random.shuffle(idx_sens_train)
    idx_sens_train = torch.LongTensor(idx_sens_train[:sens_number])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    return adj, features, labels, idx_train, idx_val, idx_test, sens, idx_sens_train
FairAC | FairAC-main/src/train_fairAC_GNN_report.py | import time
import argparse
import dgl
import numpy as np
from sklearn.model_selection import train_test_split
import torch
import torch.nn.functional as F
from utils import accuracy, load_pokec
from models.FairAC import FairAC2, GNN
def parser_args():
    """Build and parse the command-line arguments for FairAC training.

    Uses parse_known_args so unrecognized flags are ignored, and derives
    args.cuda from --no-cuda plus CUDA availability.

    Returns:
        argparse.Namespace with all training settings.
    """
    # Training settings
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='Disables CUDA training.')
    parser.add_argument('--seed', type=int, default=42, help='Random seed.')
    parser.add_argument('--epochs', type=int, default=2000,
                        help='Number of epochs to train.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Initial learning rate.')
    parser.add_argument('--weight_decay', type=float, default=1e-5,
                        help='Weight decay (L2 loss on parameters).')
    parser.add_argument('--hidden', type=int, default=128,
                        help='Number of hidden units of the sensitive attribute estimator')
    parser.add_argument('--dropout', type=float, default=.5,
                        help='Dropout rate (1 - keep probability).')
    parser.add_argument('--lambda1', type=float, default=1.,
                        help='The hyperparameter of loss Lc')
    parser.add_argument('--lambda2', type=float, default=1.,
                        help='The hyperparameter of loss Lt, i.e. beta in paper')
    parser.add_argument('--model', type=str, default="GAT",
                        help='the type of model GCN/GAT')
    parser.add_argument('--dataset', type=str, default='pokec_n',
                        choices=['pokec_z', 'pokec_n', 'nba'])
    parser.add_argument('--num-hidden', type=int, default=64,
                        help='Number of hidden units of classifier.')
    # NOTE(review): '--num-heads' here and '--num_heads' further down both
    # resolve to dest 'num_heads'; the later default/help wins. Looks like an
    # accidental duplicate — confirm which spelling callers use before
    # removing either.
    parser.add_argument("--num-heads", type=int, default=1,
                        help="number of hidden attention heads")
    parser.add_argument("--num-out-heads", type=int, default=1,
                        help="number of output attention heads")
    parser.add_argument("--num-layers", type=int, default=1,
                        help="number of hidden layers")
    parser.add_argument("--residual", action="store_true", default=False,
                        help="use residual connection")
    parser.add_argument("--attn-drop", type=float, default=.0,
                        help="attention dropout")
    parser.add_argument('--negative-slope', type=float, default=0.2,
                        help="the negative slope of leaky relu")
    parser.add_argument('--acc', type=float, default=0.688,
                        help='the selected FairGNN accuracy on val would be at least this high')
    parser.add_argument('--roc', type=float, default=0.745,
                        help='the selected FairGNN ROC score on val would be at least this high')
    parser.add_argument('--sens_number', type=int, default=200,
                        help="the number of sensitive attributes")
    parser.add_argument('--label_number', type=int, default=500,
                        help="the number of labels")
    parser.add_argument('--attn_vec_dim', type=int, default=128,
                        help="attention vector dim")
    parser.add_argument('--num_heads', type=int, default=1,
                        help="the number of attention heads")
    parser.add_argument('--feat_drop_rate', type=float, default=0.3,
                        help="feature dropout rate")
    parser.add_argument('--num_sen_class', type=int, default=1,
                        help="number of sensitive classes")
    parser.add_argument('--transformed_feature_dim', type=int, default=128,
                        help="transformed feature dimensions")
    parser.add_argument('--sample_number', type=int, default=1000,
                        help="the number of samples for training")
    parser.add_argument('--load', type=bool, default=False,
                        help="load AC model, use with AC_model_path")
    parser.add_argument('--AC_model_path', type=str, default="./AC_model",
                        help="AC_model_path")
    parser.add_argument('--GNN_model_path', type=str, default="./GNN_model",
                        help="GNN_model_path")
    args = parser.parse_known_args()[0]
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    print(args)
    return args
def fair_metric(output, idx, labels, sens):
    """Compute group-fairness gaps on the nodes selected by ``idx``.

    Args:
        output: model logits, indexable by ``idx`` (positive logit => class 1).
        idx: node indices (tensor) to evaluate on.
        labels: ground-truth binary labels for all nodes.
        sens: binary sensitive attribute for all nodes.

    Returns:
        (parity, equality): statistical-parity gap and equal-opportunity gap
        between the two sensitive groups.
    """
    idx_np = idx.cpu().numpy()
    truth = labels[idx].cpu().numpy()
    group = sens.cpu().numpy()[idx_np]
    group0 = group == 0
    group1 = group == 1
    # Positive-label members of each sensitive group (for equal opportunity).
    group0_pos = np.bitwise_and(group0, truth == 1)
    group1_pos = np.bitwise_and(group1, truth == 1)
    preds = (output[idx].squeeze() > 0).type_as(labels).cpu().numpy()

    def pos_rate(mask):
        # Fraction of nodes in `mask` predicted positive.
        return sum(preds[mask]) / sum(mask)

    parity = abs(pos_rate(group0) - pos_rate(group1))
    equality = abs(pos_rate(group0_pos) - pos_rate(group1_pos))
    return parity, equality
def main():
    """End-to-end FairAC training and evaluation driver.

    Loads a pokec/NBA graph whose node attributes are partially dropped,
    pretrains the attribute-completion (AC) model, then trains it
    adversarially against a sensitive-attribute classifier. Periodically a
    fresh GNN classifier is trained on the completed features (excluding
    test nodes) and evaluated on the test split for accuracy, ROC-AUC and
    group fairness (parity/equality). Checkpoints and logs are written to
    the current working directory.

    NOTE(review): requires CUDA — several tensors are moved with .cuda()
    unconditionally below; confirm before running CPU-only.
    """
    args = parser_args()
    # Seed every RNG in use for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
    # Load data: dataset-specific names, DeepWalk embeddings, sensitive /
    # label attributes and split sizes.
    print(args.dataset)
    if args.dataset != 'nba':
        if args.dataset == 'pokec_z':
            dataset = 'region_job'
            embedding = np.load('pokec_z_embedding10.npy') # embedding is produced by Deep Walk
            embedding = torch.tensor(embedding)
            sens_attr = "region"
        else:
            dataset = 'region_job_2'
            embedding = np.load('pokec_n_embedding10.npy') # embedding is produced by Deep Walk
            embedding = torch.tensor(embedding)
            sens_attr = "region"
        predict_attr = "I_am_working_in_field"
        label_number = args.label_number
        sens_number = args.sens_number
        seed = 20
        path = "../dataset/pokec/"
        test_idx = False
    else:
        dataset = 'nba'
        sens_attr = "country"
        predict_attr = "SALARY"
        label_number = 100
        sens_number = 50
        seed = 42
        path = "../dataset/NBA"
        test_idx = True
        embedding = np.load('nba_embedding10.npy') # embedding is produced by Deep Walk
        embedding = torch.tensor(embedding)
    print(dataset)
    adj, features, labels, idx_train, _, idx_test, sens, _ = load_pokec(dataset,
                                                                        sens_attr,
                                                                        predict_attr,
                                                                        path=path,
                                                                        label_number=label_number,
                                                                        sens_number=sens_number,
                                                                        seed=seed, test_idx=test_idx)
    # remove idx_test adj, features: build a boolean mask of non-test nodes
    # and the corresponding sub-graph adjacency.
    exclude_test = torch.ones(adj.shape[1]).bool()  # indices after removing idx_test
    exclude_test[idx_test] = False
    sub_adj = adj[exclude_test][:, exclude_test]
    # Map each original node id to its position after dropping test nodes,
    # so training labels can be indexed into the sub-graph.
    indices = []
    counter = 0
    for e in exclude_test:
        indices.append(counter)
        if e:
            counter += 1
    indices = torch.LongTensor(indices)
    y_idx = indices[idx_train]
    # ################ modification on dataset idx######################
    print(len(idx_test))
    from utils import feature_norm
    # G = dgl.DGLGraph()
    G = dgl.from_scipy(adj, device='cuda:0')
    subG = dgl.from_scipy(sub_adj, device='cuda:0')
    if dataset == 'nba':
        features = feature_norm(features)
    # Binarise task labels and the sensitive attribute.
    labels[labels > 1] = 1
    if sens_attr:
        sens[sens > 0] = 1
    # Model and optimizer
    adj_mat = adj.toarray()
    adjTensor = torch.FloatTensor(adj_mat)
    # Split the node set into 4 chunks so full-graph attribute completion
    # fits in GPU memory.
    sub_nodes = np.array_split(range(features.shape[0]), 4)
    sub_nodes = [torch.tensor(s).cuda() for s in sub_nodes]
    transformed_feature_dim = args.transformed_feature_dim
    GNNmodel = GNN(nfeat=transformed_feature_dim, args=args)
    ACmodel = FairAC2(feature_dim=features.shape[1],transformed_feature_dim=transformed_feature_dim, emb_dim=embedding.shape[1], args=args)
    if args.load:
        # Resume both models from checkpoints instead of training the AC model.
        ACmodel = torch.load(args.AC_model_path)
        GNNmodel = torch.load(args.GNN_model_path)
    # mdotodel.estimator.load_state_dict(torch.load("./checkpoint/GCN_sens_{}_ns_{}".format(dataset, sens_number)))
    if args.cuda:
        GNNmodel.cuda()
        ACmodel.cuda()
        embedding = embedding.cuda()
        features = features.cuda()
        labels = labels.cuda()
        idx_train = idx_train.cuda()
        idx_test = idx_test.cuda()
        sens = sens.cuda()
    # fair sub graph adj for all graph: pre-compute, per chunk, which nodes
    # keep their attributes and the adjacency from every chunk node to its
    # attribute-keeping neighbours.
    subgraph_adj_list = []
    feat_keep_idx_sub_list = []
    feat_drop_idx_sub_list = []
    for sub_node in sub_nodes:
        feat_keep_idx_sub, feat_drop_idx_sub = train_test_split(np.arange(len(sub_node)),
                                                                test_size=args.feat_drop_rate)
        feat_keep_idx_sub_list.append(feat_keep_idx_sub)
        feat_drop_idx_sub_list.append(feat_drop_idx_sub)
        subgraph_adj = adjTensor[sub_node][:, sub_node][:, feat_keep_idx_sub]
        subgraph_adj_list.append(subgraph_adj)
    from sklearn.metrics import roc_auc_score
    # Train model
    t_total = time.time()
    best_result = {}
    best_fair = 100
    best_acc = 0
    best_auc = 0
    best_ar = 0
    best_ars_result = {}
    features_embedding = torch.zeros((features.shape[0], transformed_feature_dim)).cuda()
    for epoch in range(args.epochs):
        t = time.time()
        GNNmodel.train()
        ACmodel.train()
        GNNmodel.optimizer_G.zero_grad()
        ACmodel.optimizer_AC.zero_grad()
        ACmodel.optimizer_S.zero_grad()
        if epoch < args.epochs and not args.load:
            # define train dataset, using the sub_nodes[0][feat_keep_idx_sub], which are fully labeled
            ac_train_idx = sub_nodes[0][feat_keep_idx_sub_list[0]][:args.sample_number]
            # ac_train_idx = sub_nodes[epoch%len(sub_nodes)][feat_keep_idx_sub_list[epoch%len(sub_nodes)]][:1000]
            feat_keep_idx, feat_drop_idx = train_test_split(np.arange(ac_train_idx.shape[0]),
                                                            test_size=args.feat_drop_rate)
            features_train = features[ac_train_idx]
            sens_train = sens[ac_train_idx]
            training_adj = adjTensor[ac_train_idx][:, ac_train_idx][:, feat_keep_idx].cuda()
            feature_src_re2, features_hat, transformed_feature = ACmodel(training_adj, embedding[ac_train_idx], embedding[ac_train_idx][feat_keep_idx],
                                                                         features_train[feat_keep_idx])
            # Completion loss on the artificially dropped attributes plus
            # autoencoder reconstruction loss on the kept attributes.
            loss_ac = ACmodel.loss(features_train[feat_drop_idx], feature_src_re2[feat_drop_idx, :])
            loss_reconstruction = F.pairwise_distance(features_hat, features_train[feat_keep_idx],2).mean()
            # base AC finished###############
            # pretrain AC model
            if epoch < 200:
                # ###############pretrain AC model (no adversary yet) ##########################
                print("Epoch: {:04d}, loss_ac: {:.4f},loss_reconstruction: {:.4f}"
                      .format(epoch, loss_ac.item(), loss_reconstruction.item()))
                AC_loss = loss_reconstruction + loss_ac
                AC_loss.backward()
                ACmodel.optimizer_AC.step()
                continue
            # mitigate unfairness loss
            # Step 1: train the sensitive classifier on detached embeddings.
            transformed_feature_detach = transformed_feature.detach()
            sens_prediction_detach = ACmodel.sensitive_pred(transformed_feature_detach)
            criterion = torch.nn.BCEWithLogitsLoss()
            # only update sensitive classifier
            Csen_loss = criterion(sens_prediction_detach, sens_train[feat_keep_idx].unsqueeze(1).float())
            # sensitive optimizer.step
            Csen_loss.backward()
            ACmodel.optimizer_S.step()
            # Step 2: adversarially push the sensitive predictions on the
            # completed features towards 0.5 (maximum confusion).
            feature_src_re2[feat_keep_idx] = transformed_feature
            sens_prediction = ACmodel.sensitive_pred(feature_src_re2[feat_drop_idx])
            sens_confusion = torch.ones(sens_prediction.shape, device=sens_prediction.device, dtype=torch.float32) / 2
            Csen_adv_loss = criterion(sens_prediction, sens_confusion)
            sens_prediction_keep = ACmodel.sensitive_pred(transformed_feature)
            Csen_loss = criterion(sens_prediction_keep, sens_train[feat_keep_idx].unsqueeze(1).float())
            # sensitive optimizer.step
            # AC optimizer.step: adversarial terms weighted by lambda2,
            # completion loss by lambda1, plus reconstruction.
            AC_loss = args.lambda2*(Csen_adv_loss -Csen_loss)+loss_reconstruction + args.lambda1*loss_ac
            AC_loss.backward()
            ACmodel.optimizer_AC.step()
        if epoch < args.epochs and epoch % 100 == 0:
            print("Epoch: {:04d}, loss_ac: {:.4f}, loss_reconstruction: {:.4f}, Csen_loss: {:.4}, Csen_adv_loss: {:.4f}"
                  .format(epoch, loss_ac.item(), loss_reconstruction.item(), Csen_loss.item(), Csen_adv_loss.item()
                          ))
        # Periodic evaluation: complete attributes for the whole graph, train
        # a fresh GNN on the non-test sub-graph, then test on idx_test.
        if epoch > 1000 and epoch % 200 == 0 or epoch == args.epochs-1:
            with torch.no_grad():
                # ############# Attribute completion over graph######################
                for i, sub_node in enumerate(sub_nodes):
                    feat_keep_idx_sub = feat_keep_idx_sub_list[i]
                    feat_drop_idx_sub = feat_drop_idx_sub_list[i]
                    feature_src_AC, features_hat, transformed_feature = ACmodel(subgraph_adj_list[i].cuda(),
                                                                                embedding[sub_node],
                                                                                embedding[sub_node][
                                                                                    feat_keep_idx_sub],
                                                                                features[sub_node][
                                                                                    feat_keep_idx_sub])
                    # Dropped nodes get completed embeddings; kept nodes get
                    # the encoding of their original features.
                    features_embedding[sub_node[feat_drop_idx_sub]] = feature_src_AC[feat_drop_idx_sub]
                    features_embedding[sub_node[feat_keep_idx_sub]] = transformed_feature
            GNNmodel_inside = GNN(nfeat=transformed_feature_dim, args=args).cuda()
            GNNmodel_inside.train()
            for sub_epoch in range(1000):
                features_embedding_exclude_test = features_embedding[exclude_test].detach()
                feat_emb, y = GNNmodel_inside(subG, features_embedding_exclude_test)
                Cy_loss = GNNmodel_inside.criterion(y[y_idx], labels[idx_train].unsqueeze(1).float())
                GNNmodel_inside.optimizer_G.zero_grad()
                Cy_loss.backward()
                GNNmodel_inside.optimizer_G.step()
                if args.load:
                    # No AC training happened this run; log zeros instead.
                    loss_ac = torch.zeros(1)
                    loss_reconstruction = torch.zeros(1)
                    Csen_loss = torch.zeros(1)
                    Csen_adv_loss = torch.zeros(1)
                if sub_epoch % 100 == 0:
                    print(
                        "Epoch: {:04d}, sub_epoch: {:04d}, loss_ac: {:.4f}, loss_reconstruction: {:.4f}, Csen_loss: {:.4}, Csen_adv_loss: {:.4f}, Cy_loss: {:.4f}"
                        .format(epoch, sub_epoch, loss_ac.item(), loss_reconstruction.item(), Csen_loss.item(),
                                Csen_adv_loss.item(),
                                Cy_loss.item()))
            ##################### training finished ###################################
            cls_loss = Cy_loss
            GNNmodel_inside.eval()
            ACmodel.eval()
            with torch.no_grad():
                _, output = GNNmodel_inside(G, features_embedding)
                acc_test = accuracy(output[idx_test], labels[idx_test])
                roc_test = roc_auc_score(labels[idx_test].cpu().numpy(),
                                         output[idx_test].detach().cpu().numpy())
                parity, equality = fair_metric(output, idx_test, labels, sens)
                # if acc_val > args.acc and roc_val > args.roc:
                # Track the best snapshots by accuracy, by AUC and by acc+AUC.
                if best_acc <= acc_test:
                    best_acc = acc_test
                    best_acc_result = {}
                    best_acc_result['acc'] = acc_test.item()
                    best_acc_result['roc'] = roc_test
                    best_acc_result['parity'] = parity
                    best_acc_result['equality'] = equality
                    best_ars_result['best_acc_result'] = best_acc_result
                if best_auc <= roc_test:
                    best_auc = roc_test
                    best_auc_result = {}
                    best_auc_result['acc'] = acc_test.item()
                    best_auc_result['roc'] = roc_test
                    best_auc_result['parity'] = parity
                    best_auc_result['equality'] = equality
                    best_ars_result['best_auc_result'] = best_auc_result
                if best_ar <= roc_test + acc_test:
                    best_ar = roc_test + acc_test
                    best_ar_result = {}
                    best_ar_result['acc'] = acc_test.item()
                    best_ar_result['roc'] = roc_test
                    best_ar_result['parity'] = parity
                    best_ar_result['equality'] = equality
                    best_ars_result['best_ar_result'] = best_ar_result
                # Among runs that clear the acc/roc thresholds, keep and save
                # the fairest model (smallest parity + equality).
                if acc_test > args.acc and roc_test > args.roc:
                    if best_fair > parity + equality:
                        best_fair = parity + equality
                        best_result['acc'] = acc_test.item()
                        best_result['roc'] = roc_test
                        best_result['parity'] = parity
                        best_result['equality'] = equality
                        torch.save(GNNmodel_inside, "GNNinside_epoch{:04d}_acc{:.4f}_roc{:.4f}_par{:.4f}_eq_{:.4f}".format(epoch,
                                                                                                                           acc_test.item(),
                                                                                                                           roc_test
                                                                                                                           ,
                                                                                                                           parity,
                                                                                                                           equality))
                        torch.save(ACmodel,
                                   "ACmodelinside_epoch{:04d}_acc{:.4f}_roc{:.4f}_par{:.4f}_eq_{:.4f}".format(epoch,
                                                                                                              acc_test.item(),
                                                                                                              roc_test
                                                                                                              , parity,
                                                                                                              equality))
                print("=================================")
                log = "Epoch: {:04d}, loss_ac: {:.4f}, loss_reconstruction: {:.4f}, Csen_loss: {:.4}, Csen_adv_loss: {:.4f}, cls: {:.4f}" \
                    .format(epoch, loss_ac.item(), loss_reconstruction.item(), Csen_loss.item(),
                            Csen_adv_loss.item(), cls_loss.item())
                with open('log.txt', 'a') as f:
                    f.write(log)
                print("Test:",
                      "accuracy: {:.4f}".format(acc_test.item()),
                      "roc: {:.4f}".format(roc_test),
                      "parity: {:.4f}".format(parity),
                      "equality: {:.4f}".format(equality))
                log = 'Test: accuracy: {:.4f} roc: {:.4f} parity: {:.4f} equality: {:.4f}\n' \
                    .format(acc_test.item(), roc_test, parity, equality)
                with open('log.txt', 'a') as f:
                    f.write(log)
    print("Optimization Finished!")
    print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
    print('============performace on test set=============')
    print(best_ars_result)
    with open('log.txt', 'a') as f:
        f.write(str(best_ars_result))
    if len(best_result) > 0:
        log = "Test: accuracy: {:.4f}, roc: {:.4f}, parity: {:.4f}, equality: {:.4f}"\
            .format(best_result['acc'],best_result['roc'], best_result['parity'],best_result['equality'])
        with open('log.txt', 'a') as f:
            f.write(log)
        print("Test:",
              "accuracy: {:.4f}".format(best_result['acc']),
              "roc: {:.4f}".format(best_result['roc']),
              "parity: {:.4f}".format(best_result['parity']),
              "equality: {:.4f}".format(best_result['equality']))
    else:
        print("Please set smaller acc/roc thresholds")
# Script entry point.
if __name__ == '__main__':
    main()
| 21,680 | 50.376777 | 162 | py |
FairAC | FairAC-main/src/models/HGNN_AC.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class HGNN_AC(nn.Module):
    """Multi-head attention module for attribute completion.

    For each destination node it aggregates the features of source nodes,
    guided by topological embeddings, and averages the outputs of
    ``num_heads`` independent attention heads.
    """
    def __init__(self, in_dim, hidden_dim, dropout, activation, num_heads, cuda=False):
        super(HGNN_AC, self).__init__()
        self.dropout = dropout
        # Heads live in a plain Python list; add_module below registers each
        # head's parameters under the name 'attention_{i}'.
        # NOTE(review): nn.ModuleList would be more idiomatic, but it would
        # change state-dict keys and break loading of existing checkpoints.
        self.attentions = [AttentionLayer(in_dim, hidden_dim, dropout, activation, cuda) for _ in range(num_heads)]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        """Return completed features for the destination nodes.

        Args:
            bias: dense adjacency (dest x src) acting as attention mask.
            emb_dest: topological embeddings of destination nodes.
            emb_src: topological embeddings of source nodes.
            feature_src: features of source nodes to aggregate.
        """
        # Randomly drop entries of the adjacency during training only.
        adj = F.dropout(bias, self.dropout, training=self.training)
        # Stack per-head completions along a new leading dim and average.
        x = torch.cat([att(adj, emb_dest, emb_src, feature_src).unsqueeze(0) for att in self.attentions], dim=0)
        return torch.mean(x, dim=0, keepdim=False)
class AttentionLayer(nn.Module):
    """Single graph-attention head used for attribute completion.

    Scores every (destination, source) pair from their topological
    embeddings and returns, per destination node, an attention-weighted
    combination of the source node features.
    """

    def __init__(self, in_dim, hidden_dim, dropout, activation, cuda=False):
        super(AttentionLayer, self).__init__()
        self.dropout = dropout
        self.activation = activation
        self.is_cuda = cuda
        tensor_type = torch.cuda.FloatTensor if cuda else torch.FloatTensor
        # Xavier-initialised projection of embeddings into the hidden space.
        self.W = nn.Parameter(nn.init.xavier_normal_(
            torch.Tensor(in_dim, hidden_dim).type(tensor_type),
            gain=np.sqrt(2.0)), requires_grad=True)
        # Bilinear interaction matrix between destination and source projections.
        self.W2 = nn.Parameter(nn.init.xavier_normal_(
            torch.Tensor(hidden_dim, hidden_dim).type(tensor_type),
            gain=np.sqrt(2.0)), requires_grad=True)
        self.leakyrelu = nn.LeakyReLU(0.2)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        """Return attention-aggregated source features per destination node."""
        src_proj = torch.mm(emb_src, self.W)
        dest_proj = torch.mm(emb_dest, self.W)
        # Raw pairwise scores, shape (dest, src).
        scores = self.leakyrelu(torch.mm(torch.mm(dest_proj, self.W2), src_proj.t()))
        # Mask non-edges with a very large negative value so softmax gives
        # them (numerically) zero weight.
        masked = torch.where(bias > 0, scores, -9e15 * torch.ones_like(scores))
        weights = F.softmax(masked, dim=1)
        weights = F.dropout(weights, self.dropout, training=self.training)
        return self.activation(torch.matmul(weights, feature_src))
| 2,074 | 38.903846 | 115 | py |
FairAC | FairAC-main/src/models/GCN.py | import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GraphConv
class GCN(nn.Module):
    """Two-layer GCN backbone followed by a linear classification head."""

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.body = GCN_Body(nfeat, nhid, dropout)
        self.fc = nn.Linear(nhid, nclass)

    def forward(self, g, x):
        """Return class logits for every node of graph ``g``."""
        hidden = self.body(g, x)
        return self.fc(hidden)
# def GCN(nn.Module):
class GCN_Body(nn.Module):
    """Feature extractor: two stacked GraphConv layers with dropout."""

    def __init__(self, nfeat, nhid, dropout):
        super(GCN_Body, self).__init__()
        self.gc1 = GraphConv(nfeat, nhid)
        self.gc2 = GraphConv(nhid, nhid)
        self.dropout = nn.Dropout(dropout)

    def forward(self, g, x):
        """Return node representations (no classification head)."""
        hidden = self.dropout(F.relu(self.gc1(g, x)))
        return self.gc2(g, hidden)
| 830 | 22.742857 | 53 | py |
FairAC | FairAC-main/src/models/FairGNN.py | import random
import torch.nn as nn
from .GCN import GCN,GCN_Body
from .GAT import GAT,GAT_body
from .SAGE import SAGE_Body
from .HGNN_AC import HGNN_AC
import torch
import torch.nn.functional as F
import numpy as np
def get_model(nfeat, args):
    """Build the GNN backbone selected by ``args.model``.

    Args:
        nfeat: input feature dimensionality.
        args: namespace providing ``model`` plus the chosen backbone's
            hyper-parameters (num_hidden, dropout, GAT head counts, ...).

    Returns:
        The backbone ``nn.Module`` (representation only, no task head).

    Raises:
        ValueError: if ``args.model`` is not one of "GCN", "GAT", "SAGE".
    """
    if args.model == "GCN":
        return GCN_Body(nfeat, args.num_hidden, args.dropout)
    if args.model == "GAT":
        # One head count per hidden layer, then the output head count.
        heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
        return GAT_body(args.num_layers, nfeat, args.num_hidden, heads,
                        args.dropout, args.attn_drop, args.negative_slope,
                        args.residual)
    if args.model == "SAGE":
        return SAGE_Body(nfeat, args.num_hidden, args.dropout)
    # Fail fast: the original printed a message and returned None, which only
    # surfaced later as a confusing AttributeError in the caller.
    raise ValueError("Model not implement: {}".format(args.model))
class FairGNN(nn.Module):
    """FairGNN: GNN node classifier trained adversarially for fairness.

    Components: a GCN ``estimator`` predicting the (partially observed)
    sensitive attribute, a GNN backbone with a label ``classifier``, and a
    linear adversary ``adv`` that tries to recover the sensitive attribute
    from the learned representation.
    """
    def __init__(self, nfeat, args):
        super(FairGNN,self).__init__()
        nhid = args.num_hidden
        dropout = args.dropout
        self.estimator = GCN(nfeat,args.hidden,1,dropout)
        self.GNN = get_model(nfeat,args)
        self.classifier = nn.Linear(nhid,1)
        self.adv = nn.Linear(nhid,1)
        # optimizer_G updates backbone + classifier + estimator;
        # optimizer_A updates only the adversary.
        G_params = list(self.GNN.parameters()) + list(self.classifier.parameters()) + list(self.estimator.parameters())
        self.optimizer_G = torch.optim.Adam(G_params, lr = args.lr, weight_decay = args.weight_decay)
        self.optimizer_A = torch.optim.Adam(self.adv.parameters(), lr = args.lr, weight_decay = args.weight_decay)
        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()
        self.G_loss = 0
        self.A_loss = 0

    def forward(self,g,x):
        """Return (label logits, estimated sensitive-attribute logits)."""
        s = self.estimator(g,x)
        z = self.GNN(g,x)
        y = self.classifier(z)
        return y,s

    def optimize(self,g,x,labels,idx_train,sens,idx_sens_train):
        """One adversarial training step: update G (backbone/classifier/
        estimator), then the adversary."""
        self.train()
        ### update E, G
        # Freeze the adversary so only G's parameters receive gradients.
        self.adv.requires_grad_(False)
        self.optimizer_G.zero_grad()
        s = self.estimator(g,x)
        h = self.GNN(g,x)
        y = self.classifier(h)
        s_g = self.adv(h)
        # Sensitive scores: estimator's sigmoid output, overwritten with the
        # ground truth where sensitive labels are available.
        s_score = torch.sigmoid(s.detach())
        # s_score = (s_score > 0.5).float()
        s_score[idx_sens_train]=sens[idx_sens_train].unsqueeze(1).float()
        y_score = torch.sigmoid(y)
        # Absolute covariance between sensitive score and prediction score —
        # a differentiable proxy for statistical parity, to be minimised.
        self.cov = torch.abs(torch.mean((s_score - torch.mean(s_score)) * (y_score - torch.mean(y_score))))
        self.cls_loss = self.criterion(y[idx_train],labels[idx_train].unsqueeze(1).float())
        self.adv_loss = self.criterion(s_g,s_score)
        # G is rewarded (negative sign) for making the adversary fail.
        self.G_loss = self.cls_loss + self.args.alpha * self.cov - self.args.beta * self.adv_loss
        self.G_loss.backward()
        self.optimizer_G.step()
        ## update Adv
        self.adv.requires_grad_(True)
        self.optimizer_A.zero_grad()
        # Detach h: the adversary trains on a frozen representation.
        s_g = self.adv(h.detach())
        self.A_loss = self.criterion(s_g,s_score)
        self.A_loss.backward()
        self.optimizer_A.step()
class FairGnn(nn.Module):
    """GNN backbone with a task-label head and a sensitive-attribute head.

    ``optimizer_G`` trains the backbone plus the label classifier, while
    ``optimizer_S`` trains only the sensitive-attribute classifier.
    """

    def __init__(self, nfeat, args):
        super(FairGnn, self).__init__()
        hidden_dim = args.num_hidden
        self.GNN = get_model(nfeat, args)
        self.classifier = nn.Linear(hidden_dim, 1)
        self.classifierSen = nn.Linear(hidden_dim, 1)
        backbone_params = list(self.GNN.parameters()) + list(self.classifier.parameters())
        self.optimizer_G = torch.optim.Adam(backbone_params, lr=args.lr, weight_decay=args.weight_decay)
        self.optimizer_S = torch.optim.Adam(self.classifierSen.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()
        self.G_loss = 0
        self.A_loss = 0

    def forward(self, g, x):
        """Return (representation, label logits, sensitive logits)."""
        rep = self.GNN(g, x)
        return rep, self.classifier(rep), self.classifierSen(rep)
# only has a attention attribute completion model, without autoencoder.
class ClassicAC(nn.Module):
    """Plain attention-based attribute completion (no autoencoder)."""

    def __init__(self, emb_dim, args):
        super(ClassicAC, self).__init__()
        self.hgnn_ac = HGNN_AC(in_dim=emb_dim, hidden_dim=args.attn_vec_dim, dropout=args.dropout,
                               activation=F.elu, num_heads=args.num_heads, cuda=args.cuda)
        self.optimizer_AC = torch.optim.Adam(self.hgnn_ac.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        """Complete features; second return kept None for API parity with
        the autoencoder variants."""
        completed = self.hgnn_ac(bias, emb_dest, emb_src, feature_src)
        return completed, None

    def loss(self, origin_feature, AC_feature):
        """Mean Euclidean distance between true and completed features."""
        return F.pairwise_distance(origin_feature, AC_feature, 2).mean()
# baseAC, used autoencoder to improve performance.
class BaseAC(nn.Module):
    """Attribute completion wrapped in a linear autoencoder.

    Features are encoded into a lower-dimensional space, completed there by
    an HGNN-AC attention module, and decoded back for reconstruction.
    """

    def __init__(self, feature_dim, transformed_feature_dim, emb_dim, args):
        super(BaseAC, self).__init__()
        self.fc = torch.nn.Linear(feature_dim, transformed_feature_dim)
        nn.init.xavier_normal_(self.fc.weight, gain=1.414)
        self.fcdecoder = torch.nn.Linear(transformed_feature_dim, feature_dim)
        nn.init.xavier_normal_(self.fcdecoder.weight, gain=1.414)
        self.hgnn_ac = HGNN_AC(in_dim=emb_dim, hidden_dim=args.attn_vec_dim, dropout=args.dropout,
                               activation=F.elu, num_heads=args.num_heads, cuda=args.cuda)
        trainable = (list(self.fc.parameters())
                     + list(self.fcdecoder.parameters())
                     + list(self.hgnn_ac.parameters()))
        self.optimizer_AC = torch.optim.Adam(trainable, lr=args.lr, weight_decay=args.weight_decay)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        """Return (completed encoded features, reconstructed raw features)."""
        encoded = self.fc(feature_src)
        completed = self.hgnn_ac(bias, emb_dest, emb_src, encoded)
        reconstructed = self.fcdecoder(encoded)
        return completed, reconstructed

    def feature_transform(self, features):
        """Encode raw features into the transformed space."""
        return self.fc(features)

    def feature_decoder(self, transformed_features):
        """Decode transformed features back into the raw feature space."""
        return self.fcdecoder(transformed_features)

    def loss(self, origin_feature, AC_feature):
        """Completion loss measured in the transformed feature space."""
        return F.pairwise_distance(self.fc(origin_feature), AC_feature, 2).mean()
# Fair AC using Fair select approach. done
class FairSelectAC(nn.Module):
    """Attribute completion with "fair selection" of neighbours.

    Same autoencoder + HGNN-AC structure as BaseAC, but before completion
    the adjacency is sub-sampled per node so that every sensitive group
    contributes equally many neighbours (see :meth:`fair_select`).
    """
    def __init__(self, feature_dim, transformed_feature_dim, emb_dim, args):
        super(FairSelectAC, self).__init__()
        # Linear encoder/decoder pair around the completion module.
        self.fc = torch.nn.Linear(feature_dim, transformed_feature_dim)
        nn.init.xavier_normal_(self.fc.weight, gain=1.414)
        self.fcdecoder = torch.nn.Linear(transformed_feature_dim, feature_dim)
        nn.init.xavier_normal_(self.fcdecoder.weight, gain=1.414)
        self.hgnn_ac = HGNN_AC(in_dim=emb_dim, hidden_dim=args.attn_vec_dim, dropout=args.dropout,
                               activation=F.elu, num_heads=args.num_heads, cuda=args.cuda)
        AC_params = list(self.fc.parameters()) + list(self.fcdecoder.parameters()) + list(self.hgnn_ac.parameters())
        self.optimizer_AC = torch.optim.Adam(AC_params, lr=args.lr, weight_decay=args.weight_decay)

    def forward(self, bias, emb_dest, emb_src, feature_src, fairadj = False):
        """Complete features; balance the adjacency first unless the caller
        passes ``fairadj=True`` (meaning ``bias`` is already balanced)."""
        if not fairadj:
            fair_adj = self.fair_select(bias,feature_src)
        else:
            fair_adj = bias
        transformed_features = self.fc(feature_src)
        feature_src_re = self.hgnn_ac(fair_adj,
                                      emb_dest, emb_src,
                                      transformed_features)
        feature_hat = self.fcdecoder(transformed_features)
        return feature_src_re, feature_hat

    def fair_select(self, adj, feature_with_sens):
        """Randomly drop edges so each node keeps equally many neighbours
        from every sensitive group.

        Assumes the sensitive attribute is the LAST column of
        ``feature_with_sens``. Mutates ``adj`` in place and returns it.
        """
        sens = feature_with_sens[:,-1] + 1 # covert 0 to 1, 1 to 2. in case adj is 0 which cause wrong counter.
        sens_num_class = len(torch.unique(sens))
        for idx,row in enumerate(adj):
            # Count, per (shifted) sensitive class, how many neighbours this
            # node has; slot 0 counts non-edges.
            sens_counter = [0] * (sens_num_class+1)
            sen_row = (row*sens).long()
            sen_row_array = np.array(sen_row.cpu())
            for i in range(sens_num_class+1):
                sens_counter[i] = np.count_nonzero(sen_row_array == i)
            # for i in sen_row:
            #     sens_counter[i] += 1
            sens_counter.remove(sens_counter[0]) # ignore 0, which means the number of no edges nodes pairs
            # Find the smallest per-class neighbour count that is > 0.
            least_num_sens_class = max(sens_counter)
            for counter in sens_counter:
                if counter > 0 and counter < least_num_sens_class:
                    least_num_sens_class = counter
            remove_number = [max(counter - least_num_sens_class,0) for counter in sens_counter] # number of edges per class that need to remove to keep fair
            for i,number in enumerate(remove_number):
                if(number > 0):
                    sen_class = i+1
                    sens_idx = np.where(sen_row.cpu() == sen_class)[0]
                    # Randomly choose `number` edges of the over-represented
                    # class and remove them.
                    drop_idx = torch.tensor(random.sample(list(sens_idx), number)).long()
                    adj[idx][drop_idx] = 0
        return adj

    def feature_transform(self, features):
        """Encode raw features into the transformed space."""
        return self.fc(features)

    def feature_decoder(self, transformed_features):
        """Decode transformed features back into the raw feature space."""
        return self.fcdecoder(transformed_features)

    def loss(self, origin_feature, AC_feature):
        """Completion loss measured in the transformed feature space."""
        return F.pairwise_distance(self.fc(origin_feature), AC_feature, 2).mean()
class FairAC_GNN(nn.Module):
    """End-to-end model: BaseAC attribute completion plus a GNN classifier.

    ``optimizer_G`` trains AC model + backbone + label head jointly;
    ``optimizer_S`` trains only the sensitive-attribute head.
    """

    def __init__(self, nfeat, transformed_feature_dim, emb_dim, args):
        super(FairAC_GNN, self).__init__()
        hidden_dim = args.num_hidden
        self.GNN = get_model(nfeat, args)
        self.classifier = nn.Linear(hidden_dim, 1)
        self.classifierSen = nn.Linear(hidden_dim, 1)
        self.ACmodel = BaseAC(nfeat, transformed_feature_dim, emb_dim, args)
        joint_params = (list(self.ACmodel.parameters())
                        + list(self.GNN.parameters())
                        + list(self.classifier.parameters()))
        self.optimizer_G = torch.optim.Adam(joint_params, lr=args.lr, weight_decay=args.weight_decay)
        self.optimizer_S = torch.optim.Adam(self.classifierSen.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()
        self.G_loss = 0
        self.A_loss = 0

    def forward(self, g, x):
        """Return (representation, label logits, sensitive logits)."""
        rep = self.GNN(g, x)
        return rep, self.classifier(rep), self.classifierSen(rep)

    def feature_transform(self, features):
        """Delegate feature encoding to the AC model."""
        return self.ACmodel.feature_transform(features)

    def feature_decoder(self, transformed_features):
        """Delegate feature decoding to the AC model."""
        return self.ACmodel.feature_decoder(transformed_features)
| 10,512 | 39.279693 | 159 | py |
FairAC | FairAC-main/src/models/FairAC.py | import random
import torch.nn as nn
from .GCN import GCN,GCN_Body
from .GAT import GAT,GAT_body
from .SAGE import SAGE_Body
from .HGNN_AC import HGNN_AC
import torch
import torch.nn.functional as F
import numpy as np
def get_model(nfeat, args):
    """Build the GNN backbone selected by ``args.model``.

    Args:
        nfeat: input feature dimensionality.
        args: namespace providing ``model`` plus the chosen backbone's
            hyper-parameters (num_hidden, dropout, GAT head counts, ...).

    Returns:
        The backbone ``nn.Module`` (representation only, no task head).

    Raises:
        ValueError: if ``args.model`` is not one of "GCN", "GAT", "SAGE".
    """
    if args.model == "GCN":
        return GCN_Body(nfeat, args.num_hidden, args.dropout)
    if args.model == "GAT":
        # One head count per hidden layer, then the output head count.
        heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
        return GAT_body(args.num_layers, nfeat, args.num_hidden, heads,
                        args.dropout, args.attn_drop, args.negative_slope,
                        args.residual)
    if args.model == "SAGE":
        return SAGE_Body(nfeat, args.num_hidden, args.dropout)
    # Fail fast: the original printed a message and returned None, which only
    # surfaced later as a confusing AttributeError in the caller.
    raise ValueError("Model not implement: {}".format(args.model))
class GNN(nn.Module):
    """GNN backbone plus a single-logit classification head."""

    def __init__(self, nfeat, args):
        super(GNN, self).__init__()
        hidden_dim = args.num_hidden
        self.GNN = get_model(nfeat, args)
        self.classifier = nn.Linear(hidden_dim, 1)
        trainable = list(self.GNN.parameters()) + list(self.classifier.parameters())
        self.optimizer_G = torch.optim.Adam(trainable, lr=args.lr, weight_decay=args.weight_decay)
        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()

    def forward(self, g, x):
        """Return (node representations, label logits)."""
        rep = self.GNN(g, x)
        return rep, self.classifier(rep)
class FairGnn(nn.Module):
    """Backbone GNN with two linear heads: task label and sensitive attribute."""

    def __init__(self, nfeat, args):
        super(FairGnn, self).__init__()
        nhid = args.num_hidden
        self.GNN = get_model(nfeat, args)
        self.classifier = nn.Linear(nhid, 1)
        self.classifierSen = nn.Linear(nhid, 1)
        # The sensitive head has its own optimizer so it can act as an
        # adversary while optimizer_G trains backbone + label head.
        self.optimizer_G = torch.optim.Adam(
            list(self.GNN.parameters()) + list(self.classifier.parameters()),
            lr=args.lr, weight_decay=args.weight_decay)
        self.optimizer_S = torch.optim.Adam(
            self.classifierSen.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()
        self.G_loss = 0
        self.A_loss = 0

    def forward(self, g, x):
        """Return (representation, label logits, sensitive logits)."""
        embedding = self.GNN(g, x)
        label_logit = self.classifier(embedding)
        sens_logit = self.classifierSen(embedding)
        return embedding, label_logit, sens_logit
# baseAC, used autoencoder to improve performance.
class BaseAC(nn.Module):
    """Attribute completion wrapped in a single-layer linear autoencoder."""

    def __init__(self, feature_dim, transformed_feature_dim, emb_dim, args):
        super(BaseAC, self).__init__()
        # Encoder / decoder pair with Xavier-initialised weights.
        self.fc = torch.nn.Linear(feature_dim, transformed_feature_dim)
        nn.init.xavier_normal_(self.fc.weight, gain=1.414)
        self.fcdecoder = torch.nn.Linear(transformed_feature_dim, feature_dim)
        nn.init.xavier_normal_(self.fcdecoder.weight, gain=1.414)
        # Attention-based completion operating in the encoded space.
        self.hgnn_ac = HGNN_AC(in_dim=emb_dim, hidden_dim=args.attn_vec_dim, dropout=args.dropout,
                               activation=F.elu, num_heads=args.num_heads, cuda=args.cuda)
        all_params = (list(self.fc.parameters())
                      + list(self.fcdecoder.parameters())
                      + list(self.hgnn_ac.parameters()))
        self.optimizer_AC = torch.optim.Adam(all_params, lr=args.lr, weight_decay=args.weight_decay)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        """Return (completed encoded features, reconstructed raw features)."""
        encoded = self.fc(feature_src)
        completed = self.hgnn_ac(bias, emb_dest, emb_src, encoded)
        return completed, self.fcdecoder(encoded)

    def feature_transform(self, features):
        """Encode raw features into the transformed space."""
        return self.fc(features)

    def feature_decoder(self, transformed_features):
        """Decode transformed features back into the raw feature space."""
        return self.fcdecoder(transformed_features)

    def loss(self, origin_feature, AC_feature):
        """Completion loss measured in the transformed feature space."""
        return F.pairwise_distance(self.fc(origin_feature), AC_feature, 2).mean()
class FairAC2(nn.Module):
    """FairAC main model: fairness-aware attribute completion.

    A two-layer MLP autoencoder around the HGNN-AC completion module plus a
    sensitive-attribute classifier used as an adversary. Separate optimizers
    expose different parameter groups: ``optimizer_AC`` (encoder + decoder +
    completion), ``optimizer_AE`` (autoencoder only), ``optimizer_AConly``
    (completion only) and ``optimizer_S`` (sensitive classifier).
    """
    def __init__(self, feature_dim, transformed_feature_dim, emb_dim, args):
        super(FairAC2, self).__init__()
        # Encoder MLP: feature_dim -> 2*T -> T.
        self.fc = torch.nn.Linear(feature_dim, 2*transformed_feature_dim)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(2*transformed_feature_dim, transformed_feature_dim)
        nn.init.xavier_normal_(self.fc.weight, gain=1.414)
        nn.init.xavier_normal_(self.fc2.weight, gain=1.414)
        self.encoder = torch.nn.Sequential(self.fc, self.relu, self.fc2)
        # Mirror-image decoder MLP: T -> 2*T -> feature_dim.
        self.fcdecoder = torch.nn.Linear(transformed_feature_dim, transformed_feature_dim*2)
        self.relu2 = torch.nn.ReLU()
        self.fcdecoder2 = torch.nn.Linear(transformed_feature_dim*2, feature_dim)
        nn.init.xavier_normal_(self.fcdecoder.weight, gain=1.414)
        nn.init.xavier_normal_(self.fcdecoder2.weight, gain=1.414)
        self.decoder = torch.nn.Sequential(self.fcdecoder, self.relu2, self.fcdecoder2)
        self.hgnn_ac = HGNN_AC(in_dim=emb_dim, hidden_dim=args.attn_vec_dim, dropout=args.dropout,
                               activation=F.elu, num_heads=args.num_heads, cuda=args.cuda)
        AC_params = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(self.hgnn_ac.parameters())
        self.optimizer_AC = torch.optim.Adam(AC_params, lr=args.lr, weight_decay=args.weight_decay)
        # divide AC_params into two parts.
        AE_params = list(self.encoder.parameters()) + list(self.decoder.parameters())
        self.optimizer_AE = torch.optim.Adam(AE_params, lr=args.lr, weight_decay=args.weight_decay)
        self.optimizer_AConly = torch.optim.Adam(self.hgnn_ac.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        # Adversarial head predicting the sensitive attribute from embeddings.
        self.classifierSen = nn.Linear(transformed_feature_dim, args.num_sen_class)
        self.optimizer_S = torch.optim.Adam(self.classifierSen.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        """Return (completed embeddings, reconstructed raw features,
        encoded source features)."""
        transformed_features = self.encoder(feature_src)
        feature_src_re = self.hgnn_ac(bias,
                                      emb_dest, emb_src,
                                      transformed_features)
        feature_hat = self.decoder(transformed_features)
        return feature_src_re, feature_hat, transformed_features

    def sensitive_pred(self, transformed_features):
        """Sensitive-attribute logits for (encoded or completed) embeddings."""
        return self.classifierSen(transformed_features)

    def feature_transform(self, features):
        """Encode raw features into the transformed space."""
        return self.encoder(features)

    def feature_decoder(self, transformed_features):
        """Decode transformed features back into the raw feature space."""
        return self.decoder(transformed_features)

    def loss(self, origin_feature, AC_feature):
        """Completion loss in the transformed space; the encoder output is
        detached so this loss does not pull the encoder toward the
        completed features."""
        return F.pairwise_distance(self.encoder(origin_feature).detach(), AC_feature, 2).mean()
class AverageAC(nn.Module):
    """Attribute-completion baseline: mean-pool neighbour features."""

    def __init__(self):
        super(AverageAC, self).__init__()

    def forward(self, adj, feature_src):
        """Average each node's neighbours' features.

        Args:
            adj: (N, M) dense non-negative adjacency; rows index destination
                nodes, columns index source nodes.
            feature_src: (M, D) features of the source nodes.

        Returns:
            (N, D) tensor; row i is the degree-normalised sum of i's
            neighbours' features (zeros for isolated nodes).
        """
        # Clamp degree to at least 1 so isolated nodes divide by 1 instead of
        # 0 — equivalent to the per-row max(1, deg), but fully vectorized
        # instead of a Python loop + torch.stack per row.
        degree = adj.sum(dim=1, keepdim=True).clamp(min=1)
        return (adj / degree).matmul(feature_src)
| 6,883 | 40.97561 | 131 | py |
FairAC | FairAC-main/src/models/SAGE.py | import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import SAGEConv
class SAGE(nn.Module):
    """Two-layer GraphSAGE backbone with a linear classification head."""

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(SAGE, self).__init__()
        self.body = SAGE_Body(nfeat, nhid, dropout)
        self.fc = nn.Linear(nhid, nclass)

    def forward(self, g, x):
        """Return class logits for every node of graph ``g``."""
        hidden = self.body(g, x)
        return self.fc(hidden)
# def GCN(nn.Module):
class SAGE_Body(nn.Module):
    """Feature extractor: two mean-aggregator SAGEConv layers with dropout."""

    def __init__(self, nfeat, nhid, dropout):
        super(SAGE_Body, self).__init__()
        self.gc1 = SAGEConv(nfeat, nhid, 'mean')
        self.gc2 = SAGEConv(nhid, nhid, 'mean')
        self.dropout = nn.Dropout(dropout)

    def forward(self, g, x):
        """Return node representations (no classification head)."""
        hidden = self.dropout(F.relu(self.gc1(g, x)))
        return self.gc2(g, hidden)
| 848 | 23.257143 | 53 | py |
FairAC | FairAC-main/src/models/GAT.py | import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GATConv
class GAT_body(nn.Module):
    """Multi-layer GAT feature extractor (no classification head).

    ``heads`` carries one head count per hidden layer plus a final entry for
    the output projection; hidden layers concatenate their heads while the
    output layer averages them.
    """

    def __init__(self,
                 num_layers,
                 in_dim,
                 num_hidden,
                 heads,
                 feat_drop,
                 attn_drop,
                 negative_slope,
                 residual):
        super(GAT_body, self).__init__()
        self.num_layers = num_layers
        self.gat_layers = nn.ModuleList()
        self.activation = F.elu
        # Input projection (never residual, since widths differ).
        self.gat_layers.append(GATConv(
            in_dim, num_hidden, heads[0],
            feat_drop, attn_drop, negative_slope, False, self.activation))
        # Hidden layers: input width is num_hidden * previous head count
        # because the multi-head outputs are concatenated.
        for layer_idx in range(1, num_layers):
            self.gat_layers.append(GATConv(
                num_hidden * heads[layer_idx - 1], num_hidden, heads[layer_idx],
                feat_drop, attn_drop, negative_slope, residual, self.activation))
        # Output projection: no activation; its heads are averaged in forward().
        self.gat_layers.append(GATConv(
            num_hidden * heads[-2], num_hidden, heads[-1],
            feat_drop, attn_drop, negative_slope, residual, None))

    def forward(self, g, inputs):
        """Return node representations for graph ``g``."""
        hidden = inputs
        for layer_idx in range(self.num_layers):
            # Concatenate heads of each hidden layer along the feature dim.
            hidden = self.gat_layers[layer_idx](g, hidden).flatten(1)
        # Average the heads of the output projection.
        return self.gat_layers[-1](g, hidden).mean(1)
class GAT(nn.Module):
    """GAT backbone followed by a linear classification head."""

    def __init__(self,
                 num_layers,
                 in_dim,
                 num_hidden,
                 num_classes,
                 heads,
                 feat_drop,
                 attn_drop,
                 negative_slope,
                 residual):
        super(GAT, self).__init__()
        self.body = GAT_body(num_layers, in_dim, num_hidden, heads,
                             feat_drop, attn_drop, negative_slope, residual)
        self.fc = nn.Linear(num_hidden, num_classes)

    def forward(self, g, inputs):
        """Return class logits for every node of graph ``g``."""
        hidden = self.body(g, inputs)
        return self.fc(hidden)
Few-shot-WSI | Few-shot-WSI-master/tools/test.py | import argparse
import importlib
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from openselfsup.datasets import build_dataloader, build_dataset
from openselfsup.models import build_model
from openselfsup.utils import (get_root_logger, dist_forward_collect,
nondist_forward_collect, traverse_replace)
def single_gpu_test(model, data_loader):
    """Run inference on a single GPU and collect results locally.

    Puts the model in eval mode, forwards every batch with ``mode='test'``
    and gathers the outputs via ``nondist_forward_collect``.
    """
    model.eval()

    def run_model(**data):
        return model(mode='test', **data)

    return nondist_forward_collect(run_model, data_loader,
                                   len(data_loader.dataset))
def multi_gpu_test(model, data_loader):
    """Run distributed inference and gather outputs across ranks.

    Args:
        model (nn.Module): model supporting ``model(mode='test', **batch)``.
        data_loader: loader over this rank's shard of the dataset.

    Returns:
        dict: outputs collected onto every rank.
    """
    model.eval()

    def forward(**batch):
        return model(mode='test', **batch)

    rank, world_size = get_dist_info()
    return dist_forward_collect(forward, data_loader, rank,
                                len(data_loader.dataset))
def parse_args():
    """Parse CLI arguments for model testing/evaluation."""
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work_dir', type=str, default=None,
        help='the dir to save logs and models')
    parser.add_argument(
        '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--port', type=int, default=29500,
                        help='port only works when launcher=="slurm"')
    parsed = parser.parse_args()
    # torch.distributed launch utilities read LOCAL_RANK from the environment.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def main():
    """Entry point: build model and data from the config and run evaluation.

    Loads the mmcv config named on the CLI, optionally initializes a
    distributed environment, restores the checkpoint, runs inference and
    (on rank 0) calls ``dataset.evaluate`` on each collected output.
    """
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    cfg.model.pretrained = None  # ensure to use checkpoint rather than pretraining
    # check memcached package exists; disable memcached options when absent
    if importlib.util.find_spec('mc') is None:
        traverse_replace(cfg, 'memcached', False)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        if args.launcher == 'slurm':
            cfg.dist_params['port'] = args.port
        init_dist(args.launcher, **cfg.dist_params)
    # logger (timestamped so repeated runs do not clobber each other)
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, 'test_{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # build the dataloader (no shuffling: evaluation order must be stable)
    dataset = build_dataset(cfg.data.val)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=cfg.data.imgs_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # build the model and load checkpoint
    model = build_model(cfg.model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader)  # dict{key: np.ndarray}
    # Only rank 0 runs the evaluation/reporting.
    rank, _ = get_dist_info()
    if rank == 0:
        for name, val in outputs.items():
            dataset.evaluate(
                torch.from_numpy(val), name, logger, topk=(1, 5))
# Script entry point.
if __name__ == '__main__':
    main()
| 3,944 | 31.073171 | 83 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/extract.py | import argparse
import importlib
import numpy as np
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from openselfsup.utils import dist_forward_collect, nondist_forward_collect
from openselfsup.datasets import build_dataloader, build_dataset
from openselfsup.models import build_model
from openselfsup.models.utils import MultiPooling
from openselfsup.utils import get_root_logger
class ExtractProcess(object):
    """Extract multi-stage backbone features and pool them to flat vectors."""

    def __init__(self,
                 pool_type='specified',
                 backbone='resnet50',
                 layer_indices=(0, 1, 2, 3, 4)):
        # One pooling operator per requested backbone stage.
        self.multi_pooling = MultiPooling(
            pool_type, in_indices=layer_indices, backbone=backbone)

    def _forward_func(self, model, **x):
        """Forward one batch; return {'featN': flattened CPU tensor}."""
        stage_feats = model(mode='extract', **x)
        pooled = self.multi_pooling(stage_feats)
        flattened = [t.view(t.size(0), -1) for t in pooled]
        return {'feat{}'.format(idx + 1): t.cpu()
                for idx, t in enumerate(flattened)}

    def extract(self, model, data_loader, distributed=False):
        """Run feature extraction over the whole loader.

        Args:
            model (nn.Module): model supporting ``mode='extract'``.
            data_loader: loader over the dataset.
            distributed (bool): collect across ranks when True.

        Returns:
            dict: key -> concatenated feature array.
        """
        model.eval()

        def forward(**x):
            return self._forward_func(model, **x)

        if distributed:
            rank, world_size = get_dist_info()
            return dist_forward_collect(forward, data_loader, rank,
                                        len(data_loader.dataset))
        return nondist_forward_collect(forward, data_loader,
                                       len(data_loader.dataset))
def parse_args():
    """Parse CLI arguments for feature extraction."""
    parser = argparse.ArgumentParser(
        description='OpenSelfSup extract features of a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', default=None, help='checkpoint file')
    parser.add_argument(
        '--pretrained', default='random',
        help='pretrained model file, exclusive to --checkpoint')
    parser.add_argument(
        '--dataset-config',
        default='benchmarks/extract_info/voc07.py',
        help='extract dataset config file path')
    parser.add_argument(
        '--layer-ind', type=str,
        help='layer indices, separated by comma, e.g., "0,1,2,3,4"')
    parser.add_argument(
        '--work_dir', type=str, default=None,
        help='the dir to save logs and models')
    parser.add_argument(
        '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--port', type=int, default=29500,
                        help='port only works when launcher=="slurm"')
    parsed = parser.parse_args()
    # torch.distributed launch utilities read LOCAL_RANK from the environment.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def main():
    """Entry point: extract multi-layer features and save them as .npy splits.

    Builds the model from the main config (backbone out_indices overridden by
    --layer-ind), the dataset from --dataset-config, runs ExtractProcess, and
    on rank 0 writes one .npy per (split, feature-level) pair.
    """
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    # Which backbone stages to tap; e.g. "0,1,2,3,4" -> [0, 1, 2, 3, 4].
    layer_ind = [int(idx) for idx in args.layer_ind.split(',')]
    cfg.model.backbone.out_indices = layer_ind
    # checkpoint and pretrained are exclusive
    assert args.pretrained == "random" or args.checkpoint is None, \
        "Checkpoint and pretrained are exclusive."
    # check memcached package exists; disable memcached when absent
    if importlib.util.find_spec('mc') is None:
        for field in ['train', 'val', 'test']:
            if hasattr(cfg.data, field):
                getattr(cfg.data, field).data_source.memcached = False
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        if args.launcher == 'slurm':
            cfg.dist_params['port'] = args.port
        init_dist(args.launcher, **cfg.dist_params)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # logger
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, 'extract_{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # build the dataloader (order matters for the split slicing below)
    dataset_cfg = mmcv.Config.fromfile(args.dataset_config)
    dataset = build_dataset(dataset_cfg.data.extract)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=dataset_cfg.data.imgs_per_gpu,
        workers_per_gpu=dataset_cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # specify pretrained model
    if args.pretrained != 'random':
        assert isinstance(args.pretrained, str)
        cfg.model.pretrained = args.pretrained
    # build the model and load checkpoint
    model = build_model(cfg.model)
    if args.checkpoint is not None:
        logger.info("Use checkpoint: {} to extract features".format(
            args.checkpoint))
        load_checkpoint(model, args.checkpoint, map_location='cpu')
    elif args.pretrained != "random":
        logger.info('Use pretrained model: {} to extract features'.format(
            args.pretrained))
    else:
        logger.info('No checkpoint or pretrained is give, use random init.')
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
    # build extraction processor
    extractor = ExtractProcess(
        pool_type='specified', backbone='resnet50', layer_indices=layer_ind)
    # run
    outputs = extractor.extract(model, data_loader, distributed=distributed)
    rank, _ = get_dist_info()
    mmcv.mkdir_or_exist("{}/features/".format(args.work_dir))
    # Rank 0 slices the concatenated features at dataset_cfg.split_at and
    # saves one file per (split_name, feature key).
    if rank == 0:
        for key, val in outputs.items():
            split_num = len(dataset_cfg.split_name)
            split_at = dataset_cfg.split_at
            for ss in range(split_num):
                output_file = "{}/features/{}_{}.npy".format(
                    args.work_dir, dataset_cfg.split_name[ss], key)
                if ss == 0:
                    np.save(output_file, val[:split_at[0]])
                elif ss == split_num - 1:
                    np.save(output_file, val[split_at[-1]:])
                else:
                    np.save(output_file, val[split_at[ss - 1]:split_at[ss]])
# Script entry point.
if __name__ == '__main__':
    main()
| 6,703 | 35.63388 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/upgrade_models.py | import torch
import argparse
def parse_args():
    """Parse the source checkpoint path and the required output path."""
    parser = argparse.ArgumentParser()
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--save-path', type=str, required=True, help='destination file name')
    return parser.parse_args()
def main():
    """Strip head.* weights from a flat checkpoint and save the remainder.

    Loads the checkpoint on CPU, copies every entry whose key does not start
    with 'head' into a fresh state_dict, and writes it to --save-path.
    """
    args = parse_args()
    ck = torch.load(args.checkpoint, map_location=torch.device('cpu'))
    output_dict = dict(state_dict=dict(), author='OpenSelfSup')
    # Keep everything except the classification head parameters.
    for key, value in ck.items():
        if not key.startswith('head'):
            output_dict['state_dict'][key] = value
    torch.save(output_dict, args.save_path)
# Script entry point.
if __name__ == '__main__':
    main()
| 712 | 24.464286 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/extract_backbone_weights.py | import torch
import argparse
def parse_args():
    """Parse the source checkpoint path and destination .pth file name."""
    parser = argparse.ArgumentParser(
        description='This script extracts backbone weights from a checkpoint')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        'output', type=str, help='destination file name')
    return parser.parse_args()
def main():
    """Copy 'backbone.*' weights out of a checkpoint into a new .pth file.

    Raises:
        Exception: if the checkpoint contains no 'backbone.*' key.
    """
    args = parse_args()
    assert args.output.endswith(".pth")
    ck = torch.load(args.checkpoint, map_location=torch.device('cpu'))
    output_dict = dict(state_dict=dict(), author="OpenSelfSup")
    has_backbone = False
    for key, value in ck['state_dict'].items():
        if not key.startswith('backbone'):
            continue
        # Drop the 'backbone.' prefix (9 characters) from each key.
        output_dict['state_dict'][key[9:]] = value
        has_backbone = True
    if not has_backbone:
        raise Exception("Cannot find a backbone module in the checkpoint.")
    torch.save(output_dict, args.output)
# Script entry point.
if __name__ == '__main__':
    main()
| 952 | 28.78125 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/train.py | from __future__ import division
import argparse
import importlib
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist
from openselfsup import __version__
from openselfsup.apis import set_random_seed, train_model
from openselfsup.datasets import build_dataset
from openselfsup.models import build_model
from openselfsup.utils import collect_env, get_root_logger, traverse_replace
def parse_args():
    """Parse CLI arguments for training."""
    parser = argparse.ArgumentParser(description='Train a model')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--work_dir', type=str, default=None,
        help='the dir to save logs and models')
    parser.add_argument(
        '--resume_from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--pretrained', default=None, help='pretrained model file')
    parser.add_argument(
        '--gpus', type=int, default=1,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic', action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('-d', '--dev', default=False, action='store_true')
    parser.add_argument('-c', '--continue_training', default=False,
                        action='store_true')
    parser.add_argument('--port', type=int, default=29500,
                        help='port only works when launcher=="slurm"')
    parsed = parser.parse_args()
    # torch.distributed launch utilities read LOCAL_RANK from the environment.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def main():
    """Entry point: configure environment, build model/dataset, start training.

    Applies CLI overrides to the mmcv config, optionally initializes a
    distributed environment, sets up logging and seeding, then delegates
    the training loop to ``train_model``.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    # -c resumes from the most recent checkpoint in the work dir.
    if args.continue_training:
        cfg.resume_from = osp.join(cfg.work_dir, 'latest.pth')
    # -d shrinks the batch size for quick local debugging.
    if args.dev:
        cfg['data']['imgs_per_gpu']=16
    # check memcached package exists; disable memcached when absent
    if importlib.util.find_spec('mc') is None:
        traverse_replace(cfg, 'memcached', False)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
        assert cfg.model.type not in \
            ['DeepCluster', 'MOCO', 'SimCLR', 'ODC', 'NPID'], \
            "{} does not support non-dist training.".format(cfg.model.type)
    else:
        distributed = True
        if args.launcher == 'slurm':
            cfg.dist_params['port'] = args.port
        init_dist(args.launcher, **cfg.dist_params)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, 'train_{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([('{}: {}'.format(k, v))
                          for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Config:\n{}'.format(cfg.text))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    if args.pretrained is not None:
        assert isinstance(args.pretrained, str)
        cfg.model.pretrained = args.pretrained
    model = build_model(cfg.model)
    datasets = [build_dataset(cfg.data.train)]
    assert len(cfg.workflow) == 1, "Validation is called by hook."
    if cfg.checkpoint_config is not None:
        # save openselfsup version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            openselfsup_version=__version__, config=cfg.text)
    # add an attribute for visualization convenience
    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        timestamp=timestamp,
        meta=meta)
# Script entry point.
if __name__ == '__main__':
    main()
| 5,150 | 33.112583 | 86 | py |
Few-shot-WSI | Few-shot-WSI-master/wsi_workdir/extract.py | import argparse
import importlib
import numpy as np
import os
import os.path as osp
import time
from tqdm import trange,tqdm
import threading
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from openselfsup.utils import dist_forward_collect, nondist_forward_collect
from openselfsup.datasets import build_dataloader, build_dataset
from openselfsup.models import build_model
from openselfsup.models.utils import MultiPooling
from openselfsup.utils import get_root_logger
from torch import nn
import argparse
def nondist_forward_collect(func, data_loader, length):
    """Run ``func`` over every batch (no grad) and concatenate the outputs.

    Overrides the version imported above from openselfsup.utils.

    Args:
        func: callable invoked as ``func(**batch)``; must return a dict of
            tensors with the batch dimension first.
        data_loader: iterable of batch dicts.
        length (int): expected total number of samples (sanity-checked).

    Returns:
        dict: key -> np.ndarray with ``length`` rows.
    """
    batch_outputs = []
    progress = mmcv.ProgressBar(len(data_loader))
    for batch in data_loader:
        with torch.no_grad():
            batch_outputs.append(func(**batch))
        progress.update()
    collected = {}
    for key in batch_outputs[0].keys():
        collected[key] = np.concatenate(
            [out[key].numpy() for out in batch_outputs], axis=0)
        assert collected[key].shape[0] == length
    return collected
def extract(model, data_loader):
    """Extract features for the whole dataset with the model in eval mode."""
    model.eval()

    def forward(**batch):
        return model(mode='extract', **batch)

    return nondist_forward_collect(forward, data_loader,
                                   len(data_loader.dataset))
def main(args):
    """Extract 'backbone' features for the configured dataset, save to .npy.

    Args:
        args: parsed CLI namespace with .config, .pretrained and .output.
    """
    config_file = args.config
    cfg = mmcv.Config.fromfile(config_file)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    dataset = build_dataset(cfg.data.extract)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=cfg.data.imgs_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)
    cfg.model.pretrained = args.pretrained
    model = build_model(cfg.model)
    model = MMDataParallel(model, device_ids=[0])
    model.eval()
    # NOTE(review): this lambda is never used — extract() builds its own
    # forward closure internally.
    func = lambda **x: model(mode='extract', **x)
    result_dict = extract(model, data_loader)
    features = result_dict['backbone']
    np.save(args.output, features)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='extract dataset features using pretrained ')
    parser.add_argument('--pretrained', type=str, required=True, help='path to pretrained model')
    parser.add_argument('--config', type=str, required=True, help='path to data root')
    parser.add_argument('--output', type=str, required=True, help='output path')
    parser.add_argument('--start', type=int, required=False)
    args = parser.parse_args()
    main(args)
    exit()
    # NOTE(review): everything below this exit() is unreachable dead code —
    # an old stash for extracting augmented NCT features with hard-coded
    # output paths. Remove the exit() above to re-enable, or delete it.
    ## extract augmented features
    config_file = args.config
    cfg = mmcv.Config.fromfile(config_file)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    dataset = build_dataset(cfg.data.extract)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=cfg.data.imgs_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)
    cfg.model.pretrained = args.pretrained
    model = build_model(cfg.model)
    model = MMDataParallel(model, device_ids=[0])
    model.eval()
    func = lambda **x: model(mode='extract', **x)
    def extract_and_save(idxs):
        # One .npy per augmentation round; each round re-runs the (random)
        # augmentation pipeline over the whole dataset.
        for idx in tqdm(idxs):
            result_dict = nondist_forward_collect(func, data_loader, len(data_loader.dataset))
            features = result_dict['backbone']
            np.save(f'wsi_workdir/workdir/extracted_feats/moco_v3_wo_78/NCT_aug/NCT_aug_{idx}.npy', features)
            print('saving', idx)
    extract_and_save(np.arange(args.start, args.start+25))
| 3,789 | 29.564516 | 109 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/apis/train.py | import random
import re
from collections import OrderedDict
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import DistSamplerSeedHook, Runner, obj_from_dict
from openselfsup.datasets import build_dataloader
from openselfsup.hooks import build_hook, DistOptimizerHook
from openselfsup.utils import get_root_logger, optimizers, print_log
try:
import apex
except:
print('apex is not installed')
def set_random_seed(seed, deterministic=False):
    """Seed the Python, NumPy and PyTorch (CPU + all CUDA) RNGs.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set ``torch.backends.cudnn.deterministic``
            to True and ``torch.backends.cudnn.benchmark`` to False.
            Default: False.
    """
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def parse_losses(losses):
    """Reduce a dict of raw losses to a scalar loss plus loggable values.

    Args:
        losses (dict): maps a name to a tensor or a list of tensors.

    Returns:
        tuple: (total loss tensor, OrderedDict of python floats). The total
            is the sum of every entry whose name contains 'loss'.

    Raises:
        TypeError: if a value is neither a tensor nor a list of tensors.
    """
    log_vars = OrderedDict()
    for name, value in losses.items():
        if isinstance(value, torch.Tensor):
            log_vars[name] = value.mean()
        elif isinstance(value, list):
            log_vars[name] = sum(part.mean() for part in value)
        else:
            raise TypeError(
                '{} is not a tensor or list of tensors'.format(name))

    loss = sum(v for k, v in log_vars.items() if 'loss' in k)
    log_vars['loss'] = loss
    for name, value in log_vars.items():
        # reduce loss when distributed training
        if dist.is_available() and dist.is_initialized():
            value = value.data.clone()
            dist.all_reduce(value.div_(dist.get_world_size()))
        log_vars[name] = value.item()

    return loss, log_vars
def batch_processor(model, data, train_mode):
    """Forward one batch and package the losses for mmcv's Runner.

    The (model, data, train_mode) signature is fixed by the Runner API.

    Args:
        model (nn.Module): A PyTorch model.
        data (dict): The data batch in a dict.
        train_mode (bool): Training mode or not. It may be useless for some
            models.

    Returns:
        dict: A dict containing losses and log vars.
    """
    raw_losses = model(**data)
    loss, log_vars = parse_losses(raw_losses)
    return dict(
        loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
def train_model(model,
                dataset,
                cfg,
                distributed=False,
                timestamp=None,
                meta=None):
    """Dispatch training to the distributed or single-machine code path."""
    logger = get_root_logger(cfg.log_level)
    # Both paths share the same signature; pick one and run it.
    trainer = _dist_train if distributed else _non_dist_train
    trainer(model, dataset, cfg, logger=logger, timestamp=timestamp,
            meta=meta)
def build_optimizer(model, optimizer_cfg):
    """Build optimizer from configs.

    Args:
        model (:obj:`nn.Module`): The model with parameters to be optimized.
        optimizer_cfg (dict): The config dict of the optimizer.
            Positional fields are:
                - type: class name of the optimizer.
                - lr: base learning rate.
            Optional fields are:
                - any arguments of the corresponding optimizer type, e.g.,
                  weight_decay, momentum, etc.
                - paramwise_options: a dict with regular expression as keys
                  to match parameter names and a dict containing options as
                  values. Options include 6 fields: lr, lr_mult, momentum,
                  momentum_mult, weight_decay, weight_decay_mult.

    Returns:
        torch.optim.Optimizer: The initialized optimizer.

    Example:
        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
        >>> paramwise_options = {
        >>>     '(bn|gn)(\d+)?.(weight|bias)': dict(weight_decay_mult=0.1),
        >>>     '\Ahead.': dict(lr_mult=10, momentum=0)}
        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
        >>>                      weight_decay=0.0001,
        >>>                      paramwise_options=paramwise_options)
        >>> optimizer = build_optimizer(model, optimizer_cfg)
    """
    # Unwrap (MM)DataParallel so named_parameters() yields clean names.
    if hasattr(model, 'module'):
        model = model.module
    # Copy so that popping keys does not mutate the caller's config.
    optimizer_cfg = optimizer_cfg.copy()
    paramwise_options = optimizer_cfg.pop('paramwise_options', None)
    # if no paramwise option is specified, just use the global setting
    if paramwise_options is None:
        return obj_from_dict(optimizer_cfg, optimizers,
                             dict(params=model.parameters()))
    else:
        assert isinstance(paramwise_options, dict)
        # Build one param group per parameter so per-name overrides apply.
        params = []
        for name, param in model.named_parameters():
            param_group = {'params': [param]}
            if not param.requires_grad:
                params.append(param_group)
                continue
            for regexp, options in paramwise_options.items():
                if re.search(regexp, name):
                    for key, value in options.items():
                        if key.endswith('_mult'): # is a multiplier
                            # '<key>_mult' scales the global '<key>' value.
                            key = key[:-5]
                            assert key in optimizer_cfg, \
                                "{} not in optimizer_cfg".format(key)
                            value = optimizer_cfg[key] * value
                        param_group[key] = value
                        if not dist.is_initialized() or dist.get_rank() == 0:
                            print_log('paramwise_options -- {}: {}={}'.format(
                                name, key, value))
            # otherwise use the global settings
            params.append(param_group)
        optimizer_cls = getattr(optimizers, optimizer_cfg.pop('type'))
        return optimizer_cls(params, **optimizer_cfg)
def _dist_train(model, dataset, cfg, logger=None, timestamp=None, meta=None):
    """Distributed training loop: loaders, optimizer, hooks, Runner.run.

    Args:
        model (nn.Module): model to train (wrapped in DDP here).
        dataset: a dataset or list of datasets matching cfg.workflow.
        cfg: full mmcv training config.
        logger / timestamp / meta: forwarded to the mmcv Runner.
    """
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            dist=True,
            shuffle=True,
            replace=getattr(cfg.data, 'sampling_replace', False),
            seed=cfg.seed,
            drop_last=getattr(cfg.data, 'drop_last', False),
            prefetch=cfg.prefetch,
            img_norm_cfg=cfg.img_norm_cfg) for ds in dataset
    ]
    optimizer = build_optimizer(model, cfg.optimizer)
    # Optional apex mixed precision (module import guarded at file top).
    if 'use_fp16' in cfg and cfg.use_fp16:
        model, optimizer = apex.amp.initialize(model.cuda(), optimizer, opt_level="O1")
        print_log('**** Initializing mixed precision done. ****')
    # put model on gpus (skip .cuda() if apex already moved it)
    model = MMDistributedDataParallel(
        model if next(model.parameters()).is_cuda else model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False)
    # build runner
    runner = Runner(
        model,
        batch_processor,
        optimizer,
        cfg.work_dir,
        logger=logger,
        meta=meta)
    # an ugly walkaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
    # register hooks
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    runner.register_hook(DistSamplerSeedHook())
    # register custom hooks (DeepClusterHook additionally needs the loaders)
    for hook in cfg.get('custom_hooks', ()):
        if hook.type == 'DeepClusterHook':
            common_params = dict(dist_mode=True, data_loaders=data_loaders)
        else:
            common_params = dict(dist_mode=True)
        runner.register_hook(build_hook(hook, common_params))
    # Resume takes precedence over load_from.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def _non_dist_train(model,
                    dataset,
                    cfg,
                    validate=False,
                    logger=None,
                    timestamp=None,
                    meta=None):
    """Single-machine (non-distributed) training loop.

    Args:
        model (nn.Module): model to train (wrapped in MMDataParallel here).
        dataset: a dataset or list of datasets matching cfg.workflow.
        cfg: full mmcv training config.
        validate (bool): unused here; validation is driven by hooks.
        logger / timestamp / meta: forwarded to the mmcv Runner.
    """
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False,
            shuffle=True,
            replace=getattr(cfg.data, 'sampling_replace', False),
            seed=cfg.seed,
            drop_last=getattr(cfg.data, 'drop_last', False),
            prefetch=cfg.prefetch,
            img_norm_cfg=cfg.img_norm_cfg) for ds in dataset
    ]
    # apex mixed precision requires distributed training.
    if 'use_fp16' in cfg and cfg.use_fp16 == True:
        raise NotImplementedError('apex do not support non_dist_train!')
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(
        model,
        batch_processor,
        optimizer,
        cfg.work_dir,
        logger=logger,
        meta=meta)
    # an ugly walkaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    # register custom hooks (DeepClusterHook additionally needs the loaders)
    for hook in cfg.get('custom_hooks', ()):
        if hook.type == 'DeepClusterHook':
            common_params = dict(dist_mode=False, data_loaders=data_loaders)
        else:
            common_params = dict(dist_mode=False)
        runner.register_hook(build_hook(hook, common_params))
    # Resume takes precedence over load_from.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
| 10,378 | 34.913495 | 87 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/third_party/clustering.py | # This file is modified from
# https://github.com/facebookresearch/deepcluster/blob/master/clustering.py
import time
import numpy as np
import faiss
import torch
from scipy.sparse import csr_matrix
__all__ = ['Kmeans', 'PIC']
def preprocess_features(npdata, pca):
    """Preprocess an array of features.

    Args:
        npdata (np.array N * ndim): float32 features to preprocess.
        pca (int): output dimension of the PCA step; -1 disables PCA.

    Returns:
        np.array of dim N * pca: data PCA-reduced, whitened and
        L2-normalized.

    Raises:
        Exception: on NaNs in the input, or on more than 0.1% NaNs after
            the PCA projection.
    """
    _, ndim = npdata.shape
    assert npdata.dtype == np.float32
    if np.any(np.isnan(npdata)):
        raise Exception("nan occurs")
    if pca != -1:
        print("\nPCA from dim {} to dim {}".format(ndim, pca))
        # Whitening PCA (eigen_power=-0.5) implemented by faiss.
        mat = faiss.PCAMatrix(ndim, pca, eigen_power=-0.5)
        mat.train(npdata)
        assert mat.is_trained
        npdata = mat.apply_py(npdata)
        if np.any(np.isnan(npdata)):
            percent = np.isnan(npdata).sum().item() / float(np.size(npdata)) * 100
            if percent > 0.1:
                raise Exception(
                    "More than 0.1% nan occurs after pca, percent: {}%".format(
                        percent))
            # A tiny fraction of NaNs is tolerated and zeroed out.
            npdata[np.isnan(npdata)] = 0.
    # L2-normalize each row; epsilon guards against zero rows.
    norms = np.linalg.norm(npdata, axis=1)
    return npdata / (norms[:, np.newaxis] + 1e-10)
def make_graph(xb, nnn):
    """Builds a graph of nearest neighbors.

    Args:
        xb (np.array): data
        nnn (int): number of nearest neighbors

    Returns:
        list: for each data the list of ids to its nnn nearest neighbors
        list: for each data the list of distances to its nnn NN
    """
    num_points, dim = xb.shape

    # we need only a StandardGpuResources per GPU
    gpu_res = faiss.StandardGpuResources()

    # Exact L2 index placed on the last visible GPU.
    flat_config = faiss.GpuIndexFlatConfig()
    flat_config.device = int(torch.cuda.device_count()) - 1
    index = faiss.GpuIndexFlatL2(gpu_res, dim, flat_config)
    index.add(xb)

    # nnn + 1 because each point is returned as its own first neighbor.
    distances, neighbor_ids = index.search(xb, nnn + 1)
    return neighbor_ids, distances
def run_kmeans(x, nmb_clusters, verbose=False, seed=None):
    """Runs kmeans on 1 GPU.

    Args:
        x: data (float32, N * dim)
        nmb_clusters (int): number of clusters
        verbose (bool): print the loss evolution
        seed (int): optional fixed faiss seed for reproducible init

    Returns:
        tuple: (list of cluster ids per sample, final k-means loss,
            centroids array of shape (nmb_clusters, dim))
    """
    n_data, d = x.shape
    # faiss implementation of k-means
    clus = faiss.Clustering(d, nmb_clusters)
    # Change faiss seed at each k-means so that the randomly picked
    # initialization centroids do not correspond to the same feature ids
    # from an epoch to another.
    if seed is not None:
        clus.seed = seed
    else:
        clus.seed = np.random.randint(1234)
    clus.niter = 20
    clus.max_points_per_centroid = 10000000
    res = faiss.StandardGpuResources()
    flat_config = faiss.GpuIndexFlatConfig()
    flat_config.useFloat16 = False
    flat_config.device = 0
    index = faiss.GpuIndexFlatL2(res, d, flat_config)
    # perform the training
    clus.train(x, index)
    # Assign each sample to its nearest trained centroid.
    _, I = index.search(x, 1)
    # NOTE(review): clus.obj was removed in newer faiss releases (use
    # clus.iteration_stats instead) — confirm the pinned faiss version.
    losses = faiss.vector_to_array(clus.obj)
    centroids = faiss.vector_to_array(clus.centroids).reshape(nmb_clusters,d)
    if verbose:
        print('k-means loss evolution: {0}'.format(losses))
    return [int(n[0]) for n in I], losses[-1], centroids
def arrange_clustering(images_lists):
    """Flatten per-cluster image lists into per-image cluster labels.

    Args:
        images_lists (list of list): for each cluster, the image indices
            assigned to it.

    Returns:
        np.ndarray: cluster label for each image, ordered by image index.
    """
    pairs = [(img, cluster)
             for cluster, members in enumerate(images_lists)
             for img in members]
    image_indexes = [img for img, _ in pairs]
    pseudolabels = [cluster for _, cluster in pairs]
    # Sort labels back into original image order.
    order = np.argsort(image_indexes)
    return np.asarray(pseudolabels)[order]
class Kmeans:
    """k-means wrapper: PCA/whiten/L2-normalize features, then cluster."""

    def __init__(self, k, pca_dim=256):
        self.k = k
        self.pca_dim = pca_dim

    def cluster(self, feat, verbose=False, seed=None):
        """Performs k-means clustering.

        Args:
            feat (np.array N * dim): data to cluster.
            verbose (bool): print timing information.
            seed (int): optional faiss seed for reproducible centroids.

        Returns:
            float: final k-means loss.
        """
        start = time.time()

        # PCA-reducing, whitening and L2-normalization
        processed = preprocess_features(feat, self.pca_dim)

        # cluster the preprocessed data
        assignments, loss, centroids = run_kmeans(
            processed, self.k, verbose, seed=seed)
        self.centroids = centroids
        self.labels = np.array(assignments)
        if verbose:
            print('k-means time: {0:.0f} s'.format(time.time() - start))

        return loss
def make_adjacencyW(I, D, sigma):
    """Create adjacency matrix with a Gaussian kernel.

    Args:
        I (numpy array): for each vertex the ids to its nnn linked vertices
            + first column of identity.
        D (numpy array): for each data the l2 distances to its nnn linked
            vertices + first column of zeros.
        sigma (float): Bandwidth of the Gaussian kernel.

    Returns:
        csr_matrix: affinity matrix of the graph.
    """
    num_vertices, cols = I.shape
    num_neighbors = cols - 1

    # Drop the first column (self-links) and flatten into CSR components.
    indices = np.reshape(np.delete(I, 0, 1), (1, -1))
    indptr = np.multiply(num_neighbors, np.arange(num_vertices + 1))

    # Gaussian kernel of the distances.
    weights = np.exp(-D / sigma**2)
    data = np.reshape(np.delete(weights, 0, 1), (1, -1))
    return csr_matrix((data[0], indices[0], indptr),
                      shape=(num_vertices, num_vertices))
def run_pic(I, D, sigma, alpha):
    """Run the Power Iteration Clustering algorithm.

    Args:
        I, D: neighbor ids / distances as produced by ``make_graph``.
        sigma (float): Gaussian kernel bandwidth.
        alpha (float): PIC teleport/regularization parameter.

    Returns:
        list of int: cluster id per vertex.
    """
    affinity = make_adjacencyW(I, D, sigma)
    # Symmetrize the k-NN affinity graph.
    W = affinity + affinity.transpose()
    nim = W.shape[0]

    # Power iterations starting from the uniform distribution.
    v = (np.ones(nim) / nim).astype('float32')
    for _ in range(200):
        vnext = np.zeros(nim, dtype='float32') + W.transpose().dot(v)
        vnext = alpha * vnext + (1 - alpha) / nim
        # L1 normalize
        vnext /= vnext.sum()
        v = vnext
    clust = find_maxima_cluster(W, v)
    return [int(label) for label in clust]
def find_maxima_cluster(W, v):
    """Assign each vertex to the local maximum it flows to.

    Each vertex points at the neighbor with the largest weighted increase of
    the stationary vector ``v``; pointer chains terminate at local maxima,
    which define the clusters.

    Args:
        W (csr_matrix): symmetric affinity matrix.
        v (np.ndarray): converged PIC vector.

    Returns:
        np.ndarray: cluster id per vertex.
    """
    n, m = W.shape
    assert (n == m)
    assign = np.zeros(n)
    # Every vertex initially points at itself.
    pointers = list(range(n))
    for i in range(n):
        best_gain = 0
        row_start = W.indptr[i]
        row_end = W.indptr[i + 1]
        # Scan row i of the CSR matrix for the best-improving neighbor.
        for offset in range(row_start, row_end):
            j = W.indices[offset]
            gain = W.data[offset] * (v[j] - v[i])
            if gain > best_gain:
                best_gain = gain
                pointers[i] = j
    # Self-pointing vertices are local maxima: one cluster id each.
    n_clus = 0
    cluster_ids = -1 * np.ones(n)
    for i in range(n):
        if pointers[i] == i:
            cluster_ids[i] = n_clus
            n_clus = n_clus + 1
    # Follow pointer chains from each vertex up to its local maximum.
    for i in range(n):
        current_node = i
        while pointers[current_node] != current_node:
            current_node = pointers[current_node]
        assign[i] = cluster_ids[current_node]
        assert (assign[i] >= 0)
    return assign
class PIC():
    """Class to perform Power Iteration Clustering on a graph of nearest neighbors.

    Args:
        args: for consistency with k-means init
        sigma (float): bandwidth of the Gaussian kernel (default 0.2)
        nnn (int): number of nearest neighbors (default 5)
        alpha (float): parameter in PIC (default 0.001)
        distribute_singletons (bool): If True, reassign each singleton to
                                   the cluster of its closest non
                                   singleton nearest neighbors (up to nnn
                                   nearest neighbors).
        pca_dim (int): output dimension of the PCA preprocessing (default 256)

    Attributes:
        images_lists (list of list): for each cluster, the list of image indexes
                                    belonging to this cluster
    """

    def __init__(self,
                 args=None,
                 sigma=0.2,
                 nnn=5,
                 alpha=0.001,
                 distribute_singletons=True,
                 pca_dim=256):
        self.sigma = sigma
        self.alpha = alpha
        self.nnn = nnn
        self.distribute_singletons = distribute_singletons
        self.pca_dim = pca_dim

    def cluster(self, data, verbose=False):
        """Cluster `data` with PIC; fills self.images_lists / self.labels.

        Args:
            data (numpy array): N x dim feature matrix.
            verbose (bool): print timing information.

        Returns:
            int: 0 on success (kept for API compatibility).
        """
        end = time.time()

        # preprocess the data
        xb = preprocess_features(data, self.pca_dim)

        # construct nnn graph
        I, D = make_graph(xb, self.nnn)

        # run PIC
        clust = run_pic(I, D, self.sigma, self.alpha)
        images_lists = {}
        for h in set(clust):
            images_lists[h] = []
        # Bug fix: the original used `data` as the loop variable here,
        # clobbering the input array and crashing later on `data.shape[0]`
        # (an int has no .shape). Use a distinct name.
        for idx, c in enumerate(clust):
            images_lists[c].append(idx)

        # allocate singletons to clusters of their closest NN not singleton
        if self.distribute_singletons:
            clust_NN = {}
            for i in images_lists:
                # if singleton
                if len(images_lists[i]) == 1:
                    s = images_lists[i][0]
                    # for NN
                    for n in I[s, 1:]:
                        # if NN is not a singleton
                        if not len(images_lists[clust[n]]) == 1:
                            clust_NN[s] = n
                            break
            for s in clust_NN:
                del images_lists[clust[s]]
                clust[s] = clust[clust_NN[s]]
                images_lists[clust[s]].append(s)

        self.images_lists = []
        # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin int is the documented replacement.
        self.labels = -1 * np.ones((data.shape[0], ), dtype=int)
        for i, c in enumerate(images_lists):
            self.images_lists.append(images_lists[c])
            self.labels[images_lists[c]] = i
        assert np.all(self.labels != -1)

        if verbose:
            print('pic time: {0:.0f} s'.format(time.time() - end))
        return 0
| 9,576 | 29.5 | 84 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/classification.py | import numpy as np
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from .utils import Sobel
@MODELS.register_module
class Classification(nn.Module):
    """Simple image classification.

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        with_sobel (bool): Whether to apply a Sobel filter on images. Default: False.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self,
                 backbone,
                 with_sobel=False,
                 head=None,
                 pretrained=None):
        super(Classification, self).__init__()
        self.with_sobel = with_sobel
        if with_sobel:
            self.sobel_layer = Sobel()
        self.backbone = builder.build_backbone(backbone)
        if head is not None:
            self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.head.init_weights()

    def forward_backbone(self, img):
        """Forward backbone.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        if self.with_sobel:
            img = self.sobel_layer(img)
        x = self.backbone(img)
        return x

    def forward_train(self, img, gt_label, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            gt_label (Tensor): Ground-truth labels.
            kwargs: Any keyword arguments to be used to forward.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.forward_backbone(img)
        outs = self.head(x)
        loss_inputs = (outs, gt_label)
        losses = self.head.loss(*loss_inputs)
        return losses

    def forward_test(self, img, **kwargs):
        """Forward computation during testing; returns per-head outputs on CPU."""
        x = self.forward_backbone(img)  # tuple
        outs = self.head(x)
        keys = ['head{}'.format(i) for i in range(len(outs))]
        out_tensors = [out.cpu() for out in outs]  # NxC
        return dict(zip(keys, out_tensors))

    def aug_test(self, imgs):
        """Augmented testing (not implemented).

        Raises:
            NotImplementedError: always.
        """
        # Bug fix: the original executed `raise NotImplemented`, which is a
        # TypeError at runtime (NotImplemented is a constant, not an
        # exception class). The averaging code that followed the raise was
        # unreachable and has been removed.
        raise NotImplementedError

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to train / test / extract behavior by `mode`."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
| 3,370 | 31.413462 | 85 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/simclr.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from .utils import GatherLayer
@MODELS.register_module
class SimCLR(nn.Module):
    """SimCLR.

    Implementation of "A Simple Framework for Contrastive Learning
    of Visual Representations (https://arxiv.org/abs/2002.05709)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self, backbone, neck=None, head=None, pretrained=None):
        super(SimCLR, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        self.neck = builder.build_neck(neck)
        self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)

    @staticmethod
    def _create_buffer(N):
        """Create masks/indices for selecting positives and negatives.

        Samples 2i and 2i+1 are the two augmented views of image i.

        Args:
            N (int): number of images (2N samples after gathering).

        Returns:
            tuple:
                mask (Tensor): (2N, 2N) uint8, 0 on the diagonal
                    (self-similarity), 1 elsewhere.
                pos_ind (tuple): per-row index of the positive pair in the
                    diagonal-removed (2N, 2N-1) similarity matrix; for both
                    rows 2i and 2i+1 the positive lands at column 2i once
                    the diagonal entry is removed.
                neg_mask (Tensor): (2N, 2N-1) uint8, 1 at negatives,
                    0 at the positive position.
        """
        mask = 1 - torch.eye(N * 2, dtype=torch.uint8).cuda()
        # column pattern [0, 0, 2, 2, 4, 4, ...]: positive column after
        # diagonal removal, shared by the two rows of each image pair
        pos_ind = (torch.arange(N * 2).cuda(),
                   2 * torch.arange(N, dtype=torch.long).unsqueeze(1).repeat(
                       1, 2).view(-1, 1).squeeze().cuda())
        neg_mask = torch.ones((N * 2, N * 2 - 1), dtype=torch.uint8).cuda()
        neg_mask[pos_ind] = 0
        return mask, pos_ind, neg_mask

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.neck.init_weights(init_linear='kaiming')

    def forward_backbone(self, img):
        """Forward backbone.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        x = self.backbone(img)
        return x

    def forward_train(self, img, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert img.dim() == 5, \
            "Input must have 5 dims, got: {}".format(img.dim())
        # flatten the two views into a single (2n) batch
        img = img.reshape(
            img.size(0) * 2, img.size(2), img.size(3), img.size(4))
        x = self.forward_backbone(img)  # 2n
        z = self.neck(x)[0]  # (2n)xd
        # L2-normalize the embeddings (epsilon guards against zero vectors)
        z = z / (torch.norm(z, p=2, dim=1, keepdim=True) + 1e-10)
        # gather embeddings from all GPUs while keeping gradients
        z = torch.cat(GatherLayer.apply(z), dim=0)  # (2N)xd
        assert z.size(0) % 2 == 0
        N = z.size(0) // 2
        # cosine similarity matrix (embeddings already normalized)
        s = torch.matmul(z, z.permute(1, 0))  # (2N)x(2N)
        mask, pos_ind, neg_mask = self._create_buffer(N)
        # remove diagonal, (2N)x(2N-1)
        s = torch.masked_select(s, mask == 1).reshape(s.size(0), -1)
        positive = s[pos_ind].unsqueeze(1)  # (2N)x1
        # select negative, (2N)x(2N-2)
        negative = torch.masked_select(s, neg_mask == 1).reshape(s.size(0), -1)
        losses = self.head(positive, negative)
        return losses

    def forward_test(self, img, **kwargs):
        """Testing is not supported; placeholder for API compatibility."""
        pass

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to train / test / extract behavior by `mode`."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
| 3,961 | 35.018182 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/rotation_pred.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
@MODELS.register_module
class RotationPred(nn.Module):
    """Rotation prediction.

    Implementation of "Unsupervised Representation Learning
    by Predicting Image Rotations (https://arxiv.org/abs/1803.07728)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self, backbone, head=None, pretrained=None):
        super(RotationPred, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        if head is not None:
            self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        """Initialize the backbone (optionally from a checkpoint) and head.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.head.init_weights(init_linear='kaiming')

    def forward_backbone(self, img):
        """Extract backbone features.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        return self.backbone(img)

    def forward_train(self, img, rot_label, **kwargs):
        """Compute the rotation-classification losses for a batch.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            rot_label (Tensor): Labels for the rotations.
            kwargs: Any keyword arguments to be used to forward.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        feats = self.forward_backbone(img)
        preds = self.head(feats)
        return self.head.loss(preds, rot_label)

    def forward_test(self, img, **kwargs):
        """Run the head on backbone features; return per-head outputs on CPU."""
        preds = self.head(self.forward_backbone(img))
        return {
            'head{}'.format(idx): pred.cpu()  # NxC
            for idx, pred in enumerate(preds)
        }

    def forward(self, img, rot_label=None, mode='train', **kwargs):
        """Dispatch to train / test / extract, flattening rotated views first."""
        if mode != "extract" and img.dim() == 5:  # Nx4xCxHxW
            assert rot_label.dim() == 2  # Nx4
            num_imgs = img.size(0) * img.size(1)
            img = img.view(num_imgs, img.size(2), img.size(3),
                           img.size(4))  # (4N)xCxHxW
            rot_label = torch.flatten(rot_label)  # (4N)
        if mode == 'train':
            return self.forward_train(img, rot_label, **kwargs)
        if mode == 'test':
            return self.forward_test(img, **kwargs)
        if mode == 'extract':
            return self.forward_backbone(img)
        raise Exception("No such mode: {}".format(mode))
| 3,294 | 33.684211 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/deepcluster.py | import numpy as np
import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from .utils import Sobel
@MODELS.register_module
class DeepCluster(nn.Module):
    """DeepCluster.

    Implementation of "Deep Clustering for Unsupervised Learning
    of Visual Features (https://arxiv.org/abs/1807.05520)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        with_sobel (bool): Whether to apply a Sobel filter on images. Default: False.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self,
                 backbone,
                 with_sobel=False,
                 neck=None,
                 head=None,
                 pretrained=None):
        super(DeepCluster, self).__init__()
        self.with_sobel = with_sobel
        if with_sobel:
            self.sobel_layer = Sobel()
        self.backbone = builder.build_backbone(backbone)
        self.neck = builder.build_neck(neck)
        if head is not None:
            self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)

        # reweight: start from uniform per-class loss weights; they are
        # recomputed from cluster sizes in set_reweight().
        # NOTE(review): `head` is read with attribute access here -- assumes
        # a Config-like object rather than a plain dict; confirm against the
        # config system in use.
        self.num_classes = head.num_classes
        self.loss_weight = torch.ones((self.num_classes, ),
                                      dtype=torch.float32).cuda()
        self.loss_weight /= self.loss_weight.sum()

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.neck.init_weights(init_linear='kaiming')
        self.head.init_weights(init_linear='normal')

    def forward_backbone(self, img):
        """Forward backbone.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        if self.with_sobel:
            img = self.sobel_layer(img)
        x = self.backbone(img)
        return x

    def forward_train(self, img, pseudo_label, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            pseudo_label (Tensor): Label assignments.
            kwargs: Any keyword arguments to be used to forward.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.forward_backbone(img)
        assert len(x) == 1
        feature = self.neck(x)
        outs = self.head(feature)
        loss_inputs = (outs, pseudo_label)
        losses = self.head.loss(*loss_inputs)
        return losses

    def forward_test(self, img, **kwargs):
        """Forward computation during testing; returns per-head outputs on CPU.

        NOTE(review): unlike forward_train, this feeds raw backbone features
        to the head without passing through the neck -- confirm that is the
        intended inference path.
        """
        x = self.forward_backbone(img)  # tuple
        outs = self.head(x)
        keys = ['head{}'.format(i) for i in range(len(outs))]
        out_tensors = [out.cpu() for out in outs]  # NxC
        return dict(zip(keys, out_tensors))

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to train / test / extract behavior by `mode`."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))

    def set_reweight(self, labels, reweight_pow=0.5):
        """Loss re-weighting.

        Re-weighting the loss according to the number of samples in each class.

        Args:
            labels (numpy.ndarray): Label assignments.
            reweight_pow (float): The power of re-weighting. Default: 0.5.
        """
        hist = np.bincount(
            labels, minlength=self.num_classes).astype(np.float32)
        # inverse-frequency weights softened by reweight_pow, then normalized
        inv_hist = (1. / (hist + 1e-10))**reweight_pow
        weight = inv_hist / inv_hist.sum()
        self.loss_weight.copy_(torch.from_numpy(weight))
        # rebuild the criterion so the head uses the new class weights
        self.head.criterion = nn.CrossEntropyLoss(weight=self.loss_weight)
| 4,526 | 33.557252 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/relative_loc.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
@MODELS.register_module
class RelativeLoc(nn.Module):
    """Relative patch location.

    Implementation of "Unsupervised Visual Representation Learning
    by Context Prediction (https://arxiv.org/abs/1505.05192)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self, backbone, neck=None, head=None, pretrained=None):
        super(RelativeLoc, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        if neck is not None:
            self.neck = builder.build_neck(neck)
        if head is not None:
            self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        """Initialize backbone (optionally from a checkpoint), neck and head.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.neck.init_weights(init_linear='normal')
        self.head.init_weights(init_linear='normal', std=0.005)

    def forward_backbone(self, img):
        """Extract backbone features.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        return self.backbone(img)

    def _encode_pair(self, img):
        """Split the channel-stacked patch pair, encode both, fuse via the neck."""
        patch_a, patch_b = torch.chunk(img, 2, dim=1)
        feat_a = self.forward_backbone(patch_a)  # tuple
        feat_b = self.forward_backbone(patch_b)  # tuple
        fused = (torch.cat((feat_a[0], feat_b[0]), dim=1),)
        return self.neck(fused)

    def forward_train(self, img, patch_label, **kwargs):
        """Compute the relative-location losses for a batch.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            patch_label (Tensor): Labels for the relative patch locations.
            kwargs: Any keyword arguments to be used to forward.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        preds = self.head(self._encode_pair(img))
        return self.head.loss(preds, patch_label)

    def forward_test(self, img, **kwargs):
        """Run inference on a patch pair; return per-head outputs on CPU."""
        preds = self.head(self._encode_pair(img))
        return {
            'head{}'.format(idx): pred.cpu()
            for idx, pred in enumerate(preds)
        }

    def forward(self, img, patch_label=None, mode='train', **kwargs):
        """Dispatch to train / test / extract, flattening patch pairs first."""
        if mode != "extract" and img.dim() == 5:  # Nx8x(2C)xHxW
            assert patch_label.dim() == 2  # Nx8
            img = img.view(
                img.size(0) * img.size(1), img.size(2), img.size(3),
                img.size(4))  # (8N)x(2C)xHxW
            patch_label = torch.flatten(patch_label)  # (8N)
        if mode == 'train':
            return self.forward_train(img, patch_label, **kwargs)
        if mode == 'test':
            return self.forward_test(img, **kwargs)
        if mode == 'extract':
            return self.forward_backbone(img)
        raise Exception("No such mode: {}".format(mode))
| 3,948 | 35.564815 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/moco.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
@MODELS.register_module
class MOCO(nn.Module):
    """MOCO.

    Implementation of "Momentum Contrast for Unsupervised Visual
    Representation Learning (https://arxiv.org/abs/1911.05722)".
    Part of the code is borrowed from:
    "https://github.com/facebookresearch/moco/blob/master/moco/builder.py".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
        queue_len (int): Number of negative keys maintained in the queue.
            Default: 65536.
        feat_dim (int): Dimension of compact feature vectors. Default: 128.
        momentum (float): Momentum coefficient for the momentum-updated encoder.
            Default: 0.999.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 pretrained=None,
                 queue_len=65536,
                 feat_dim=128,
                 momentum=0.999,
                 **kwargs):
        super(MOCO, self).__init__()
        # Query and key encoders share the same architecture; the key
        # encoder receives no gradients and is updated only by momentum.
        self.encoder_q = nn.Sequential(
            builder.build_backbone(backbone), builder.build_neck(neck))
        self.encoder_k = nn.Sequential(
            builder.build_backbone(backbone), builder.build_neck(neck))
        self.backbone = self.encoder_q[0]
        for param in self.encoder_k.parameters():
            param.requires_grad = False
        self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)
        self.queue_len = queue_len
        self.momentum = momentum
        # create the queue of negative keys (feat_dim x queue_len),
        # column-normalized like the keys it will store
        self.register_buffer("queue", torch.randn(feat_dim, queue_len))
        self.queue = nn.functional.normalize(self.queue, dim=0)
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.encoder_q[0].init_weights(pretrained=pretrained)
        self.encoder_q[1].init_weights(init_linear='kaiming')
        # key encoder starts as an exact copy of the query encoder
        for param_q, param_k in zip(self.encoder_q.parameters(),
                                    self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)

    @torch.no_grad()
    def _momentum_update_key_encoder(self):
        """Momentum update of the key encoder."""
        for param_q, param_k in zip(self.encoder_q.parameters(),
                                    self.encoder_k.parameters()):
            param_k.data = param_k.data * self.momentum + \
                           param_q.data * (1. - self.momentum)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, keys):
        """Update queue.

        Overwrites the oldest batch_size columns of the queue with the new
        keys and advances the ring-buffer pointer.
        """
        # gather keys before updating queue
        keys = concat_all_gather(keys)
        batch_size = keys.shape[0]
        ptr = int(self.queue_ptr)
        assert self.queue_len % batch_size == 0  # for simplicity
        # replace the keys at ptr (dequeue and enqueue)
        self.queue[:, ptr:ptr + batch_size] = keys.transpose(0, 1)
        ptr = (ptr + batch_size) % self.queue_len  # move pointer
        self.queue_ptr[0] = ptr

    @torch.no_grad()
    def _batch_shuffle_ddp(self, x):
        """Batch shuffle, for making use of BatchNorm.

        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # random shuffle index
        idx_shuffle = torch.randperm(batch_size_all).cuda()
        # broadcast to all gpus (every rank must use the same permutation)
        torch.distributed.broadcast(idx_shuffle, src=0)
        # index for restoring
        idx_unshuffle = torch.argsort(idx_shuffle)
        # shuffled index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this], idx_unshuffle

    @torch.no_grad()
    def _batch_unshuffle_ddp(self, x, idx_unshuffle):
        """Undo batch shuffle.

        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # restored index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this]

    def forward_train(self, img, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert img.dim() == 5, \
            "Input must have 5 dims, got: {}".format(img.dim())
        im_q = img[:, 0, ...].contiguous()
        im_k = img[:, 1, ...].contiguous()
        # compute query features
        q = self.encoder_q(im_q)[0]  # queries: NxC
        q = nn.functional.normalize(q, dim=1)

        # compute key features
        with torch.no_grad():  # no gradient to keys
            self._momentum_update_key_encoder()  # update the key encoder

            # shuffle for making use of BN
            im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)

            k = self.encoder_k(im_k)[0]  # keys: NxC
            k = nn.functional.normalize(k, dim=1)

            # undo shuffle
            k = self._batch_unshuffle_ddp(k, idx_unshuffle)

        # compute logits
        # Einstein sum is more intuitive
        # positive logits: Nx1
        l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
        # negative logits: NxK
        l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])

        losses = self.head(l_pos, l_neg)
        # enqueue the current keys as negatives for future batches
        self._dequeue_and_enqueue(k)

        return losses

    def forward_test(self, img, **kwargs):
        """Testing is not supported; placeholder for API compatibility."""
        pass

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to train / test / extract behavior by `mode`."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
# utils
@torch.no_grad()
def concat_all_gather(tensor):
    """Performs all_gather operation on the provided tensors.

    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
| 7,486 | 33.187215 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/moco_v3.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
import torch.nn.functional as F
@MODELS.register_module
class MOCOv3(nn.Module):
    """MoCo v3-style contrastive model.

    A query encoder (backbone + projector + predictor) is trained against a
    momentum-updated key encoder (backbone + projector) with a symmetric
    in-batch InfoNCE loss over the two augmented views.

    Args:
        backbone (dict): Config dict for the backbone ConvNet.
        projector (dict): Config dict for the projection neck. Default: None.
        predictor (dict): Config dict for the prediction neck (query side
            only). Default: None.
        base_momentum (float): Momentum coefficient for the key encoder.
            Default: 0.999.
        temperature (float): Softmax temperature of the contrastive loss.
            Default: 1.
    """

    def __init__(self,
                 backbone,
                 projector=None,
                 predictor=None,
                 base_momentum=0.999,
                 temperature=1,
                 **kwargs):
        super(MOCOv3, self).__init__()
        self.encoder_q = nn.Sequential(
            builder.build_backbone(backbone),
            builder.build_neck(projector),
            builder.build_neck(predictor))
        self.encoder_k = nn.Sequential(
            builder.build_backbone(backbone),
            builder.build_neck(projector))
        self.backbone = self.encoder_q[0]
        self.base_momentum = base_momentum
        self.momentum = base_momentum
        self.criterion = nn.CrossEntropyLoss()
        self.temperature = temperature
        # Copy initial weights to the key encoder and freeze it.
        # NOTE(review): zip truncates at the shorter encoder_k parameter
        # list; this relies on both encoders listing the shared
        # backbone+projector parameters first and in the same order, so the
        # query-only predictor parameters at the tail are skipped -- confirm.
        for param_q, param_k in zip(self.encoder_q.parameters(),
                                    self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)
            param_k.requires_grad = False

    @torch.no_grad()
    def _momentum_update_key_encoder(self):
        """Momentum update of the key encoder."""
        for param_q, param_k in zip(self.encoder_q.parameters(),
                                    self.encoder_k.parameters()):
            param_k.data = param_k.data * self.momentum + \
                           param_q.data * (1. - self.momentum)

    @torch.no_grad()
    def momentum_update(self):
        """Public hook to run the key-encoder momentum update."""
        self._momentum_update_key_encoder()

    def forward_train(self, img, **kwargs):
        """Compute the symmetric contrastive loss for a two-view batch.

        Args:
            img (Tensor): Two concatenated views of shape (N, 2, C, H, W).

        Returns:
            dict[str, Tensor]: with key 'loss'.
        """
        assert img.dim() == 5, \
            "Input must have 5 dims, got: {}".format(img.dim())
        x1, x2 = img[:, 0, ...].contiguous(), img[:, 1, ...].contiguous()
        # compute query features
        q1, q2 = self.encoder_q(x1)[0], self.encoder_q(x2)[0]  # queries: NxC
        q1, q2 = F.normalize(q1), F.normalize(q2)
        with torch.no_grad():
            k1, k2 = self.encoder_k(x1)[0], self.encoder_k(x2)[0]
            k1, k2 = F.normalize(k1), F.normalize(k2)
        # In-batch InfoNCE: positives sit on the diagonal of q @ k.T, so the
        # target class for sample i is i.
        labels = torch.arange(len(k1)).cuda()
        logits1, logits2 = q1 @ k2.T, q2 @ k1.T
        # Symmetric cross-entropy over both view pairings, scaled by
        # 2 * temperature.
        loss = 2 * self.temperature \
               * (self.criterion(logits1/self.temperature, labels)
                  + self.criterion(logits2/self.temperature, labels))
        return dict(loss=loss)

    def forward_test(self, img, **kwargs):
        """Return globally pooled backbone features on CPU.

        NOTE(review): pooling uses a hard-coded 7x7 kernel -- assumes the
        last feature map is 7x7 (e.g. 224x224 input on a stride-32
        backbone); confirm for other input sizes.
        """
        backbone_feats = self.backbone(img)
        last_layer_feat = nn.functional.avg_pool2d(backbone_feats[-1],7)
        last_layer_feat = last_layer_feat.view(last_layer_feat.size(0), -1)
        return dict(backbone=last_layer_feat.cpu())

    def forward(self, img, mode='train', **kwargs):
        """Dispatch to train / test / extract behavior by `mode`."""
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_test(img, **kwargs)
        else:
            raise Exception("No such mode: {}".format(mode))
| 3,177 | 33.923077 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/extractor.py | import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
import math
from sklearn.cluster import KMeans
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from .utils import Sobel
### For visualization.
@MODELS.register_module
class Extractor(nn.Module):
img_id = 0
def __init__(self,
backbone,
pretrained=None):
super(Extractor, self).__init__()
self.backbone = builder.build_backbone(backbone)
self.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
if pretrained is not None:
print_log('load model from: {}'.format(pretrained), logger='root')
self.backbone.init_weights(pretrained=pretrained)
def forward(self, img, mode='extract', **kwargs):
if mode == 'extract':
return self.forward_extract(img)
elif mode == 'forward_backbone':
return self.forward_backbone(img)
elif mode == 'multi_layer_map':
return self.forward_multi_layer_visulization(img)
elif mode == 'multi_layer_map_tmp':
return self.forward_multi_layer_visulization_tmp(img)
else:
raise Exception("No such mode: {}".format(mode))
def forward_extract(self, img, **kwargs):
backbone_feats = self.backbone(img)
backbone_feats = self.avgpool(backbone_feats[-1])
backbone_feats = backbone_feats.view(backbone_feats.size(0), -1)
backbone_feats = F.normalize(backbone_feats, p=2, dim=1)
return dict(backbone=backbone_feats.cpu())
def forward_backbone(self, img, **kwargs):
backbone_feats = self.backbone(img)
return backbone_feats
def forward_multi_layer_visulization(self, img, **kwargs):
backbone_feats = self.backbone(img)
batch_img = img.cpu()
out_dir = 'path to saving dir'
size_upsample = (448, 448)
mean = np.array([0.485, 0.456, 0.406])*255
std = np.array([0.229, 0.224, 0.225])*255
for i in range(3):
batch_img[:,i,...] = batch_img[:,i,...] * std[i] + mean[i]
batch_img = np.uint8(batch_img).transpose(0,2,3,1)
selected_ids = np.arange(200)
for b in range(len(batch_img)):
multi_resuts = []
for x in backbone_feats:
if self.img_id not in selected_ids: # only save these two
continue
global_x = self.avgpool(x).view(x.size(0), -1)
global_x = F.normalize(global_x, p=2, dim=1)
x = F.normalize(x, p=2, dim=1) # B, C, H, W
patch = x[b].permute(1,2,0) # H, W, C
patch = patch.view(-1, patch.size(-1))
patch_size = int(math.sqrt(patch.size(0)))
attention_map = self.get_cam(global_feat=global_x[b],
local_feats=patch,
img=batch_img[b],
patch_size=patch_size,
size_upsample=size_upsample)
cluster_map = self.get_clustered_local_feats(
local_feats=patch, img=batch_img[b], patch_size=patch_size, size_upsample=size_upsample
)
multi_resuts.append(cv2.hconcat([*attention_map, *cluster_map]))
final_img = cv2.vconcat(multi_resuts)
if self.img_id in selected_ids:
cv2.imwrite(f'{out_dir}/{self.img_id}.jpg', final_img)
print(f'\n saving to {out_dir}/{self.img_id}.jpg')
self.img_id+=1
if self.img_id > selected_ids[-1]:
exit()
@staticmethod
def get_cam(global_feat, local_feats, img, patch_size, size_upsample=(448,448)):
absolute_cam = (local_feats @ global_feat.unsqueeze(1)).view(-1)
normalized_cam = absolute_cam.clone()
absolute_cam *= 255
absolute_cam = np.uint8(absolute_cam.view(patch_size,-1).cpu().numpy())
absolute_cam = cv2.resize(absolute_cam, size_upsample)
absolute_cam = cv2.applyColorMap(absolute_cam, cv2.COLORMAP_JET)
normalized_cam = (normalized_cam - normalized_cam.min())/(normalized_cam.max() - normalized_cam.min())
normalized_cam *= 255
normalized_cam = np.uint8(normalized_cam.view(patch_size,-1).cpu().numpy())
normalized_cam = cv2.resize(normalized_cam, size_upsample)
normalized_cam = cv2.applyColorMap(normalized_cam, cv2.COLORMAP_JET)
_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
white = [255,255,255]
absolute_cam = absolute_cam * 0.4 + _img * 0.6
normalized_cam = normalized_cam * 0.4 + _img * 0.6
src_img = cv2.copyMakeBorder(np.uint8(_img),10, 10, 10, 10,cv2.BORDER_CONSTANT,value=white)
absolute_cam = cv2.copyMakeBorder(np.uint8(absolute_cam),10, 10, 10, 10,cv2.BORDER_CONSTANT,value=white)
normalized_cam = cv2.copyMakeBorder(np.uint8(normalized_cam),10, 10, 10, 10,cv2.BORDER_CONSTANT,value=white)
attention_map = [src_img, absolute_cam, normalized_cam]
return attention_map
@staticmethod
def get_clustered_local_feats(local_feats, img, patch_size, size_upsample=(448,448), num_clusters=[2,4,6]):
white = [255,255,255]
_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
local_feats = np.ascontiguousarray(local_feats.cpu().numpy())
cluster_results = []
for k in num_clusters:
kmeans = KMeans(n_clusters=k, random_state=0).fit(local_feats)
assignments = np.reshape(kmeans.labels_, (patch_size, patch_size))
cluster_map = cv2.applyColorMap(np.uint8(assignments/k * 255), cv2.COLORMAP_RAINBOW)
cluster_map = cv2.resize(cluster_map, size_upsample, interpolation=cv2.INTER_NEAREST)
cluster_result = cluster_map * 0.4 + _img * 0.6
cluster_result = cv2.copyMakeBorder(np.uint8(cluster_result),10, 10, 10, 10,cv2.BORDER_CONSTANT,value=white )
cluster_results.append(cluster_result)
return cluster_results
def forward_multi_layer_visulization_tmp(self, img, **kwargs):
    """Debug forward pass: cluster multi-stage local features of selected images.

    For images whose running id is in the hard-coded ``selected_ids``,
    cluster the L2-normalized spatial features of every backbone stage
    and return the per-stage assignments plus the de-normalized image.

    NOTE(review): throwaway debug code — it prints, mutates
    ``self.img_id``, and calls ``exit()`` once the last selected id has
    been passed. Do not call this from a training loop.

    Args:
        img (Tensor): Input images of shape (N, C, H, W), assumed to be
            normalized with the ImageNet mean/std below — TODO confirm.
        kwargs: Unused.

    Returns:
        tuple | None: ``(multi_resuts, batch_img[b])`` for a selected
        image id; otherwise only advances ``self.img_id`` (implicit
        ``None``), or never returns once ``exit()`` fires.
    """
    backbone_feats = self.backbone(img)
    batch_img = img.cpu()
    # Class-id -> name mapping; defined but not used below.
    novel_dict = {
        0 : 'colon_aca',
        1 : 'colon_benign',
        2 : 'lung_aca',
        3 : 'lung_benign',
        4 : 'lung_scc',
    }
    size_upsample = (448, 448)
    # Undo ImageNet-style normalization so the image can be visualized.
    mean = np.array([0.485, 0.456, 0.406])*255
    std = np.array([0.229, 0.224, 0.225])*255
    for i in range(3):
        batch_img[:,i,...] = batch_img[:,i,...] * std[i] + mean[i]
    batch_img = np.uint8(batch_img).transpose(0,2,3,1)  # NCHW -> NHWC
    # selected_ids = [62, 74, 113, 119, 154]
    selected_ids = [154]
    for b in range(len(batch_img)):
        multi_resuts = []
        print(self.img_id)
        for x in backbone_feats:
            if self.img_id not in selected_ids: # only save these two
                continue
            # Global descriptor; computed but not used afterwards.
            global_x = self.avgpool(x).view(x.size(0), -1)
            global_x = F.normalize(global_x, p=2, dim=1)
            x = F.normalize(x, p=2, dim=1) # B, C, H, W
            patch = x[b].permute(1,2,0) # H, W, C
            patch = patch.view(-1, patch.size(-1))
            # Feature map assumed square: H == W == patch_size.
            patch_size = int(math.sqrt(patch.size(0)))
            assignments_list = self.get_clustered_local_feats_tmp(
                local_feats=patch, img=batch_img[b], patch_size=patch_size, size_upsample=size_upsample
            )
            multi_resuts.append(assignments_list)
        if self.img_id in selected_ids:
            print(self.img_id, 'now in it')
            self.img_id+=1
            return multi_resuts, batch_img[b]
        self.img_id+=1
        if self.img_id > selected_ids[-1]:
            # Hard-stop the process once the last selected id is done.
            exit()
        # break
@staticmethod
def get_clustered_local_feats_tmp(local_feats, img, patch_size, size_upsample=(448, 448), num_clusters=(2, 4, 6)):
    """Cluster local features and return raw k-means label assignments.

    Unlike ``get_clustered_local_feats``, nothing is rendered: only the
    flat per-patch cluster labels are returned, one array per requested
    cluster count. The unused ``white`` local and the unused cv2 color
    conversion from the original were removed (dead code).

    Args:
        local_feats (Tensor): (num_patches, C) local feature vectors.
        img: Unused; kept for signature compatibility with callers.
        patch_size: Unused; kept for signature compatibility.
        size_upsample: Unused; kept for signature compatibility.
        num_clusters (sequence of int): Cluster counts to try. A tuple
            default replaces the original mutable list default.

    Returns:
        list[ndarray]: One (num_patches,) label array per cluster count.
    """
    local_feats = np.ascontiguousarray(local_feats.cpu().numpy())
    assignment_list = []
    for k in num_clusters:
        kmeans = KMeans(n_clusters=k, random_state=0).fit(local_feats)
        assignment_list.append(kmeans.labels_)
    return assignment_list
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/npid.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
@MODELS.register_module
class NPID(nn.Module):
    """NPID.

    Implementation of "Unsupervised Feature Learning via Non-parametric
    Instance Discrimination (https://arxiv.org/abs/1805.01978)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        memory_bank (dict): Config dict for module of memory banks. Default: None.
        neg_num (int): Number of negative samples for each image. Default: 65536.
        ensure_neg (bool): If False, there is a small probability
            that negative samples contain positive ones. Default: False.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 memory_bank=None,
                 neg_num=65536,
                 ensure_neg=False,
                 pretrained=None):
        super(NPID, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        self.neck = builder.build_neck(neck)
        self.head = builder.build_head(head)
        self.memory_bank = builder.build_memory(memory_bank)
        self.init_weights(pretrained=pretrained)

        self.neg_num = neg_num
        self.ensure_neg = ensure_neg

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.neck.init_weights(init_linear='kaiming')

    def forward_backbone(self, img):
        """Forward backbone.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        x = self.backbone(img)
        return x

    def forward_train(self, img, idx, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            idx (Tensor): Index corresponding to each image.
            kwargs: Any keyword arguments to be used to forward.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.forward_backbone(img)
        idx = idx.cuda()
        feature = self.neck(x)[0]
        feature = nn.functional.normalize(feature)  # BxC
        bs, feat_dim = feature.shape[:2]
        # Sample bs * neg_num negative indices from the alias sampler.
        neg_idx = self.memory_bank.multinomial.draw(bs * self.neg_num)
        if self.ensure_neg:
            # Redraw any negative that collides with its own positive idx.
            neg_idx = neg_idx.view(bs, -1)
            while True:
                wrong = (neg_idx == idx.view(-1, 1))
                if wrong.sum().item() > 0:
                    neg_idx[wrong] = self.memory_bank.multinomial.draw(
                        wrong.sum().item())
                else:
                    break
            neg_idx = neg_idx.flatten()

        # Look up positive/negative features from the memory bank.
        pos_feat = torch.index_select(self.memory_bank.feature_bank, 0,
                                      idx)  # BXC
        neg_feat = torch.index_select(self.memory_bank.feature_bank, 0,
                                      neg_idx).view(bs, self.neg_num,
                                                    feat_dim)  # BxKxC

        # Positive logits: per-sample dot product (Bx1);
        # negative logits: batched matmul (BxK).
        pos_logits = torch.einsum('nc,nc->n',
                                  [pos_feat, feature]).unsqueeze(-1)
        neg_logits = torch.bmm(neg_feat, feature.unsqueeze(2)).squeeze(2)

        losses = self.head(pos_logits, neg_logits)
        # update memory bank
        with torch.no_grad():
            self.memory_bank.update(idx, feature.detach())

        return losses

    def forward_test(self, img, **kwargs):
        # Not implemented for NPID.
        pass

    def forward(self, img, mode='train', **kwargs):
        # Dispatch on mode: 'train' | 'test' | 'extract'.
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
| 4,658 | 34.564885 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/byol.py | import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
@MODELS.register_module
class BYOL(nn.Module):
    """BYOL.

    Implementation of "Bootstrap Your Own Latent: A New Approach to
    Self-Supervised Learning (https://arxiv.org/abs/2006.07733)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
        base_momentum (float): The base momentum coefficient for the target network.
            Default: 0.996.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 pretrained=None,
                 base_momentum=0.996,
                 **kwargs):
        super(BYOL, self).__init__()
        # Online and target nets share the same architecture (backbone+neck);
        # the target net is updated only by EMA, never by gradients.
        self.online_net = nn.Sequential(
            builder.build_backbone(backbone), builder.build_neck(neck))
        self.target_net = nn.Sequential(
            builder.build_backbone(backbone), builder.build_neck(neck))
        self.backbone = self.online_net[0]
        for param in self.target_net.parameters():
            param.requires_grad = False
        self.head = builder.build_head(head)
        self.init_weights(pretrained=pretrained)

        self.base_momentum = base_momentum
        self.momentum = base_momentum

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.online_net[0].init_weights(pretrained=pretrained) # backbone
        self.online_net[1].init_weights(init_linear='kaiming') # projection
        # Start the target net as an exact copy of the online net.
        for param_ol, param_tgt in zip(self.online_net.parameters(),
                                       self.target_net.parameters()):
            param_tgt.data.copy_(param_ol.data)
        # init the predictor in the head
        self.head.init_weights()

    @torch.no_grad()
    def _momentum_update(self):
        """Momentum update of the target network."""
        # EMA: target = m * target + (1 - m) * online.
        for param_ol, param_tgt in zip(self.online_net.parameters(),
                                       self.target_net.parameters()):
            param_tgt.data = param_tgt.data * self.momentum + \
                param_ol.data * (1. - self.momentum)

    @torch.no_grad()
    def momentum_update(self):
        # Public wrapper, typically invoked by a training hook.
        self._momentum_update()

    def forward_train(self, img, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert img.dim() == 5, \
            "Input must have 5 dims, got: {}".format(img.dim())
        img_v1 = img[:, 0, ...].contiguous()
        img_v2 = img[:, 1, ...].contiguous()
        # compute query features
        proj_online_v1 = self.online_net(img_v1)[0]
        proj_online_v2 = self.online_net(img_v2)[0]
        # Target projections carry no gradient.
        with torch.no_grad():
            proj_target_v1 = self.target_net(img_v1)[0].clone().detach()
            proj_target_v2 = self.target_net(img_v2)[0].clone().detach()

        # Symmetric loss over the two augmented views.
        loss = self.head(proj_online_v1, proj_target_v2)['loss'] + \
            self.head(proj_online_v2, proj_target_v1)['loss']
        return dict(loss=loss)

    def forward_test(self, img, **kwargs):
        # Not implemented for BYOL.
        pass

    def forward(self, img, mode='train', **kwargs):
        # Dispatch on mode: 'train' | 'test' | 'extract'.
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))
| 4,225 | 36.070175 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/builder.py | from torch import nn
from openselfsup.utils import build_from_cfg
from .registry import (BACKBONES, MODELS, NECKS, HEADS, MEMORIES, LOSSES)
def build(cfg, registry, default_args=None):
    """Build a module (or a Sequential of modules) from config.

    Args:
        cfg (dict, list[dict]): The config of modules, it is either a dict
            or a list of configs.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the module.
            Default: None.

    Returns:
        nn.Module: A built nn module; a list config yields ``nn.Sequential``.
    """
    # Single-config case: build it directly.
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    # List of configs: build each one and chain them sequentially.
    built = []
    for sub_cfg in cfg:
        built.append(build_from_cfg(sub_cfg, registry, default_args))
    return nn.Sequential(*built)
def build_backbone(cfg):
    """Build a backbone network from its config dict (BACKBONES registry)."""
    return build(cfg, BACKBONES)
def build_neck(cfg):
    """Build a neck module from its config dict (NECKS registry)."""
    return build(cfg, NECKS)
def build_memory(cfg):
    """Build a memory bank module from its config dict (MEMORIES registry)."""
    return build(cfg, MEMORIES)
def build_head(cfg):
    """Build a head module from its config dict (HEADS registry)."""
    return build(cfg, HEADS)
def build_loss(cfg):
    """Build a loss module from its config dict (LOSSES registry)."""
    return build(cfg, LOSSES)
def build_model(cfg):
    """Build a full model from its config dict (MODELS registry)."""
    return build(cfg, MODELS)
| 1,274 | 21.368421 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/odc.py | import numpy as np
import torch
import torch.nn as nn
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from .utils import Sobel
@MODELS.register_module
class ODC(nn.Module):
    """ODC.

    Official implementation of
    "Online Deep Clustering for Unsupervised Representation Learning
    (https://arxiv.org/abs/2006.10645)".

    Args:
        backbone (dict): Config dict for module of backbone ConvNet.
        with_sobel (bool): Whether to apply a Sobel filter on images. Default: False.
        neck (dict): Config dict for module of deep features to compact feature vectors.
            Default: None.
        head (dict): Config dict for module of loss functions. Default: None.
        memory_bank (dict): Module of memory banks. Default: None.
        pretrained (str, optional): Path to pre-trained weights. Default: None.
    """

    def __init__(self,
                 backbone,
                 with_sobel=False,
                 neck=None,
                 head=None,
                 memory_bank=None,
                 pretrained=None):
        super(ODC, self).__init__()
        self.with_sobel = with_sobel
        if with_sobel:
            self.sobel_layer = Sobel()
        self.backbone = builder.build_backbone(backbone)
        self.neck = builder.build_neck(neck)
        if head is not None:
            self.head = builder.build_head(head)
        if memory_bank is not None:
            self.memory_bank = builder.build_memory(memory_bank)
        self.init_weights(pretrained=pretrained)

        # set reweight tensors
        # Start with uniform class weights; set_reweight() refines them.
        self.num_classes = head.num_classes
        self.loss_weight = torch.ones((self.num_classes, ),
                                      dtype=torch.float32).cuda()
        self.loss_weight /= self.loss_weight.sum()

    def init_weights(self, pretrained=None):
        """Initialize the weights of model.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Default: None.
        """
        if pretrained is not None:
            print_log('load model from: {}'.format(pretrained), logger='root')
        self.backbone.init_weights(pretrained=pretrained)
        self.neck.init_weights(init_linear='kaiming')
        self.head.init_weights(init_linear='normal')

    def forward_backbone(self, img):
        """Forward backbone.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            tuple[Tensor]: backbone outputs.
        """
        if self.with_sobel:
            img = self.sobel_layer(img)
        x = self.backbone(img)
        return x

    def forward_train(self, img, idx, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            idx (Tensor): Index corresponding to each image.
            kwargs: Any keyword arguments to be used to forward.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # forward & backward
        x = self.forward_backbone(img)
        feature = self.neck(x)
        outs = self.head(feature)
        # Pseudo-labels come from the memory bank's label bank.
        if self.memory_bank.label_bank.is_cuda:
            loss_inputs = (outs, self.memory_bank.label_bank[idx])
        else:
            loss_inputs = (outs, self.memory_bank.label_bank[idx.cpu()].cuda())
        losses = self.head.loss(*loss_inputs)

        # update samples memory
        # change_ratio = fraction of samples whose cluster label changed.
        change_ratio = self.memory_bank.update_samples_memory(
            idx, feature[0].detach())
        losses['change_ratio'] = change_ratio

        return losses

    def forward_test(self, img, **kwargs):
        x = self.forward_backbone(img)  # tuple
        outs = self.head(x)
        keys = ['head{}'.format(i) for i in range(len(outs))]
        out_tensors = [out.cpu() for out in outs]  # NxC
        return dict(zip(keys, out_tensors))

    def forward(self, img, mode='train', **kwargs):
        # Dispatch on mode: 'train' | 'test' | 'extract'.
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.forward_backbone(img)
        else:
            raise Exception("No such mode: {}".format(mode))

    def set_reweight(self, labels=None, reweight_pow=0.5):
        """Loss re-weighting.

        Re-weighting the loss according to the number of samples in each class.

        Args:
            labels (numpy.ndarray): Label assignments. Default: None.
            reweight_pow (float): The power of re-weighting. Default: 0.5.
        """
        if labels is None:
            if self.memory_bank.label_bank.is_cuda:
                labels = self.memory_bank.label_bank.cpu().numpy()
            else:
                labels = self.memory_bank.label_bank.numpy()
        hist = np.bincount(
            labels, minlength=self.num_classes).astype(np.float32)
        # Weight each class by its inverse frequency (to the given power),
        # normalized to sum to 1.
        inv_hist = (1. / (hist + 1e-5))**reweight_pow
        weight = inv_hist / inv_hist.sum()
        self.loss_weight.copy_(torch.from_numpy(weight))
        self.head.criterion = nn.CrossEntropyLoss(weight=self.loss_weight)
| 5,322 | 34.966216 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/necks.py | import torch
import torch.nn as nn
from packaging import version
from mmcv.cnn import kaiming_init, normal_init
from .registry import NECKS
from .utils import build_norm_layer
def _init_weights(module, init_linear='normal', std=0.01, bias=0.):
assert init_linear in ['normal', 'kaiming'], \
"Undefined init_linear: {}".format(init_linear)
for m in module.modules():
if isinstance(m, nn.Linear):
if init_linear == 'normal':
normal_init(m, std=std, bias=bias)
else:
kaiming_init(m, mode='fan_in', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d,
nn.GroupNorm, nn.SyncBatchNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@NECKS.register_module
class LinearNeck(nn.Module):
    """Linear neck: optional global average pooling followed by one fc layer."""

    def __init__(self, in_channels, out_channels, with_avg_pool=True):
        super(LinearNeck, self).__init__()
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_channels, out_channels)

    def init_weights(self, init_linear='normal'):
        """Initialize the fc layer via the module-level ``_init_weights``."""
        _init_weights(self, init_linear)

    def forward(self, x):
        """Map a length-1 tuple of (N, C, H, W) features to [N x out_channels]."""
        assert len(x) == 1
        feat = x[0]
        if self.with_avg_pool:
            feat = self.avgpool(feat)
        flat = feat.view(feat.size(0), -1)
        return [self.fc(flat)]
@NECKS.register_module
class RelativeLocNeck(nn.Module):
    """Relative patch location neck: fc-bn-relu-dropout.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 sync_bn=False,
                 with_avg_pool=True):
        super(RelativeLocNeck, self).__init__()
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # SyncBatchNorm in torch < 1.4 needs 4D input, so 2D tensors are
        # expanded to (N, C, 1, 1) in _forward_syncbn on old versions.
        if version.parse(torch.__version__) < version.parse("1.4.0"):
            self.expand_for_syncbn = True
        else:
            self.expand_for_syncbn = False

        # Input is a pair of concatenated patch features, hence `* 2`.
        self.fc = nn.Linear(in_channels * 2, out_channels)
        if sync_bn:
            _, self.bn = build_norm_layer(
                dict(type='SyncBN', momentum=0.003),
                out_channels)
        else:
            self.bn = nn.BatchNorm1d(
                out_channels, momentum=0.003)
        self.relu = nn.ReLU(inplace=True)
        self.drop = nn.Dropout()
        self.sync_bn = sync_bn

    def init_weights(self, init_linear='normal'):
        # Smaller std / positive bias than the module default.
        _init_weights(self, init_linear, std=0.005, bias=0.1)

    def _forward_syncbn(self, module, x):
        # Work around SyncBN's 4D-input requirement on old torch versions.
        assert x.dim() == 2
        if self.expand_for_syncbn:
            x = module(x.unsqueeze(-1).unsqueeze(-1)).squeeze(-1).squeeze(-1)
        else:
            x = module(x)
        return x

    def forward(self, x):
        # x: length-1 tuple of a (N, C, H, W) feature map.
        assert len(x) == 1
        x = x[0]
        if self.with_avg_pool:
            x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        if self.sync_bn:
            x = self._forward_syncbn(self.bn, x)
        else:
            x = self.bn(x)
        x = self.relu(x)
        x = self.drop(x)
        return [x]
@NECKS.register_module
class NonLinearNeckV0(nn.Module):
    """The non-linear neck in ODC, fc-bn-relu-dropout-fc-relu.
    """

    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 sync_bn=False,
                 with_avg_pool=True):
        super(NonLinearNeckV0, self).__init__()
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # SyncBatchNorm in torch < 1.4 needs 4D input, so 2D tensors are
        # expanded to (N, C, 1, 1) in _forward_syncbn on old versions.
        if version.parse(torch.__version__) < version.parse("1.4.0"):
            self.expand_for_syncbn = True
        else:
            self.expand_for_syncbn = False

        self.fc0 = nn.Linear(in_channels, hid_channels)
        if sync_bn:
            _, self.bn0 = build_norm_layer(
                dict(type='SyncBN', momentum=0.001, affine=False),
                hid_channels)
        else:
            self.bn0 = nn.BatchNorm1d(
                hid_channels, momentum=0.001, affine=False)

        self.fc1 = nn.Linear(hid_channels, out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.drop = nn.Dropout()
        self.sync_bn = sync_bn

    def init_weights(self, init_linear='normal'):
        _init_weights(self, init_linear)

    def _forward_syncbn(self, module, x):
        # Work around SyncBN's 4D-input requirement on old torch versions.
        assert x.dim() == 2
        if self.expand_for_syncbn:
            x = module(x.unsqueeze(-1).unsqueeze(-1)).squeeze(-1).squeeze(-1)
        else:
            x = module(x)
        return x

    def forward(self, x):
        # x: length-1 tuple of a (N, C, H, W) feature map.
        assert len(x) == 1
        x = x[0]
        if self.with_avg_pool:
            x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc0(x)
        if self.sync_bn:
            x = self._forward_syncbn(self.bn0, x)
        else:
            x = self.bn0(x)
        x = self.relu(x)
        x = self.drop(x)
        x = self.fc1(x)
        x = self.relu(x)
        return [x]
@NECKS.register_module
class NonLinearNeckV1(nn.Module):
    """The non-linear neck in MoCo v2: fc-relu-fc."""

    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 with_avg_pool=True):
        super(NonLinearNeckV1, self).__init__()
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        layers = [
            nn.Linear(in_channels, hid_channels),
            nn.ReLU(inplace=True),
            nn.Linear(hid_channels, out_channels),
        ]
        self.mlp = nn.Sequential(*layers)

    def init_weights(self, init_linear='normal'):
        """Initialize the fc layers via the module-level ``_init_weights``."""
        _init_weights(self, init_linear)

    def forward(self, x):
        """Project a length-1 tuple of (N, C, H, W) features through the MLP."""
        assert len(x) == 1
        feat = x[0]
        if self.with_avg_pool:
            feat = self.avgpool(feat)
        flat = feat.view(feat.size(0), -1)
        return [self.mlp(flat)]
@NECKS.register_module
class NonLinearNeckV2(nn.Module):
    """The non-linear neck in BYOL: fc-bn-relu-fc."""

    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 with_avg_pool=True):
        super(NonLinearNeckV2, self).__init__()
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        layers = [
            nn.Linear(in_channels, hid_channels),
            nn.BatchNorm1d(hid_channels),
            nn.ReLU(inplace=True),
            nn.Linear(hid_channels, out_channels),
        ]
        self.mlp = nn.Sequential(*layers)

    def init_weights(self, init_linear='normal'):
        """Initialize the fc/bn layers via the module-level ``_init_weights``."""
        _init_weights(self, init_linear)

    def forward(self, x):
        """Project a length-1 tuple of (N, C, H, W) features through the MLP."""
        assert len(x) == 1, "Got: {}".format(len(x))
        feat = x[0]
        if self.with_avg_pool:
            feat = self.avgpool(feat)
        flat = feat.view(feat.size(0), -1)
        return [self.mlp(flat)]
@NECKS.register_module
class NonLinearNeckSimCLR(nn.Module):
    """SimCLR non-linear neck.

    Structure: fc(no_bias)-bn(has_bias)-[relu-fc(no_bias)-bn(no_bias)].
        The substructures in [] can be repeated. For the SimCLR default setting,
        the repeat time is 1.
    However, PyTorch does not support to specify (weight=True, bias=False).
        It only support \"affine\" including the weight and bias. Hence, the
        second BatchNorm has bias in this implementation. This is different from
        the official implementation of SimCLR.
    Since SyncBatchNorm in pytorch<1.4.0 does not support 2D input, the input is
        expanded to 4D with shape: (N,C,1,1). Not sure if this workaround
        has no bugs. See the pull request here:
        https://github.com/pytorch/pytorch/pull/29626.

    Args:
        num_layers (int): Number of fc layers, it is 2 in the SimCLR default setting.
    """

    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 num_layers=2,
                 sync_bn=True,
                 with_bias=False,
                 with_last_bn=True,
                 with_avg_pool=True):
        super(NonLinearNeckSimCLR, self).__init__()
        self.sync_bn = sync_bn
        self.with_last_bn = with_last_bn
        self.with_avg_pool = with_avg_pool
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # SyncBatchNorm in torch < 1.4 needs 4D input; see class docstring.
        if version.parse(torch.__version__) < version.parse("1.4.0"):
            self.expand_for_syncbn = True
        else:
            self.expand_for_syncbn = False

        self.relu = nn.ReLU(inplace=True)
        self.fc0 = nn.Linear(in_channels, hid_channels, bias=with_bias)
        if sync_bn:
            _, self.bn0 = build_norm_layer(
                dict(type='SyncBN'), hid_channels)
        else:
            self.bn0 = nn.BatchNorm1d(hid_channels)

        # Remaining layers are registered dynamically; their attribute
        # names are recorded so forward() can retrieve them in order.
        # bn_names holds None for a layer without a trailing BN.
        self.fc_names = []
        self.bn_names = []
        for i in range(1, num_layers):
            this_channels = out_channels if i == num_layers - 1 \
                else hid_channels
            self.add_module(
                "fc{}".format(i),
                nn.Linear(hid_channels, this_channels, bias=with_bias))
            self.fc_names.append("fc{}".format(i))
            if i != num_layers - 1 or self.with_last_bn:
                if sync_bn:
                    self.add_module(
                        "bn{}".format(i),
                        build_norm_layer(dict(type='SyncBN'), this_channels)[1])
                else:
                    self.add_module(
                        "bn{}".format(i),
                        nn.BatchNorm1d(this_channels))
                self.bn_names.append("bn{}".format(i))
            else:
                self.bn_names.append(None)

    def init_weights(self, init_linear='normal'):
        _init_weights(self, init_linear)

    def _forward_syncbn(self, module, x):
        # Work around SyncBN's 4D-input requirement on old torch versions.
        assert x.dim() == 2
        if self.expand_for_syncbn:
            x = module(x.unsqueeze(-1).unsqueeze(-1)).squeeze(-1).squeeze(-1)
        else:
            x = module(x)
        return x

    def forward(self, x):
        # x: length-1 tuple of a (N, C, H, W) feature map.
        assert len(x) == 1
        x = x[0]
        if self.with_avg_pool:
            x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc0(x)
        if self.sync_bn:
            x = self._forward_syncbn(self.bn0, x)
        else:
            x = self.bn0(x)
        # Apply the dynamically registered fc/bn layers in order.
        for fc_name, bn_name in zip(self.fc_names, self.bn_names):
            fc = getattr(self, fc_name)
            x = self.relu(x)
            x = fc(x)
            if bn_name is not None:
                bn = getattr(self, bn_name)
                if self.sync_bn:
                    x = self._forward_syncbn(bn, x)
                else:
                    x = bn(x)
        return [x]
@NECKS.register_module
class AvgPoolNeck(nn.Module):
    """Average pooling neck: global-average-pool the single input feature map."""

    def __init__(self):
        super(AvgPoolNeck, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))

    def init_weights(self, **kwargs):
        """No parameters to initialize."""
        pass

    def forward(self, x):
        """Pool a length-1 tuple of (N, C, H, W) features to (N, C, 1, 1)."""
        assert len(x) == 1
        pooled = self.avg_pool(x[0])
        return [pooled]
| 11,269 | 30.836158 | 85 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/memories/simple_memory.py | import torch
import torch.nn as nn
import torch.distributed as dist
from mmcv.runner import get_dist_info
from openselfsup.utils import AliasMethod
from ..registry import MEMORIES
@MEMORIES.register_module
class SimpleMemory(nn.Module):
    """Simple memory bank for NPID.

    Args:
        length (int): Number of features stored in the memory bank.
        feat_dim (int): Dimension of stored features.
        momentum (float): Momentum coefficient for updating features.
    """

    def __init__(self, length, feat_dim, momentum, **kwargs):
        super(SimpleMemory, self).__init__()
        self.rank, self.num_replicas = get_dist_info()
        # Bank starts as random unit vectors on GPU.
        self.feature_bank = torch.randn(length, feat_dim).cuda()
        self.feature_bank = nn.functional.normalize(self.feature_bank)
        self.momentum = momentum
        # Uniform alias-method sampler over all bank entries.
        self.multinomial = AliasMethod(torch.ones(length))
        self.multinomial.cuda()

    def update(self, ind, feature):
        """Update features in memory bank.

        Args:
            ind (Tensor): Indices for the batch of features.
            feature (Tensor): Batch of features.
        """
        feature_norm = nn.functional.normalize(feature)
        # Collect the batch from all replicas so every rank applies
        # the same update.
        ind, feature_norm = self._gather(ind, feature_norm)
        feature_old = self.feature_bank[ind, ...]
        # EMA update followed by re-normalization to keep unit length.
        feature_new = (1 - self.momentum) * feature_old + \
            self.momentum * feature_norm
        feature_new_norm = nn.functional.normalize(feature_new)
        self.feature_bank[ind, ...] = feature_new_norm

    def _gather(self, ind, feature):
        """Gather indices and features.

        Args:
            ind (Tensor): Indices for the batch of features.
            feature (Tensor): Batch of features.

        Returns:
            Tensor: Gathered indices.
            Tensor: Gathered features.
        """
        ind_gathered = [
            torch.ones_like(ind).cuda() for _ in range(self.num_replicas)
        ]
        feature_gathered = [
            torch.ones_like(feature).cuda() for _ in range(self.num_replicas)
        ]
        dist.all_gather(ind_gathered, ind)
        dist.all_gather(feature_gathered, feature)
        ind_gathered = torch.cat(ind_gathered, dim=0)
        feature_gathered = torch.cat(feature_gathered, dim=0)
        return ind_gathered, feature_gathered
| 2,305 | 33.939394 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/memories/odc_memory.py | import numpy as np
from sklearn.cluster import KMeans
import torch
import torch.nn as nn
import torch.distributed as dist
from mmcv.runner import get_dist_info
from ..registry import MEMORIES
@MEMORIES.register_module
class ODCMemory(nn.Module):
"""Memory modules for ODC.
Args:
length (int): Number of features stored in samples memory.
feat_dim (int): Dimension of stored features.
momentum (float): Momentum coefficient for updating features.
num_classes (int): Number of clusters.
min_cluster (int): Minimal cluster size.
"""
def __init__(self, length, feat_dim, momentum, num_classes, min_cluster,
**kwargs):
super(ODCMemory, self).__init__()
self.rank, self.num_replicas = get_dist_info()
if self.rank == 0:
self.feature_bank = torch.zeros((length, feat_dim),
dtype=torch.float32)
self.label_bank = torch.zeros((length, ), dtype=torch.long)
self.centroids = torch.zeros((num_classes, feat_dim),
dtype=torch.float32).cuda()
self.kmeans = KMeans(n_clusters=2, random_state=0, max_iter=20)
self.feat_dim = feat_dim
self.initialized = False
self.momentum = momentum
self.num_classes = num_classes
self.min_cluster = min_cluster
self.debug = kwargs.get('debug', False)
def init_memory(self, feature, label):
"""Initialize memory modules."""
self.initialized = True
self.label_bank.copy_(torch.from_numpy(label).long())
# make sure no empty clusters
assert (np.bincount(label, minlength=self.num_classes) != 0).all()
if self.rank == 0:
feature /= (np.linalg.norm(feature, axis=1).reshape(-1, 1) + 1e-10)
self.feature_bank.copy_(torch.from_numpy(feature))
centroids = self._compute_centroids()
self.centroids.copy_(centroids)
dist.broadcast(self.centroids, 0)
def _compute_centroids_ind(self, cinds):
"""Compute a few centroids."""
assert self.rank == 0
num = len(cinds)
centroids = torch.zeros((num, self.feat_dim), dtype=torch.float32)
for i, c in enumerate(cinds):
ind = np.where(self.label_bank.numpy() == c)[0]
centroids[i, :] = self.feature_bank[ind, :].mean(dim=0)
return centroids
def _compute_centroids(self):
"""Compute all non-empty centroids."""
assert self.rank == 0
l = self.label_bank.numpy()
argl = np.argsort(l)
sortl = l[argl]
diff_pos = np.where(sortl[1:] - sortl[:-1] != 0)[0] + 1
start = np.insert(diff_pos, 0, 0)
end = np.insert(diff_pos, len(diff_pos), len(l))
class_start = sortl[start]
# keep empty class centroids unchanged
centroids = self.centroids.cpu().clone()
for i, st, ed in zip(class_start, start, end):
centroids[i, :] = self.feature_bank[argl[st:ed], :].mean(dim=0)
return centroids
def _gather(self, ind, feature):
"""Gather indices and features."""
# if not hasattr(self, 'ind_gathered'):
# self.ind_gathered = [torch.ones_like(ind).cuda()
# for _ in range(self.num_replicas)]
# if not hasattr(self, 'feature_gathered'):
# self.feature_gathered = [torch.ones_like(feature).cuda()
# for _ in range(self.num_replicas)]
ind_gathered = [
torch.ones_like(ind).cuda() for _ in range(self.num_replicas)
]
feature_gathered = [
torch.ones_like(feature).cuda() for _ in range(self.num_replicas)
]
dist.all_gather(ind_gathered, ind)
dist.all_gather(feature_gathered, feature)
ind_gathered = torch.cat(ind_gathered, dim=0)
feature_gathered = torch.cat(feature_gathered, dim=0)
return ind_gathered, feature_gathered
def update_samples_memory(self, ind, feature):
"""Update samples memory."""
assert self.initialized
feature_norm = feature / (feature.norm(dim=1).view(-1, 1) + 1e-10
) # normalize
ind, feature_norm = self._gather(
ind, feature_norm) # ind: (N*w), feature: (N*w)xk, cuda tensor
ind = ind.cpu()
if self.rank == 0:
feature_old = self.feature_bank[ind, ...].cuda()
feature_new = (1 - self.momentum) * feature_old + \
self.momentum * feature_norm
feature_norm = feature_new / (
feature_new.norm(dim=1).view(-1, 1) + 1e-10)
self.feature_bank[ind, ...] = feature_norm.cpu()
dist.barrier()
dist.broadcast(feature_norm, 0)
# compute new labels
similarity_to_centroids = torch.mm(self.centroids,
feature_norm.permute(1, 0)) # CxN
newlabel = similarity_to_centroids.argmax(dim=0) # cuda tensor
newlabel_cpu = newlabel.cpu()
change_ratio = (newlabel_cpu !=
self.label_bank[ind]).sum().float().cuda() \
/ float(newlabel_cpu.shape[0])
self.label_bank[ind] = newlabel_cpu.clone() # copy to cpu
return change_ratio
def deal_with_small_clusters(self):
"""Deal with small clusters."""
# check empty class
hist = np.bincount(self.label_bank.numpy(), minlength=self.num_classes)
small_clusters = np.where(hist < self.min_cluster)[0].tolist()
if self.debug and self.rank == 0:
print("mincluster: {}, num of small class: {}".format(
hist.min(), len(small_clusters)))
if len(small_clusters) == 0:
return
# re-assign samples in small clusters to make them empty
for s in small_clusters:
ind = np.where(self.label_bank.numpy() == s)[0]
if len(ind) > 0:
inclusion = torch.from_numpy(
np.setdiff1d(
np.arange(self.num_classes),
np.array(small_clusters),
assume_unique=True)).cuda()
if self.rank == 0:
target_ind = torch.mm(
self.centroids[inclusion, :],
self.feature_bank[ind, :].cuda().permute(
1, 0)).argmax(dim=0)
target = inclusion[target_ind]
else:
target = torch.zeros((ind.shape[0], ),
dtype=torch.int64).cuda()
dist.all_reduce(target)
self.label_bank[ind] = torch.from_numpy(target.cpu().numpy())
# deal with empty cluster
self._redirect_empty_clusters(small_clusters)
def update_centroids_memory(self, cinds=None):
"""Update centroids memory."""
if self.rank == 0:
if self.debug:
print("updating centroids ...")
if cinds is None:
center = self._compute_centroids()
self.centroids.copy_(center)
else:
center = self._compute_centroids_ind(cinds)
self.centroids[
torch.LongTensor(cinds).cuda(), :] = center.cuda()
dist.broadcast(self.centroids, 0)
def _partition_max_cluster(self, max_cluster):
"""Partition the largest cluster into two sub-clusters."""
assert self.rank == 0
max_cluster_inds = np.where(self.label_bank == max_cluster)[0]
assert len(max_cluster_inds) >= 2
max_cluster_features = self.feature_bank[max_cluster_inds, :]
if np.any(np.isnan(max_cluster_features.numpy())):
raise Exception("Has nan in features.")
kmeans_ret = self.kmeans.fit(max_cluster_features)
sub_cluster1_ind = max_cluster_inds[kmeans_ret.labels_ == 0]
sub_cluster2_ind = max_cluster_inds[kmeans_ret.labels_ == 1]
if not (len(sub_cluster1_ind) > 0 and len(sub_cluster2_ind) > 0):
print(
"Warning: kmeans partition fails, resort to random partition.")
sub_cluster1_ind = np.random.choice(
max_cluster_inds, len(max_cluster_inds) // 2, replace=False)
sub_cluster2_ind = np.setdiff1d(
max_cluster_inds, sub_cluster1_ind, assume_unique=True)
return sub_cluster1_ind, sub_cluster2_ind
    def _redirect_empty_clusters(self, empty_clusters):
        """Re-direct empty clusters.

        For each empty cluster, the currently largest cluster is split in
        two on rank 0; one half keeps its label and the other half is
        relabeled with the empty cluster's id. Sizes and indices are
        synchronized to all ranks via all_reduce/broadcast, then the two
        affected centroids are refreshed.
        """
        for e in empty_clusters:
            assert (self.label_bank != e).all().item(), \
                "Cluster #{} is not an empty cluster.".format(e)
            # the largest cluster donates half of its samples
            max_cluster = np.bincount(
                self.label_bank, minlength=self.num_classes).argmax().item()
            # gather partitioning indices
            if self.rank == 0:
                sub_cluster1_ind, sub_cluster2_ind = self._partition_max_cluster(
                    max_cluster)
                size1 = torch.LongTensor([len(sub_cluster1_ind)]).cuda()
                size2 = torch.LongTensor([len(sub_cluster2_ind)]).cuda()
                sub_cluster1_ind_tensor = torch.from_numpy(
                    sub_cluster1_ind).long().cuda()
                sub_cluster2_ind_tensor = torch.from_numpy(
                    sub_cluster2_ind).long().cuda()
            else:
                size1 = torch.LongTensor([0]).cuda()
                size2 = torch.LongTensor([0]).cuda()
            # sizes are zero on non-zero ranks, so the sum equals rank 0's
            dist.all_reduce(size1)
            dist.all_reduce(size2)
            if self.rank != 0:
                # NOTE(review): size1/size2 are 1-element tensors used as a
                # size argument; relies on implicit tensor->int conversion,
                # confirm against the PyTorch version in use.
                sub_cluster1_ind_tensor = torch.zeros(
                    (size1, ), dtype=torch.int64).cuda()
                sub_cluster2_ind_tensor = torch.zeros(
                    (size2, ), dtype=torch.int64).cuda()
            dist.broadcast(sub_cluster1_ind_tensor, 0)
            dist.broadcast(sub_cluster2_ind_tensor, 0)
            if self.rank != 0:
                sub_cluster1_ind = sub_cluster1_ind_tensor.cpu().numpy()
                sub_cluster2_ind = sub_cluster2_ind_tensor.cpu().numpy()
            # reassign samples in partition #2 to the empty class
            self.label_bank[sub_cluster2_ind] = e
            # update centroids of max_cluster and e
            self.update_centroids_memory([max_cluster, e])
| 10,441 | 43.623932 | 81 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/multi_pooling.py | import torch.nn as nn
class MultiPooling(nn.Module):
    """Pool feature maps taken from several backbone depths.

    One pooling operator is built per entry of ``in_indices`` and applied
    to the feature map of the matching depth.
    """

    POOL_PARAMS = {
        'resnet50': [
            dict(kernel_size=10, stride=10, padding=4),
            dict(kernel_size=16, stride=8, padding=0),
            dict(kernel_size=13, stride=5, padding=0),
            dict(kernel_size=8, stride=3, padding=0),
            dict(kernel_size=6, stride=1, padding=0)
        ]
    }
    POOL_SIZES = {'resnet50': [12, 6, 4, 3, 2]}
    POOL_DIMS = {'resnet50': [9216, 9216, 8192, 9216, 8192]}

    def __init__(self,
                 pool_type='adaptive',
                 in_indices=(0, ),
                 backbone='resnet50'):
        super(MultiPooling, self).__init__()
        assert pool_type in ['adaptive', 'specified']
        if pool_type == 'adaptive':
            # fixed output spatial size per depth, independent of input size
            pools = [
                nn.AdaptiveAvgPool2d(self.POOL_SIZES[backbone][idx])
                for idx in in_indices
            ]
        else:
            # hand-tuned kernel/stride/padding per depth
            pools = [
                nn.AvgPool2d(**self.POOL_PARAMS[backbone][idx])
                for idx in in_indices
            ]
        self.pools = nn.ModuleList(pools)

    def forward(self, x):
        """Apply the i-th pooling operator to the i-th feature map."""
        assert isinstance(x, (list, tuple))
        outs = []
        for pool, feat in zip(self.pools, x):
            outs.append(pool(feat))
        return outs
| 1,280 | 31.846154 | 66 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/norm.py | import torch.nn as nn
norm_cfg = {
    # format: layer_type: (abbreviation, module)
    'BN': ('bn', nn.BatchNorm2d),
    'SyncBN': ('bn', nn.SyncBatchNorm),
    'GN': ('gn', nn.GroupNorm),
    # and potentially 'SN'
}


def build_norm_layer(cfg, num_features, postfix=''):
    """Build a normalization layer from a config dict.

    Args:
        cfg (dict): Must contain a ``type`` key selecting the layer from
            ``norm_cfg``; an optional ``requires_grad`` key (default True)
            freezes/unfreezes the layer; remaining keys are forwarded to
            the layer's constructor (``eps`` defaults to 1e-5).
        num_features (int): Number of input channels.
        postfix (int | str): Appended to the abbreviation to build the
            returned layer name.

    Returns:
        tuple[str, nn.Module]: (abbreviation + postfix, created layer).
    """
    assert isinstance(cfg, dict) and 'type' in cfg
    cfg_copy = cfg.copy()
    layer_type = cfg_copy.pop('type')
    if layer_type not in norm_cfg:
        raise KeyError('Unrecognized norm type {}'.format(layer_type))
    abbr, norm_layer = norm_cfg[layer_type]
    if norm_layer is None:
        raise NotImplementedError

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = cfg_copy.pop('requires_grad', True)
    cfg_copy.setdefault('eps', 1e-5)
    if layer_type == 'GN':
        # GroupNorm takes the channel count via a keyword argument
        assert 'num_groups' in cfg_copy
        layer = norm_layer(num_channels=num_features, **cfg_copy)
    else:
        layer = norm_layer(num_features, **cfg_copy)
        if layer_type == 'SyncBN':
            layer._specify_ddp_gpu_num(1)

    for param in layer.parameters():
        param.requires_grad = requires_grad
    return name, layer
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/scale.py | import torch
import torch.nn as nn
class Scale(nn.Module):
    """Multiply the input by a single learnable scalar."""

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        # stored as a Parameter so the factor is learned with the model
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        """Return ``x`` scaled by the learnable factor."""
        return self.scale * x
| 305 | 20.857143 | 73 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/sobel.py | import torch
import torch.nn as nn
class Sobel(nn.Module):
    """Convert RGB to grayscale and apply fixed Sobel edge filters."""

    def __init__(self):
        super(Sobel, self).__init__()
        # 1x1 conv averaging the three input channels -> grayscale
        to_gray = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0)
        to_gray.weight.data.fill_(1.0 / 3.0)
        to_gray.bias.data.zero_()
        # 3x3 conv holding the horizontal/vertical Sobel kernels
        edge = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1)
        edge.weight.data[0, 0].copy_(
            torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]))
        edge.weight.data[1, 0].copy_(
            torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
        edge.bias.data.zero_()
        self.sobel = nn.Sequential(to_gray, edge)
        # fixed image-processing kernels: never trained
        for param in self.sobel.parameters():
            param.requires_grad = False

    def forward(self, x):
        """Apply grayscale conversion followed by the Sobel filters."""
        return self.sobel(x)
| 840 | 32.64 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/conv_ws.py | import torch.nn as nn
import torch.nn.functional as F
def conv_ws_2d(input,
               weight,
               bias=None,
               stride=1,
               padding=0,
               dilation=1,
               groups=1,
               eps=1e-5):
    """2d convolution with Weight Standardization.

    The kernel is normalized per output channel to zero mean and unit
    (eps-stabilized) standard deviation before the convolution runs.
    """
    out_channels = weight.size(0)
    flat = weight.view(out_channels, -1)
    mu = flat.mean(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    sigma = flat.std(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    standardized = (weight - mu) / (sigma + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation,
                    groups)
class ConvWS2d(nn.Conv2d):
    """Conv2d whose kernel is weight-standardized at every forward pass."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 eps=1e-5):
        super(ConvWS2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # numerical stabilizer for the per-channel std normalization
        self.eps = eps

    def forward(self, x):
        """Standardize the kernel, then run a regular 2d convolution."""
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups, self.eps)
| 1,335 | 27.425532 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/conv_module.py | import warnings
import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init
from .conv_ws import ConvWS2d
from .norm import build_norm_layer
# Registry mapping config 'type' strings to conv layer classes; consumed
# by build_conv_layer below.
conv_cfg = {
    'Conv': nn.Conv2d,
    'ConvWS': ConvWS2d,
}
def build_conv_layer(cfg, *args, **kwargs):
    """Build convolution layer.

    Args:
        cfg (None or dict): When a dict, its ``type`` key selects the layer
            class from ``conv_cfg`` and the remaining keys are forwarded as
            keyword arguments. None falls back to a plain ``nn.Conv2d``.

    Returns:
        nn.Module: Created conv layer.
    """
    if cfg is None:
        cfg_copy = dict(type='Conv')
    else:
        assert isinstance(cfg, dict) and 'type' in cfg
        cfg_copy = cfg.copy()

    layer_type = cfg_copy.pop('type')
    if layer_type not in conv_cfg:
        # message kept verbatim from upstream (it says "norm")
        raise KeyError('Unrecognized norm type {}'.format(layer_type))
    conv_layer = conv_cfg[layer_type]

    return conv_layer(*args, **kwargs, **cfg_copy)
class ConvModule(nn.Module):
    """A conv block that contains conv/norm/activation layers.
    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
        conv_cfg (dict): Config dict for convolution layer.
        norm_cfg (dict): Config dict for normalization layer.
        activation (str or None): Activation type, "ReLU" by default.
        inplace (bool): Whether to use inplace mode for activation.
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias='auto',
                 conv_cfg=None,
                 norm_cfg=None,
                 activation='relu',
                 inplace=True,
                 order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert conv_cfg is None or isinstance(conv_cfg, dict)
        assert norm_cfg is None or isinstance(norm_cfg, dict)
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.activation = activation
        self.inplace = inplace
        self.order = order
        # order must be a permutation of the three layer names
        assert isinstance(self.order, tuple) and len(self.order) == 3
        assert set(order) == set(['conv', 'norm', 'act'])
        self.with_norm = norm_cfg is not None
        self.with_activation = activation is not None
        # if the conv layer is before a norm layer, bias is unnecessary.
        if bias == 'auto':
            bias = False if self.with_norm else True
        self.with_bias = bias
        if self.with_norm and self.with_bias:
            warnings.warn('ConvModule has norm and bias at the same time')
        # build convolution layer
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        # export the attributes of self.conv to a higher level for convenience
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = self.conv.padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups
        # build normalization layers
        if self.with_norm:
            # norm layer is after conv layer
            if order.index('norm') > order.index('conv'):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            # register under the generated name so state_dict keys match it
            self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
        # build activation layer
        if self.with_activation:
            # TODO: introduce `act_cfg` and supports more activation layers
            if self.activation not in ['relu']:
                raise ValueError('{} is currently not supported.'.format(
                    self.activation))
            if self.activation == 'relu':
                self.activate = nn.ReLU(inplace=inplace)
        # Use msra init by default
        self.init_weights()
    @property
    def norm(self):
        # resolve the dynamically named norm submodule
        return getattr(self, self.norm_name)
    def init_weights(self):
        nonlinearity = 'relu' if self.activation is None else self.activation
        kaiming_init(self.conv, mode='fan_in', nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)
    def forward(self, x, activate=True, norm=True):
        # apply the three layers in the configured order; norm/act can be
        # skipped per-call via the flags
        for layer in self.order:
            if layer == 'conv':
                x = self.conv(x)
            elif layer == 'norm' and norm and self.with_norm:
                x = self.norm(x)
            elif layer == 'act' and activate and self.with_activation:
                x = self.activate(x)
        return x
| 5,723 | 33.902439 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/accuracy.py | import torch.nn as nn
def accuracy(pred, target, topk=1):
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
_, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t()
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
class Accuracy(nn.Module):
    """Module wrapper around ``accuracy`` with a fixed ``topk`` setting."""
    def __init__(self, topk=(1, )):
        super().__init__()
        # which top-k accuracies to report on every forward call
        self.topk = topk
    def forward(self, pred, target):
        """Return the top-k accuracies of ``pred`` w.r.t. ``target``."""
        return accuracy(pred, target, self.topk)
| 801 | 24.0625 | 69 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/utils/gather_layer.py | import torch
import torch.distributed as dist
class GatherLayer(torch.autograd.Function):
    """Gather tensors from all process, supporting backward propagation.

    Plain ``dist.all_gather`` is not differentiable; this autograd Function
    routes the gradient of this rank's gathered slice back to the local
    input tensor.
    """
    @staticmethod
    def forward(ctx, input):
        # keep the input so backward() can shape the gradient like it
        ctx.save_for_backward(input)
        output = [torch.zeros_like(input) \
            for _ in range(dist.get_world_size())]
        dist.all_gather(output, input)
        return tuple(output)
    @staticmethod
    def backward(ctx, *grads):
        input, = ctx.saved_tensors
        grad_out = torch.zeros_like(input)
        # only the gradient w.r.t. this rank's slice flows back locally
        grad_out[:] = grads[dist.get_rank()]
        return grad_out
| 618 | 25.913043 | 72 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/backbones/resnet.py | import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from openselfsup.utils import get_root_logger
from ..registry import BACKBONES
from ..utils import build_conv_layer, build_norm_layer
class BasicBlock(nn.Module):
    """Basic residual block (two 3x3 convs) for shallow ResNets."""
    # output channels = planes * expansion
    expansion = 1
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        super(BasicBlock, self).__init__()
        # norm layers are registered by generated name via add_module so
        # checkpoint keys follow the norm type (e.g. "bn1")
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        # gradient checkpointing is not supported for the basic block
        assert not with_cp
    @property
    def norm1(self):
        # resolve the dynamically named first norm layer
        return getattr(self, self.norm1_name)
    @property
    def norm2(self):
        # resolve the dynamically named second norm layer
        return getattr(self, self.norm2_name)
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.norm1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.norm2(out)
        # project the shortcut when shape/stride changed
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1 convs) for deep ResNets."""
    # output channels = planes * expansion
    expansion = 4
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        """Bottleneck block for ResNet.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # which of the first two convs carries the stride depends on style
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg,
            planes,
            planes,
            kernel_size=3,
            stride=self.conv2_stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
    @property
    def norm1(self):
        return getattr(self, self.norm1_name)
    @property
    def norm2(self):
        return getattr(self, self.norm2_name)
    @property
    def norm3(self):
        return getattr(self, self.norm3_name)
    def forward(self, x):
        # the residual branch is wrapped so it can be checkpointed as a unit
        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.downsample is not None:
                identity = self.downsample(x)
            out += identity
            return out
        # trade compute for memory when gradient checkpointing is enabled
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN')):
    """Stack ``blocks`` residual blocks into one ResNet stage.

    The first block may change stride and channel width (and therefore
    receives a projection shortcut); all remaining blocks use stride 1.
    """
    downsample = None
    # a projection shortcut is needed whenever the residual branch changes
    # the spatial size (stride != 1) or the channel count
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(
                conv_cfg,
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = [
        block(
            inplanes=inplanes,
            planes=planes,
            stride=stride,
            dilation=dilation,
            downsample=downsample,
            style=style,
            with_cp=with_cp,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg)
    ]
    inplanes = planes * block.expansion
    for _ in range(blocks - 1):
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=1,
                dilation=dilation,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg))

    return nn.Sequential(*layers)
@BACKBONES.register_module
class ResNet(nn.Module):
    """ResNet backbone.
    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Normally 3.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.
    Example:
        >>> from openselfsup.models import ResNet
        >>> import torch
        >>> self = ResNet(depth=18)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 8, 8)
        (1, 128, 4, 4)
        (1, 256, 2, 2)
        (1, 512, 1, 1)
    """
    # depth -> (block class, blocks per stage)
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }
    def __init__(self,
                 depth,
                 in_channels=3,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3, 4),
                 style='pytorch',
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=False,
                 with_cp=False,
                 zero_init_residual=False):
        super(ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError('invalid depth {} for resnet'.format(depth))
        self.depth = depth
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        # out_indices include 0 for the stem output, hence num_stages + 1
        self.out_indices = out_indices
        assert max(out_indices) < num_stages + 1
        self.style = style
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.zero_init_residual = zero_init_residual
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = 64
        self._make_stem_layer(in_channels)
        # build the residual stages; each stage doubles the channel width
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            planes = 64 * 2**i
            res_layer = make_res_layer(
                self.block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg)
            self.inplanes = planes * self.block.expansion
            layer_name = 'layer{}'.format(i + 1)
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        # channel count of the deepest built stage
        self.feat_dim = self.block.expansion * 64 * 2**(
            len(self.stage_blocks) - 1)
    @property
    def norm1(self):
        # resolve the dynamically named stem norm layer
        return getattr(self, self.norm1_name)
    def _make_stem_layer(self, in_channels):
        # 7x7/2 conv stem followed by norm, relu and a 3x3/2 max pool
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            64,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False)
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def _freeze_stages(self):
        # stage 0 freezes the stem; stages 1..frozen_stages freeze res layers
        if self.frozen_stages >= 0:
            self.norm1.eval()
            for m in [self.conv1, self.norm1]:
                for param in m.parameters():
                    param.requires_grad = False
        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, 'layer{}'.format(i))
            m.eval()
            for param in m.parameters():
                param.requires_grad = False
    def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=True, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m, mode='fan_in', nonlinearity='relu')
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if self.zero_init_residual:
                # zero the last norm so each block starts as identity
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        outs = []
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)  # r50: 64x128x128
        if 0 in self.out_indices:
            outs.append(x)
        x = self.maxpool(x)  # r50: 64x56x56
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i + 1 in self.out_indices:
                outs.append(x)
        # r50: 1-256x56x56; 2-512x28x28; 3-1024x14x14; 4-2048x7x7
        return tuple(outs)
    def train(self, mode=True):
        super(ResNet, self).train(mode)
        # re-freeze after every train()/eval() switch
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/backbones/resnext.py | import math
import torch.nn as nn
from ..registry import BACKBONES
from ..utils import build_conv_layer, build_norm_layer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottleneck(_Bottleneck):
    """ResNeXt bottleneck: grouped 3x3 conv with per-group width scaling."""
    def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs):
        """Bottleneck block for ResNeXt.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        # grouped conv widens the mid channels: floor(planes*bw/64)*groups
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes * (base_width / 64)) * groups
        # rebuild the norm/conv layers created by the base class with the
        # ResNeXt width (the base-class versions are overwritten)
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        # NOTE(review): `self.with_dcn` / `self.dcn` are not set by the base
        # Bottleneck in this repo's resnet.py -- confirm the base class
        # actually provides DCN support before using this path.
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                self.conv_cfg,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                self.dcn,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   groups=1,
                   base_width=4,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None):
    """Stack ``blocks`` ResNeXt bottleneck blocks into one stage.

    The first block carries the stride (and a projection shortcut when
    shape changes); the rest use stride 1.

    NOTE(review): the ``dcn``/``gcb`` kwargs are forwarded to ``block``,
    but the block classes visible in this repo do not declare those
    parameters -- confirm the block signature before enabling them.
    """
    downsample = None
    # projection shortcut when spatial size or channel count changes
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(
                conv_cfg,
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )
    layers = []
    layers.append(
        block(
            inplanes=inplanes,
            planes=planes,
            stride=stride,
            dilation=dilation,
            downsample=downsample,
            groups=groups,
            base_width=base_width,
            style=style,
            with_cp=with_cp,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            dcn=dcn,
            gcb=gcb))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=1,
                dilation=dilation,
                groups=groups,
                base_width=base_width,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gcb=gcb))
    return nn.Sequential(*layers)
@BACKBONES.register_module
class ResNeXt(ResNet):
    """ResNeXt backbone.
    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Normally 3.
        num_stages (int): Resnet stages, normally 4.
        groups (int): Group of resnext.
        base_width (int): Base width of resnext.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.
    Example:
        >>> from openselfsup.models import ResNeXt
        >>> import torch
        >>> self = ResNeXt(depth=50)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)
    """
    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }
    def __init__(self, groups=1, base_width=4, **kwargs):
        super(ResNeXt, self).__init__(**kwargs)
        self.groups = groups
        self.base_width = base_width
        # rebuild all residual stages with grouped-conv blocks, replacing
        # the stages created by ResNet.__init__
        self.inplanes = 64
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            # NOTE(review): `self.dcn`, `self.stage_with_dcn`, `self.gcb`
            # and `self.stage_with_gcb` are not set by the ResNet base
            # class in this repo -- confirm they exist before use.
            dcn = self.dcn if self.stage_with_dcn[i] else None
            gcb = self.gcb if self.stage_with_gcb[i] else None
            planes = 64 * 2**i
            res_layer = make_res_layer(
                self.block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                groups=self.groups,
                base_width=self.base_width,
                style=self.style,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                gcb=gcb)
            self.inplanes = planes * self.block.expansion
            layer_name = 'layer{}'.format(i + 1)
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
| 7,594 | 33.058296 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/heads/contrastive_head.py | import torch
import torch.nn as nn
from ..registry import HEADS
@HEADS.register_module
class ContrastiveHead(nn.Module):
    """Head for contrastive learning.

    Computes an InfoNCE-style cross-entropy over the concatenated
    positive/negative similarity logits.

    Args:
        temperature (float): The temperature hyper-parameter that
            controls the concentration level of the distribution.
            Default: 0.1.
    """

    def __init__(self, temperature=0.1):
        super(ContrastiveHead, self).__init__()
        self.criterion = nn.CrossEntropyLoss()
        self.temperature = temperature

    def forward(self, pos, neg):
        """Forward head.

        Args:
            pos (Tensor): Nx1 positive similarity.
            neg (Tensor): Nxk negative similarity.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        batch = pos.size(0)
        # the positive logit sits at column 0, so the target label is 0
        logits = torch.cat((pos, neg), dim=1) / self.temperature
        labels = torch.zeros((batch, ), dtype=torch.long).cuda()
        return dict(loss=self.criterion(logits, labels))
| 1,053 | 26.025641 | 65 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/heads/cls_head.py | import torch.nn as nn
from mmcv.cnn import kaiming_init, normal_init
from ..utils import accuracy
from ..registry import HEADS
@HEADS.register_module
class ClsHead(nn.Module):
    """Simplest classifier head, with only one fc layer.

    Optionally average-pools, then flattens and linearly maps features
    to class scores.
    """

    def __init__(self,
                 with_avg_pool=False,
                 in_channels=2048,
                 num_classes=1000):
        super(ClsHead, self).__init__()
        self.with_avg_pool = with_avg_pool
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.criterion = nn.CrossEntropyLoss()
        if self.with_avg_pool:
            # collapse spatial dims to 1x1 before the linear classifier
            self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc_cls = nn.Linear(in_channels, num_classes)

    def init_weights(self, init_linear='normal', std=0.01, bias=0.):
        """Initialize linear ('normal' or 'kaiming') and norm layers."""
        assert init_linear in ['normal', 'kaiming'], \
            "Undefined init_linear: {}".format(init_linear)
        for module in self.modules():
            if isinstance(module, nn.Linear):
                if init_linear == 'normal':
                    normal_init(module, std=std, bias=bias)
                else:
                    kaiming_init(module, mode='fan_in', nonlinearity='relu')
            elif isinstance(module,
                            (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)):
                if module.weight is not None:
                    nn.init.constant_(module.weight, 1)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Map a single-element feature list/tuple to [class_scores]."""
        assert isinstance(x, (tuple, list)) and len(x) == 1
        feat = x[0]
        if self.with_avg_pool:
            assert feat.dim() == 4, \
                "Tensor must has 4 dims, got: {}".format(feat.dim())
            feat = self.avg_pool(feat)
        flat = feat.view(feat.size(0), -1)
        return [self.fc_cls(flat)]

    def loss(self, cls_score, labels):
        """Cross-entropy loss plus accuracy for logging."""
        assert isinstance(cls_score, (tuple, list)) and len(cls_score) == 1
        score = cls_score[0]
        return dict(
            loss=self.criterion(score, labels),
            acc=accuracy(score, labels))
| 2,119 | 33.754098 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/heads/multi_cls_head.py | import torch.nn as nn
from ..utils import accuracy
from ..registry import HEADS
from ..utils import build_norm_layer, MultiPooling
@HEADS.register_module
class MultiClsHead(nn.Module):
    """Multiple classifier heads.

    One linear classifier per selected backbone depth, each fed by the
    matching pooling operator of ``MultiPooling`` (plus, optionally, the
    unpooled last feature map).
    """
    # per-depth channel counts and flattened size of the unpooled last map
    FEAT_CHANNELS = {'resnet50': [64, 256, 512, 1024, 2048]}
    FEAT_LAST_UNPOOL = {'resnet50': 2048 * 7 * 7}
    def __init__(self,
                 pool_type='adaptive',
                 in_indices=(0, ),
                 with_last_layer_unpool=False,
                 backbone='resnet50',
                 norm_cfg=dict(type='BN'),
                 num_classes=1000):
        super(MultiClsHead, self).__init__()
        assert norm_cfg['type'] in ['BN', 'SyncBN', 'GN', 'null']
        self.with_last_layer_unpool = with_last_layer_unpool
        # 'null' disables the per-depth normalization entirely
        self.with_norm = norm_cfg['type'] != 'null'
        self.criterion = nn.CrossEntropyLoss()
        self.multi_pooling = MultiPooling(pool_type, in_indices, backbone)
        if self.with_norm:
            self.norms = nn.ModuleList([
                build_norm_layer(norm_cfg, self.FEAT_CHANNELS[backbone][l])[1]
                for l in in_indices
            ])
        # one linear classifier per pooled depth (sizes from MultiPooling)
        self.fcs = nn.ModuleList([
            nn.Linear(self.multi_pooling.POOL_DIMS[backbone][l], num_classes)
            for l in in_indices
        ])
        if with_last_layer_unpool:
            # extra classifier on the flattened, unpooled last feature map
            self.fcs.append(
                nn.Linear(self.FEAT_LAST_UNPOOL[backbone], num_classes))
    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m,
                            (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def forward(self, x):
        """Return one score tensor per classifier head."""
        assert isinstance(x, (list, tuple))
        if self.with_last_layer_unpool:
            # keep the raw last map before pooling replaces the list
            last_x = x[-1]
        x = self.multi_pooling(x)
        if self.with_norm:
            x = [n(xx) for n, xx in zip(self.norms, x)]
        if self.with_last_layer_unpool:
            x.append(last_x)
        x = [xx.view(xx.size(0), -1) for xx in x]
        x = [fc(xx) for fc, xx in zip(self.fcs, x)]
        return x
    def loss(self, cls_score, labels):
        losses = dict()
        for i, s in enumerate(cls_score):
            # keys must contain "loss"
            losses['loss.{}'.format(i + 1)] = self.criterion(s, labels)
            losses['acc.{}'.format(i + 1)] = accuracy(s, labels)
        return losses
| 2,682 | 32.962025 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/models/heads/latent_pred_head.py | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .. import builder
@HEADS.register_module
class LatentPredictHead(nn.Module):
    """Latent-prediction head.

    A predictor neck maps input features toward target features; the
    two are compared with a normalized dot-product (cosine) loss.

    Args:
        predictor (dict): Neck config built via ``builder.build_neck``.
        size_average (bool): Divide the loss by the batch size.
    """

    def __init__(self, predictor, size_average=True):
        super(LatentPredictHead, self).__init__()
        self.predictor = builder.build_neck(predictor)
        self.size_average = size_average

    def init_weights(self, init_linear='normal'):
        # Delegate initialization to the predictor neck.
        self.predictor.init_weights(init_linear=init_linear)

    def forward(self, input, target):
        """Forward head.

        Args:
            input (Tensor): NxC input features.
            target (Tensor): NxC target features.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        normalize = nn.functional.normalize
        pred = self.predictor([input])[0]
        # -2 * cosine similarity summed over the batch.
        loss = -2 * (normalize(pred, dim=1) * normalize(target, dim=1)).sum()
        if self.size_average:
            loss = loss / input.size(0)
        return dict(loss=loss)
@HEADS.register_module
class LatentClsHead(nn.Module):
    """Latent classification head.

    A single linear classifier scores the input features; the target
    features are classified by the same layer (no gradient) and their
    argmax is used as the pseudo-label for cross-entropy.

    Args:
        predictor: Config object exposing ``in_channels`` and
            ``num_classes`` for the linear layer.
    """

    def __init__(self, predictor):
        super(LatentClsHead, self).__init__()
        self.predictor = nn.Linear(predictor.in_channels,
                                   predictor.num_classes)
        self.criterion = nn.CrossEntropyLoss()

    def init_weights(self, init_linear='normal'):
        normal_init(self.predictor, std=0.01)

    def forward(self, input, target):
        """Forward head.

        Args:
            input (Tensor): NxC input features.
            target (Tensor): NxC target features.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        pred = self.predictor(input)
        # Pseudo-labels come from classifying the target features;
        # detached so no gradient flows through the label path.
        with torch.no_grad():
            label = torch.argmax(self.predictor(target), dim=1).detach()
        return dict(loss=self.criterion(pred, label))
| 2,048 | 28.695652 | 72 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/base.py | from abc import ABCMeta, abstractmethod
import torch
from torch.utils.data import Dataset
from openselfsup.utils import print_log, build_from_cfg
from torchvision.transforms import Compose
from .registry import DATASETS, PIPELINES
from .builder import build_datasource
class BaseDataset(Dataset, metaclass=ABCMeta):
    """Abstract base for self-supervised datasets.

    Wires together a data source (raw sample provider) and a transform
    pipeline; concrete subclasses implement item retrieval and
    evaluation.

    Args:
        data_source (dict): Data source config defined in
            `openselfsup.datasets.data_sources`.
        pipeline (list[dict]): Transform configs registered in
            `oenselfsup.datasets.pipelines`, applied in order.
        prefetch (bool): Flag stored for subclasses to decide whether
            prefetch-style conversion is applied. Default: False.
    """

    def __init__(self, data_source, pipeline, prefetch=False):
        self.data_source = build_datasource(data_source)
        transforms = [build_from_cfg(cfg, PIPELINES) for cfg in pipeline]
        self.pipeline = Compose(transforms)
        self.prefetch = prefetch

    def __len__(self):
        # Length is delegated to the underlying data source.
        return self.data_source.get_length()

    @abstractmethod
    def __getitem__(self, idx):
        pass

    @abstractmethod
    def evaluate(self, scores, keyword, logger=None, **kwargs):
        pass
| 1,105 | 26.65 | 76 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/classification.py | import torch
from openselfsup.utils import print_log
from .registry import DATASETS
from .base import BaseDataset
from .utils import to_numpy
@DATASETS.register_module
class ClassificationDataset(BaseDataset):
    """Dataset for supervised classification.

    Yields ``dict(img=..., gt_label=...)`` samples and evaluates top-k
    accuracy against the data source's labels.
    """

    def __init__(self, data_source, pipeline, prefetch=False):
        super(ClassificationDataset, self).__init__(data_source, pipeline, prefetch)

    def __getitem__(self, idx):
        img, target = self.data_source.get_sample(idx)
        img = self.pipeline(img)
        if self.prefetch:
            img = torch.from_numpy(to_numpy(img))
        return dict(img=img, gt_label=target)

    def evaluate(self, scores, keyword, logger=None, topk=(1, 5)):
        """Compute top-k accuracy.

        Args:
            scores (Tensor): NxC prediction scores.
            keyword (str): Prefix for the returned metric names.
            logger: mmcv-style logger; ``None`` or ``'silent'``
                suppresses printing.
            topk (tuple[int]): k values to report. Default: (1, 5).

        Returns:
            dict: ``{'<keyword>_top<k>': accuracy_percent}``.
        """
        eval_res = {}
        target = torch.LongTensor(self.data_source.labels)
        assert scores.size(0) == target.size(0), \
            "Inconsistent length for results and labels, {} vs {}".format(
                scores.size(0), target.size(0))
        num = scores.size(0)
        _, pred = scores.topk(max(topk), dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))  # KxN
        for k in topk:
            # BUGFIX: use reshape, not view. `correct` is a transposed
            # (non-contiguous) tensor, so `.view(-1)` raises a
            # RuntimeError in modern PyTorch whenever max(topk) > 1.
            correct_k = correct[:k].reshape(-1).float().sum(0).item()
            acc = correct_k * 100.0 / num
            eval_res["{}_top{}".format(keyword, k)] = acc
            if logger is not None and logger != 'silent':
                print_log(
                    "{}_top{}: {:.03f}".format(keyword, k, acc),
                    logger=logger)
        return eval_res
return eval_res
| 1,565 | 33.8 | 84 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/rotation_pred.py | import torch
from PIL import Image
from .registry import DATASETS
from .base import BaseDataset
def rotate(img):
    """Return *img* together with its three 90-degree rotations.

    Args:
        img (Tensor): input image of shape (C, H, W).

    Returns:
        list[Tensor]: ``[img, rot90, rot180, rot270]`` — the original
        image followed by counterclockwise rotations of 90, 180 and 270
        degrees in the (H, W) plane.
    """
    # torch.rot90 in dims (1, 2) reproduces the original
    # transpose/flip combinations exactly.
    return [img] + [torch.rot90(img, k, dims=(1, 2)) for k in (1, 2, 3)]
@DATASETS.register_module
class RotationPredDataset(BaseDataset):
    """Dataset for the rotation-prediction pretext task.

    Each sample is the transformed image stacked with its three
    rotations (shape 4xCxHxW), paired with labels 0-3 identifying the
    rotation applied.
    """

    def __init__(self, data_source, pipeline):
        super(RotationPredDataset, self).__init__(data_source, pipeline)

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        assert isinstance(img, Image.Image), \
            'The output from the data source must be an Image, got: {}. \
            Please ensure that the list file does not contain labels.'.format(
                type(img))
        img = self.pipeline(img)
        # Stack the four rotated copies along a new leading dim: 4CHW.
        img = torch.stack(rotate(img), dim=0)
        rotation_labels = torch.LongTensor([0, 1, 2, 3])
        return dict(img=img, rot_label=rotation_labels)

    def evaluate(self, scores, keyword, logger=None):
        # BUGFIX: `raise NotImplemented` raises a TypeError because
        # NotImplemented is a singleton, not an exception class.
        raise NotImplementedError
| 1,288 | 27.021739 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/relative_loc.py | from openselfsup.utils import build_from_cfg
import torch
from PIL import Image
from torchvision.transforms import Compose, RandomCrop
import torchvision.transforms.functional as TF
from .registry import DATASETS, PIPELINES
from .base import BaseDataset
def image_to_patches(img):
    """Crop split_per_side x split_per_side jittered patches from an image.

    The image is divided into a 3x3 grid; each grid cell is cropped and
    then randomly re-cropped slightly smaller to add positional jitter.

    Args:
        img (PIL Image): input image.

    Returns:
        list[PIL Image]: 9 cropped patches in row-major order.
    """
    split_per_side = 3  # split of patches per image side
    patch_jitter = 21  # jitter of each patch from each grid
    # BUGFIX: PIL's Image.size is (width, height); the original
    # `h, w = img.size` swapped the two, producing wrong grids for
    # non-square images (square inputs were unaffected).
    w, h = img.size
    h_grid = h // split_per_side
    w_grid = w // split_per_side
    h_patch = h_grid - patch_jitter
    w_patch = w_grid - patch_jitter
    assert h_patch > 0 and w_patch > 0
    patches = []
    for i in range(split_per_side):
        for j in range(split_per_side):
            # TF.crop signature is (img, top, left, height, width).
            p = TF.crop(img, i * h_grid, j * w_grid, h_grid, w_grid)
            p = RandomCrop((h_patch, w_patch))(p)
            patches.append(p)
    return patches
@DATASETS.register_module
class RelativeLocDataset(BaseDataset):
    """Dataset for relative patch location prediction.

    Each sample yields 8 channel-concatenated (surrounding patch,
    center patch) pairs plus the index (0-7) of the surrounding
    patch's grid position.
    """

    def __init__(self, data_source, pipeline, format_pipeline):
        super(RelativeLocDataset, self).__init__(data_source, pipeline)
        format_pipeline = [build_from_cfg(p, PIPELINES) for p in format_pipeline]
        self.format_pipeline = Compose(format_pipeline)

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        assert isinstance(img, Image.Image), \
            'The output from the data source must be an Image, got: {}. \
            Please ensure that the list file does not contain labels.'.format(
                type(img))
        img = self.pipeline(img)
        patches = image_to_patches(img)
        patches = [self.format_pipeline(p) for p in patches]
        # Pair every surrounding patch with the center patch (index 4).
        # (Replaces a side-effect list comprehension with a real one.)
        perms = [
            torch.cat((patches[i], patches[4]), dim=0)
            for i in range(9) if i != 4
        ]
        # One label per pair: the surrounding patch's grid position.
        patch_labels = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7])
        return dict(img=torch.stack(perms), patch_label=patch_labels)  # 8(2C)HW, 8

    def evaluate(self, scores, keyword, logger=None):
        # BUGFIX: `raise NotImplemented` raises a TypeError because
        # NotImplemented is a singleton, not an exception class.
        raise NotImplementedError
| 2,327 | 34.272727 | 94 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/datasets/dataset_wrappers.py | import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .registry import DATASETS
@DATASETS.register_module
class ConcatDataset(_ConcatDataset):
    """Concatenation wrapper over several datasets.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but also
    exposes the first dataset's ``CLASSES`` and, when present,
    concatenates the per-sample ``flag`` arrays (image aspect-ratio
    group flags).

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        # Only propagate flags when the member datasets define them.
        if hasattr(datasets[0], 'flag'):
            self.flag = np.concatenate([ds.flag for ds in datasets])
@DATASETS.register_module
class RepeatDataset(object):
    """Virtually repeat a dataset ``times`` times.

    The wrapped dataset appears ``times`` larger; indices wrap around
    modulo the original length. Useful when data loading setup is
    expensive but the dataset itself is small, since it reduces the
    inter-epoch overhead.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        # Tile the aspect-ratio group flags when the dataset has them.
        if hasattr(self.dataset, 'flag'):
            self.flag = np.tile(self.dataset.flag, times)
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        # Wrap the index back into the underlying dataset's range.
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        return self._ori_len * self.times
| 1,639 | 28.285714 | 78 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.