repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.2/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Energy-evaluation callable handed to scipy.optimize.minimize.
    Overwrites the call function of tequila's stock container so that the
    Hamiltonian itself may depend on (possibly complex) variables: on every
    call the parametrized Hamiltonian is re-evaluated at the current
    variables before the expectation value is simulated.
    This class is used by the SciPy optimizer below and should not be used
    elsewhere.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tequila QubitHamiltonian.
    unitary:
        the (parametrized) circuit whose expectation value is evaluated.
    param_keys:
        ordered variable keys; position i of the scipy parameter array
        corresponds to param_keys[i].
    Ham_derivatives:
        derivatives of the Hamiltonian w.r.t. its own variables
        (consumed by the _GradContainer subclass).
    Eval:
        optional reference to an energy container (set by the optimizer
        when constructing the gradient container).
    passive_angles:
        variables that are held fixed during the optimization.
    samples:
        the number of samples (shots) to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__.
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # Filled on the first call; later appended to the optimizer's report.
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy for a scipy parameter vector.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered like
            self.param_keys.
        args
        kwargs
        Returns
        -------
        complex:
            the simulated expectation value, cast to a plain complex
            (jax number types confuse the scipy optimizers).
        """
        angles = {}
        for i in range(self.N):
            # Circuit variables are passed through unchanged; variables that
            # appear only in the Hamiltonian are explicitly cast to complex.
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Re-evaluate the parametrized Hamiltonian at the current variables
        # before building the expectation value.
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient callable handed to scipy.optimize.minimize.
    Overwrites the call function of the stock container: the total
    derivative for each parameter is the sum of the circuit-parameter
    gradient (via tq.grad) and, for variables the Hamiltonian depends on,
    the expectation value of the Hamiltonian derivative.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    See _EvalContainer for attribute details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at a scipy parameter vector.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered like
            self.param_keys.
        args
        kwargs
        Returns
        -------
        numpy.array:
            the gradient vector, one entry per entry of param_keys,
            as a complex64 array.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is a float64 array; assigning a complex
        # derivative into it at dE_vec[i] drops the imaginary part before
        # the final complex64 cast below -- confirm this is intended.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # Same real/complex split as in _EvalContainer.__call__.
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution 1: analytic gradient w.r.t. circuit parameters.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution 2: expectation value of dH/d(parameter) for
            # variables the Hamiltonian itself depends on.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # derivative is either a plain number (only Hamiltonian part)
            # or a compiled objective that still has to be evaluated.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that overwrites the expectation and gradient
    container objects so that the Hamiltonian may carry its own variables
    (see _EvalContainer / _GradContainer above).
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian and circuit).
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if number: that value for every variable
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instructions: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # the visible header -- confirm it is available at runtime.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` / TequilaWarning are not imported
                # in the visible header -- confirm availability.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # Split into the variables scipy will move (active) and those kept
        # fixed (passive).
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            a parametrized Hamiltonian (converted internally via
            convert_PQH_to_tq_QH) defining the objective together with `unitary`.
        unitary:
            the parametrized circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # Collect all variables: Hamiltonian variables plus circuit variables.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # Build the energy container; an initial silent evaluation fills
        # its infostring.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    # NOTE(review): TequilaException / get_qng_combos /
                    # _QngContainer are not imported in the visible header
                    # (the _containers import above is commented out) --
                    # this branch would raise NameError if taken; confirm.
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # e.g. '2-point': let scipy do numerical differentiation.
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Default path: build the custom gradient container and do one
            # silent evaluation to fill its infostring.
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # Records the state after every scipy iteration by peeking at
            # the containers' histories.
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is assigned but never used --
            # presumably callback.real_iterations was meant; confirm intent.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy class instead of tequila's stock one and
    passes the objective construction (Hamiltonian + unitary) down.
    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian defining the objective.
    unitary:
        the parametrized circuit defining the objective.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers.
        If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device the backend should run on.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # Normalize dictionary-valued gradients/hessians to tequila variables.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # Bugfix: the second key element used to be wrapped in a list
            # (assign_variable([k[1]])), which is not a valid variable key.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.2/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.
    :param objective (Objective or QTensor): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
        default None: total gradient (one entry per variable).
    :param no_compile: if True, skip the gradient-mode compilation pass and
        differentiate the objective as-is.
    :return: dictionary of Objectives if variable is None; otherwise the
        derivative of objective w.r.t. variable.
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable.
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # Differentiate each tensor entry independently.
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    # fast return: derivative w.r.t. an unused variable is the empty objective
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile to gates for which the shift rule is available
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # single-expectationvalue objective: differentiate its last argument
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    '''
    Chain rule for a transformed objective: sum over arguments of
    (outer derivative of the transformation w.r.t. the argument) times
    (inner derivative of the argument w.r.t. variable).
    :param objective: the compiled objective to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: an Objective representing d(objective)/d(variable)
    '''
    args = objective.args
    transformation = objective.transformation
    dO = None
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # Outer derivative of the (classical) transformation via autodiff.
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies: differentiate each expectationvalue only once
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the chain rule: differentiate a single argument all
    the way down to variables, returning 1.0 or 0.0 when a variable is
    (is not) identical to the requested one.
    :param arg: a transform, expectationvalue or variable object to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert isinstance(variable, Variable)
    # A plain variable differentiates to 1 w.r.t. itself, 0 otherwise.
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    # Fixed (non-trainable) values always have zero derivative.
    if isinstance(arg, FixedVariable):
        return 0.0
    # Bare expectation values get the analytic shift-rule derivative.
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    # Already-compiled expectation values: differentiate the abstract one,
    # then recompile with the same backend arguments.
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # Anything else is treated as a nested objective.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Implements the analytic partial derivative of a unitary as it would appear
    in an expectation value: one shift-rule contribution per gate that carries
    the variable.
    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which differentiation is performed.
    :return: sum of shift-rule Objectives (0.0 if the unitary does not depend on variable)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all (position, gate) pairs in the circuit that depend on variable
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly differentiable gate via the parameter-shift
    rule. Expects precompiled circuits (gates must provide shifted_gates()).
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for override in custom gate construction
    if hasattr(g, "shifted_gates"):
        # chain rule: derivative of the gate parameter w.r.t. variable
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        # one expectation value per shifted circuit, weighted accordingly
        for x in shifted:
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.7/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight read-only record bundling a coefficient with the operators
    it scales and the qubit positions they act on.  Serves as an
    intermediate format while translating a Hamiltonian into an MPO.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # Stash everything in private attributes; read access goes through
        # the properties below.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Single-qubit operators making up this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO tensors, one rank-4 tensor of shape
    (bond_left, bond_right, 2, 2) per qubit.  Is able to set values at
    certain positions, grow tensors dynamically (wannabe-equivalent to
    dynamic arrays) and compress the MPO via SVD.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        """
        Parameters
        ----------
        n_qubits:
            number of qubits; one (1,1,2,2) zero tensor is allocated each.
        """
        self.n_qubits = n_qubits
        # Fix: `np.complex` was removed in NumPy 1.24; the builtin `complex`
        # is the documented replacement (same dtype: complex128).
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """ Returns max (left) bond dimension over all container tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.
        set_at: where to put data; length 2 -> bond indices of a full 2x2
        matrix, length 4 -> a single entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array.
        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two dimensions are always 2x2 only.  The new 2x2 operator is
        written at the new (max, max) bond position.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # Fix: builtin `complex` instead of the removed `np.complex` alias.
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD: a left-to-right sweep followed by a
        right-to-left sweep, dropping singular values below EPS and
        distributing sqrt(s) to both sides.
        """
        n_qubits = len(self.container)
        # Fuse the two physical legs so each tensor is rank 3.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the fused physical legs back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build and store a matrix-product-operator (MPO) representation of a
    qubit Hamiltonian on top of the tensornetwork package.

    The Hamiltonian (a tequila QubitHamiltonian) is first converted into an
    intermediate list of (coefficient, operators, positions) records, which
    is then folded term by term into one or more MPOContainer objects whose
    bond dimension is capped at ``maxdim``.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: The qubit Hamiltonian to represent as an MPO.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: Number of qubits; derived from the Hamiltonian when
                omitted.
            name: An optional name for the MPO.
            maxdim: Maximum bond dimension of a single MPO; terms beyond
                that limit start a new MPO in the returned list.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): relies on a get_n_qubits() helper that is not
            # visible in this chunk -- confirm it is defined elsewhere.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO chain."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOs stored in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Hamiltonian's Pauli strings into a list of SubOperator
        records (coefficient, 2x2 operator matrices, qubit positions).

        Raises:
            Exception: if an identity term appears anywhere but first.
        """
        def get_pauli_matrix(string):
            # dtype uses the builtin 'complex': the numpy alias 'np.complex'
            # was deprecated in NumPy 1.20 and removed in 1.24.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty items); its
            # coefficient is distributed over all sites later.
            if first and not paulistring.items():
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            intermediate += [SubOperator(coefficient=coefficient,
                                         operators=operators,
                                         positions=positions)]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Fold terms intermediate[j:] into a single MPOContainer until either
        all terms are consumed or the bond dimension reaches self.maxdim.

        Args:
            intermediate: list of SubOperator records.
            j: index of the first term to consume.

        Returns:
            (mpo, j): the filled container and the index of the first
            unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: written directly at block position [0, 0] of every site
        # tensor. The coefficient is distributed evenly over all sites via an
        # n-th root; 'complex' replaces the removed 'np.complex' alias.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # Remaining terms: each grows the bond dimension by one block
        # row/column (update_dir), except at the chain boundaries.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits-1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # Operator acting on this qubit, or identity if none does.
                if q in my_positions:
                    op = my_operators[my_positions.index(q)]
                else:
                    op = np.eye(2)
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=
                                     complex(my_coefficient)**(1/n_qubits)*op)
            # Periodic SVD compression keeps the bond dimension in check.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into a list of MPOs, each bounded by maxdim."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator as an order-4 tensor of shape (d,d,d,d)
        with d = 2**(n_qubits/2), e.g. to compare with the Hamiltonian matrix
        obtained from tequila. Sums the contributions of all MPOs in self.mpo.
        """
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        H = None
        # Network layout (-- are bond indices, | physical indices):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in self.mpo:
            assert n_qubits == len(m.container)
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect bond indices between neighbouring sites
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices): boundary bonds first,
            # then all upper physical legs, then all lower physical legs.
            edges = [nodes[0].get_edge(0), nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Fix: previously the accumulation of H happened only inside the
            # pytorch branch, so non-pytorch backends raised NameError and
            # additional MPOs were never summed.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = np.asarray(res.tensor)
            H = H_m if H is None else H + H_m
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.7/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function

    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        parametrized Hamiltonian whose expectation value is evaluated.
    unitary:
        the circuit preparing the state.
    param_keys:
        sequence mapping positions in the scipy parameter array to variables.
    Ham_derivatives:
        derivatives of the Hamiltonian w.r.t. its own variables (used by the
        gradient container subclass).
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time
        __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__.
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None,
                 Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the expectation value <U|H(p)|U> at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs

        Returns
        -------
        complex:
            value of the expectation value at p, cast to complex
            (jax types confuse the scipy optimizers).
        """
        angles = {}
        for i in range(self.N):
            # Circuit variables stay real; Hamiltonian-only variables are
            # cast to complex for the parametrized Hamiltonian.
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        # renamed from 'vars' to avoid shadowing the builtin
        variables = format_variable_dictionary(angles)
        Hamiltonian = self.Hamiltonian(variables)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, variables, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function

    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient of the energy at parameter vector p.

        Each parameter may receive up to two contributions: the circuit
        derivative (shift rule via tq.grad) when it appears in the unitary,
        and the Hamiltonian derivative <U|dH/dtheta|U> when it appears in
        the Hamiltonian.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient vector (complex64), ordered like self.param_keys.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # Fix: allocate complex storage. The vector is returned as complex64
        # anyway, and a float64 array would raise a TypeError when a complex
        # Hamiltonian derivative is assigned into it.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # circuit variables stay real; Hamiltonian-only variables complex
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        # renamed from 'vars' to avoid shadowing the builtin
        formatted = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            if self.param_keys[i] in list(unitary.extract_variables()):
                # circuit contribution dE/dtheta via the parameter-shift rule
                Ham = Hamiltonian(formatted)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                # Hamiltonian contribution <U| dH/dtheta |U>
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(formatted)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, formatted, backend='qulacs', samples=self.samples)
            if isinstance(derivative, (float, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                # still an uncompiled tequila objective: evaluate it now
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer variant that overwrites the expectation and
    gradient container objects (_EvalContainer / _GradContainer) so that the
    Hamiltonian itself can be parametrized, in addition to the circuit.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective received
        in calls to optimizers.

        Parameters
        ----------
        all_variables: list:
            every variable of the problem (Hamiltonian and circuit).
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over (None means all).

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            # default: uniform random angles in [0, 2pi)
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this chunk -- confirm it is in scope at module level.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): 'warnings' and TequilaWarning are not imported
                # in this chunk -- confirm they are in scope at module level.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into actively optimized and frozen (passive) variables
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the circuit preparing the state.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # collect all variables: Hamiltonian variables first, then circuit ones
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # silent warm-up call: populates E.infostring (and history)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): 'objective', get_qng_combos and _QngContainer
                # are not defined/imported in this chunk -- this branch would
                # raise NameError if taken; confirm intent.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # pass string ('2-point', 'cs', '3-point') straight to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient via the custom container (circuit +
            # Hamiltonian derivative contributions)
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # NOTE(review): these are class-level (shared) attributes; each
            # instantiation below creates a fresh callback per optimization.
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                # record the most recent evaluation at each scipy iteration
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local that is never used -- dead
            # code; presumably meant to patch the iteration count. Confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy function instead of the tequila one and
    passes the objective construction down to it.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian defining (together with unitary) the objective
    unitary:
        the circuit preparing the state
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # fix: assign_variable was previously called on the one-element
            # list [k[1]], which is not a valid variable key; pass k[1] itself
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.7/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which objective should be
        differentiated; default None computes the total gradient over all
        variables and returns a dictionary.
    :param no_compile: skip the gradient-mode compilation step (caller must
        then supply an already compiled objective)
    :return: dictionary of Objectives if variable is None; otherwise the
        derivative Objective for the given variable (element-wise for
        QTensor inputs).
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate element-wise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # no dependence on the variable: derivative is the zero objective
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile into gates that are differentiable via the shift rule
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch on the (compiled) objective type; order matters here
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # single-expectation-value objective: differentiate its compiled form
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over an Objective: for each argument, multiply the outer
    derivative of the transformation (obtained via jax/autograd) with the
    inner derivative of the argument w.r.t. ``variable`` and sum the terms.

    :param objective: the (compiled) objective to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: an Objective evaluating the derivative
    :raises TequilaException: if neither jax nor autograd is available, or
        if no derivative contribution could be formed
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values to avoid recomputing them
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # holomorphic=True permits complex-valued transformations
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the chain rule: differentiate ``arg`` with respect to
    ``variable``. Returns 1.0/0.0 for plain variables, 0.0 for fixed
    variables, and recurses into expectation values or nested objectives
    otherwise.

    :param arg: a transform, variable, or expectation value to differentiate
    :param variable: the Variable with respect to which to differentiate
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        # d(arg)/d(variable) is 1 iff they are the same variable
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one and
        # re-compile with the same input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # otherwise it is a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U|H|U> with respect
    to a single variable: sums the shift-rule contributions of every gate in
    U that depends on the variable.

    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: an Objective evaluating d<U|H|U>/d(variable), or 0.0 if U does
        not depend on the variable
    '''
    unitary = E.U
    hamiltonian = E.H
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    result = Objective()
    # one shift-rule term per parametrized gate carrying this variable
    for position, gate in unitary._parameter_map[variable]:
        result += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert result is not None
    return result
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Parameter-shift-rule gradient of a single directly differentiable gate.
    Expects precompiled circuits.

    :param unitary: QCircuit: the circuit containing the gate to differentiate
    :param g: the parametrized gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is
        measured, in the case that unitary is contained within an
        ExpectationValue
    :return: an Objective whose calculation yields the gradient of g
        w.r.t. variable
    :raises TequilaException: if the gate exposes no shifted_gates()
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    chain_factor = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, shifted_gate in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * chain_factor) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.8/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Read-only record for one Pauli term in an intermediate format:
    a scalar coefficient together with the single-qubit operators and
    the qubit positions those operators act on.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # stored privately; exposed read-only through the properties below
        self._coeff = coefficient
        self._ops = operators
        self._pos = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coeff

    @property
    def operators(self):
        """List of single-qubit operator matrices."""
        return self._ops

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._pos
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the MPO.

    Each qubit q owns a rank-4 tensor of shape
    (bond_left, bond_right, phys_out, phys_in) with the physical dimensions
    fixed at 2x2; bond dimensions grow as terms are added.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # one trivial (1,1,2,2) tensor per qubit to start with.
        # BUGFIX: np.complex was a deprecated alias removed in NumPy 1.24;
        # the builtin complex is the documented replacement.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                            for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max (left) bond dimension over all qubit tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the existing tensor of `qubit` (no resizing).

        set_at: bond indices [b_l, b_r] to place a full 2x2 matrix, or
                [b_l, b_r, p_out, p_in] to set a single scalar entry.
        add_operator: the 2x2 matrix resp. scalar value to store.

        Raises a generic Exception if set_at has any other length.
        """
        # Set a full 2x2 physical block at the given bond position
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set one specific entry
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                    add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array and put the new 2x2
        block into the freshly created corner (last bond positions).

        update_dir: e.g. [1,1,0,0] -> extend the bond dimension along every
                    axis marked with a 1; a length-2 list is padded with
                    [0, 0]. The last two (physical) dimensions are always
                    2x2 and must stay fixed.
        add_operator: the 2x2 block to place at the new corner.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # BUGFIX: complex instead of the removed np.complex alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values at the last bond position
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compression of MPO via SVD: one forward and one backward sweep,
        discarding numerically zero singular values (threshold EPS) and
        distributing sqrt(s) to both sides to keep the gauge balanced.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices so each tensor is rank 3
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                    self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the next tensor
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the previous tensor
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle
    MPO-Hamiltonians: converts a tequila QubitHamiltonian into a list of
    MPOContainers (each bounded by maxdim) and can reconstruct the dense
    Hamiltonian tensor for verification.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object
        Args:
            hamiltonian: the Hamiltonian to encode as an MPO.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork. (Currently accepted but not stored.)
            n_qubits: number of qubits; inferred from the Hamiltonian if omitted.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension before a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class in the
            # visible source -- confirm it exists elsewhere, or always pass n_qubits.
            self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOContainers (stored in self.mpo)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
    def openfermion_to_intermediate(self):
        """
        Translate self.hamiltonian's paulistrings into a list of SubOperator
        records (coefficient, 2x2 matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # BUGFIX: dtype=complex instead of the np.complex alias
            # (removed in NumPy 1.24)
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty paulistring).
            # NOTE(review): `first` only flips when the first term IS the
            # identity; a later identity after a non-identity first term would
            # slip through this check -- confirm term ordering upstream.
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate
    def build_single_mpo(self, intermediate, j):
        """
        Build one MPOContainer starting at term index j, consuming terms until
        either all terms are used or the bond dimension reaches self.maxdim.

        Returns (mpo, j_next) where j_next is the first unconsumed term index.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: every qubit tensor is still a single 2x2 block; the
        # coefficient is distributed as its n_qubits-th (principal) root.
        # BUGFIX throughout: complex(...) instead of np.complex(...)
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All further terms grow the bond dimension by one each
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # boundary tensors only grow along their inner bond
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # compress periodically to keep intermediate bond dimensions small
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j
    def intermediate_to_mpo(self, intermediate):
        """Split all terms into as many MPOContainers as self.maxdim requires."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list
    def construct_matrix(self):
        """
        Recover the dense Hamiltonian tensor from self.mpo, e.g. to compare
        with the matrix obtained directly from tequila.

        Returns a tensor of shape (d,d,d,d) with d = 2**(n_qubits/2).
        NOTE(review): the int() truncation in d assumes an even n_qubits --
        confirm callers never pass an odd qubit count.
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #     |   |        |   |
        #    -O---O--...---O---O-
        #     |   |        |   |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                       for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # BUGFIX: previously the accumulation was nested inside the
            # torch.Tensor branch, so numpy-backend results were dropped and
            # H stayed None. Convert once, then accumulate unconditionally.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = np.asarray(res.tensor)
            if first:
                H = H_m
                first = False
            else:
                H = H + H_m
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.8/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    objective:
        the objective to evaluate.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        # Hamiltonian is expected to be callable (Hamiltonian(vars) -> tq Hamiltonian,
        # see __call__); unitary is the tq circuit whose variables are optimized.
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        # variables held fixed during the optimization (merged into every call)
        self.passive_angles = passive_angles
        self.Eval = Eval
        # filled on every __call__ with a summary line for the optimizer log
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        call a wrapped objective.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        numpy.array:
            value of self.objective with p translated into variables, as a numpy array.
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # circuit variables are passed through unchanged; parameters that only
        # occur in the parametrized Hamiltonian are cast to complex
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # evaluate the parametrized Hamiltonian at the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        # NOTE(review): the simulation backend is hard-coded to 'qulacs' here --
        # confirm it is installed in the target environment
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient vector at parameter values p.

        The gradient has two contributions per parameter: the analytic
        circuit gradient (for variables appearing in the unitary) and the
        expectation value of the Hamiltonian derivative (for variables
        appearing in the parametrized Hamiltonian).

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient.
        args
        kwargs
        Returns
        -------
        numpy.array:
            the gradient as a complex64 numpy array.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUGFIX: the result is returned as complex64 and individual entries
        # may be complex; a float64 buffer would discard imaginary parts (or
        # raise on scalar assignment), so allocate complex from the start.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        # circuit variables stay real; Hamiltonian-only parameters are cast to complex
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from d<H>/d(theta) through the unitary
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from <dH/d(theta)> for Hamiltonian parameters
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # a plain number can be stored directly; a compiled objective
            # still has to be evaluated at the current variables
            if isinstance(derivative, (float, complex, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    overwrite the expectation and gradient container objects

    Variant of tequila's SciPy optimizer that takes a parametrized
    Hamiltonian plus a unitary instead of a precompiled Objective, and wires
    the local _EvalContainer/_GradContainer into scipy.optimize.minimize.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        objective: Objective:
            the objective being optimized.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instructions: 'zero' or 'random'
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in this
                # file -- this branch would raise a NameError; confirm the import.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            # NOTE(review): warnings and TequilaWarning are not imported in this
            # file -- confirm before relying on this branch.
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        objective: Objective:
            the objective to optimize.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # convert the parametrized Hamiltonian and collect its own variables
        # and analytic derivatives (w.r.t. Hamiltonian parameters)
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        # the full variable set is Hamiltonian variables + circuit variables
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # silent warm-up evaluation: populates E.infostring for the log below
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this scope (this
                # method takes Hamiltonian/unitary) -- the qng path would raise
                # a NameError; confirm before using gradient='qng'.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # any other string (e.g. '2-point') is handed to scipy as-is
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up evaluation of the gradient, mirroring E above
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # per-iteration bookkeeping; scipy calls this after each step
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local name that is never used --
            # presumably callback.real_iterations was intended; confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and pass down the objective construction
    down
    Parameters
    ----------
    Hamiltonian:
        the (parametrized) Hamiltonian to optimize over
    unitary:
        the tequila circuit whose variables are optimized
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize user-supplied gradient/hessian dictionaries to Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: previously assign_variable([k[1]]) wrapped the second key
            # in a one-element list instead of passing the key itself
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.8/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which objective should be
        differentiated; default None computes the total gradient.
    :param no_compile: if True, skip the gate-compilation pass (caller
        guarantees objective is already compiled for differentiation).
    :return: dictionary of Objectives (one per variable) when variable is None,
        otherwise a single derivative Objective (or number for trivial cases).
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    if isinstance(objective, QTensor):
        # differentiate a tensor of objectives elementwise via numpy.vectorize
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    if variable not in objective.extract_variables():
        # derivative w.r.t. an unused variable is the (empty) zero objective
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile down to gates for which shift rules are available
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # compiled single expectation value: the last argument holds it
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate a compiled Objective with respect to one variable via the
    chain rule: the outer derivative of the transformation (obtained through
    jax/autograd) is multiplied with the inner derivative of each argument
    (an expectation value or a nested, purely variable-dependent objective).

    :param objective: compiled Objective to differentiate
    :param variable: the Variable to differentiate with respect to
    :return: the derivative as an Objective
    """
    args = objective.args
    transformation = objective.transformation
    dO = None

    # cache inner derivatives of expectation values that occur multiple times
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # arg is an expectation value; save redundancies via the cache
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner

    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the chain rule: differentiate one argument of an
    objective (a Variable, FixedVariable, expectation value or nested
    objective) with respect to `variable`.

    :param arg: the object to be differentiated
    :param variable: the Variable with respect to which to differentiate
    :return: 1.0 / 0.0 for a (non-)matching Variable, 0.0 for fixed
        parameters, otherwise a derivative Objective from the
        expectation-value / objective rules.
    '''
    assert (isinstance(variable, Variable))

    # plain variables differentiate to the Kronecker delta
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    # abstract expectation values go through the shift-rule machinery
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    # compiled expectation values carry their abstract counterpart
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # anything else is treated as a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <H>_U with respect
    to one circuit variable: sums the shift-rule contributions of every
    gate in U that is parametrized by `variable`.

    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: the derivative as an Objective (0.0 if U does not depend on it)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    dO = Objective()
    # every (position, gate) pair carrying this variable contributes one term
    for position, gate in unitary._parameter_map[variable]:
        dO += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Shift-rule gradient of a single directly differentiable gate inside a
    precompiled circuit.

    :param unitary: QCircuit object containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position in unitary at which g appears
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian measured in the enclosing
        expectation value
    :return: an Objective whose evaluation yields the gradient of g
        w.r.t. variable
    '''
    # possibility for overwride in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    inner_grad = __grad_inner(g.parameter, variable)
    dOinc = Objective()
    # each shifted replica contributes weight * d(parameter)/d(variable)
    for weight, shifted_gate in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expval = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        dOinc += (weight * inner_grad) * expval
    return dOinc
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.9/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Read-only record used as an intermediate format for a single Pauli
    term: a scalar coefficient, the operator matrices, and the qubit
    positions on which they act.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # stored privately; exposed read-only through the properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of the term."""
        return self._coefficient

    @property
    def operators(self):
        """List of 2x2 operator matrices."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Container holding the site tensors of a matrix-product operator (MPO).

    Each qubit owns one rank-4 tensor with index order
    (left bond, right bond, physical-out, physical-in); the two physical
    indices always have dimension 2.  The class mimics a dynamic array:
    entries can be written in place, bond dimensions can be grown one step
    at a time, and the whole MPO can be compressed via SVD sweeps.
    """

    def __init__(self, n_qubits: int):
        """Create a trivial (bond-dimension 1) MPO on n_qubits sites."""
        self.n_qubits = n_qubits
        # Bugfix: builtin `complex` replaces `np.complex`, which was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal (left) bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator):
        """
        Write data into the tensor of `qubit`.

        Parameters
        ----------
        set_at:
            [b_l, b_r] to place a full 2x2 operator at bond position
            (b_l, b_r), or [b_l, b_r, p_out, p_in] to set a single entry.
        add_operator:
            2x2 array when len(set_at) == 2, scalar when len(set_at) == 4.

        Raises
        ------
        Exception: if set_at has any other length.
        """
        if len(set_at) == 2:
            # set a whole 2x2 matrix at one bond position
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # set one specific scalar entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimension(s) of the tensor at `qubit` (dynamic-array
        style) and place `add_operator` in the newly created corner.

        Parameters
        ----------
        update_dir:
            e.g. [1,1,0,0] -> extend the dimension along every axis marked
            with a 1; a length-2 list is padded with [0, 0].  The last two
            (physical) dimensions must stay at 2 and may not be extended.
        add_operator:
            2x2 operator written at the new (max-left, max-right) corner.

        Raises
        ------
        Exception: on a wrong update_dir length or an attempt to grow a
            physical dimension.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # Bugfix: build a new list instead of `update_dir += [0, 0]`,
                # which mutated the caller's list in place.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # copy old values, then place the new operator in the last corner
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO in place with a left-to-right followed by a
        right-to-left sweep of SVDs, dropping singular values below EPS and
        splitting the remaining weight (sqrt) between neighbouring tensors.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices so each tensor is rank 3
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # forward sweep
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # count the singular values above the EPS cutoff
            num_nonzeros = len(np.argwhere(s > EPS))
            # construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # undo the permutation and push vh into the next tensor
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # backward sweep
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build and store a matrix-product-operator (MPO) representation of a
    qubit Hamiltonian (wannabe subclass of tensornetwork.FiniteMPO).

    The Hamiltonian is first translated into an intermediate list of
    SubOperator records (one per Pauli string) and then folded into one or
    more MPOContainer objects whose bond dimension is capped at `maxdim`.
    """

    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian (or a text identifier).
            backend: an optional backend; defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred from the Hamiltonian if
                omitted.
            name: an optional name for the MPO.
            maxdim: maximal bond dimension of one MPO before a new one is
                started.
        """
        # NOTE: the project-type annotations above are strings so the class
        # can be defined even when tequila/tensornetwork are not imported.
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO."""
        return self._n_qubits

    def get_n_qubits(self):
        """
        Infer the qubit count from the Hamiltonian.

        Bugfix: __init__ referenced this method but it was never defined,
        so constructing without an explicit n_qubits raised AttributeError.
        tq.QubitHamiltonian exposes `n_qubits`; a plain-text hamiltonian
        still requires an explicit n_qubits argument.
        """
        return self.hamiltonian.n_qubits

    def make_mpo_from_hamiltonian(self):
        """Translate self.hamiltonian into MPO form, stored in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert self.hamiltonian (a tq.QubitHamiltonian) into a list of
        SubOperator records, one per Pauli string.

        Raises:
            Exception: if an identity term occurs after the first term
                while the first term was also an identity.
        """
        def get_pauli_matrix(string):
            # builtin `complex` replaces `np.complex` (removed in NumPy 1.24)
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # store all paulistrings in the intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be a pure identity (no items); its
            # coefficient is later distributed over all sites.
            # NOTE(review): `first` is only cleared in the identity branch,
            # so the sanity check below never fires when the first term is
            # non-identity — confirm whether that is intended.
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # collect the operators and the qubits they act on
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Fold Pauli terms intermediate[j:] into one MPOContainer until either
        all terms are consumed or the bond dimension reaches self.maxdim.

        Returns:
            (mpo, j): the filled container and the index of the first
            unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: write directly into the (1,1,2,2) seed tensors.  The
        # coefficient is spread evenly over all sites via its n_qubits-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # Remaining terms: grow the bond dimension by one per term and
        # compress every 100 terms to keep it manageable.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # edge sites only grow one bond index; bulk sites grow both
                # (it is guaranteed that every index appears only once in positions)
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # place the term's operator, or an identity, on this site
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         my_operators[my_pos_index])
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         np.eye(2))
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as self.maxdim requires."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Contract the stored MPO(s) back into a dense tensor, e.g. to compare
        with the Hamiltonian matrix obtained from tequila.

        Returns:
            Tensor of shape (d, d, d, d) with d = 2**(n_qubits/2).
            NOTE(review): this reshape assumes an even n_qubits — confirm.
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # Network layout (contract along the horizontal bond edges):
        #  | | | |
        # -O--O--...--O--O-
        #  | | | |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # connect the bond indices between neighbouring sites
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # collect dangling (free) edges: left, right, then top and bottom
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Bugfix: with a non-torch backend the result is already
            # numpy-compatible; previously H_m was undefined in that case.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.9/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Energy-evaluation callable handed to scipy.optimize (overwrites the
    call function of tequila's container so the Hamiltonian itself may be
    parametrized).

    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tq Hamiltonian.
    unitary:
        the (parametrized) circuit over which the expectation value is taken.
    param_keys:
        ordered variable keys; position i corresponds to p[i] in __call__.
    Ham_derivatives:
        derivatives of the Hamiltonian w.r.t. its own variables
        (consumed by _GradContainer).
    samples:
        the number of samples (shots) to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each __call__.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__.
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered like
            self.param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            the simulated expectation value <H(p)>_U, cast to complex.
        """
        angles = {}
        for i in range(self.N):
            # circuit variables are passed through as-is; variables that only
            # appear in the Hamiltonian are cast to complex
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # re-instantiate the Hamiltonian at the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient-evaluation callable handed to scipy.optimize (overwrites the
    call function).

    For each parameter the total derivative has up to two parts: the
    circuit part (tequila shift-rule gradient of <H>_U) and the Hamiltonian
    part (<dH/dtheta>_U built from Ham_derivatives).
    This class is used by the SciPy optimizer and should not be used elsewhere.
    See _EvalContainer for attribute details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered like
            self.param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            the gradient vector (complex64), ordered like self.param_keys.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated real (float64) but cast to
        # complex64 on return; a genuinely complex derivative value would be
        # rejected by the element assignment below — confirm intended.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # same real/complex split as in _EvalContainer.__call__
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0  # counts expectation values for the infostring
        for i in range(self.N):
            derivative = 0.0
            # circuit contribution: d<H>_U / dtheta_i via tq.grad
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian contribution: <dH/dtheta_i>_U
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # `derivative` is either a plain number (Hamiltonian part only)
            # or a compiled objective that still has to be evaluated
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    Variant of tequila's OptimizerSciPy that overwrites the expectation and
    gradient container objects so that the Hamiltonian itself may be
    parametrized (variables can live in H as well as in U).
    """

    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables:
            all variables of the problem (Hamiltonian and circuit).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            # random initialization between 0 and 2pi (not recommended)
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k: 0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module — this raise would itself fail with NameError.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings / TequilaWarning are not imported here.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (PQH) defining the objective together
            with `unitary`.
        unitary:
            the (parametrized) circuit.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # collect all variables: those of the Hamiltonian plus the circuit's
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # energy container; a silent warm-up call populates E.infostring
        E = _EvalContainer(Hamiltonian=H,
                           unitary=unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective`, get_qng_combos and _QngContainer
                # are not defined in this module — this branch raises NameError.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # any other string is handed to scipy as a numerical scheme
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient, dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytical gradient via the overwritten container
            dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                                unitary=unitary,
                                Hamiltonian=H,
                                Eval=E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            # records per-iteration data from the containers' histories
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assigns a local that is never used afterwards —
        # probably intended to update callback.real_iterations.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy class instead of tequila's stock optimizer
    and passes the (parametrized Hamiltonian, unitary) pair down to it.
    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian whose expectation value is minimized
    unitary:
        tequila circuit preparing the trial state
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # Normalize user-supplied gradient/hessian dictionaries so that all keys
    # are proper tequila Variables.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # Bug fix: the second key entry must be assigned directly; it was
            # previously wrapped in a list (assign_variable([k[1]])), which
            # produces a bogus key / raises on real variables.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    # NOTE(review): noise is forwarded under the keyword 'noise_model=' --
    # confirm the optimizer constructor accepts it (stock tequila uses 'noise=').
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.9/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated (Objective or QTensor)
    :param variable: parameter with respect to which objective should be
        differentiated; default None computes the total gradient and returns a
        dictionary mapping each variable to its derivative
    :param no_compile: if True, skip the gradient-mode circuit compilation and
        differentiate the objective as it is
    :return: dictionary of derivative Objectives if variable is None,
        otherwise a single derivative Objective
    '''
    if variable is None:
        # None means that all components are created:
        # recurse once per variable of the objective and collect the results
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    # QTensor: differentiate elementwise via numpy.vectorize
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    # fast return: derivative w.r.t. an unused variable is the zero Objective
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile down to gates for which shift rules exist (gradient_mode)
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # Objective wrapping a single expectation value: differentiate the
        # compiled argument directly
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over an Objective: for each argument, multiply the outer
    derivative of the transformation (via autograd/jax) with the inner
    derivative of the argument and sum the contributions.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values so each is built only once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # NOTE(review): holomorphic=True (differs from stock tequila)
            # assumes the transformation is complex-differentiable and is fed
            # complex inputs -- confirm for the jax backend
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # expectation value: save redundancies via the cache above
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner derivative of a single argument with respect to one Variable.
    Returns 1.0 when the argument *is* that variable, 0.0 when it is a
    different or fixed variable, and recurses into expectation values or
    nested objectives otherwise.
    :param arg: a transform, variable or expectation-value object to differentiate
    :param variable: the Variable with respect to which arg is differentiated
    '''
    assert isinstance(variable, Variable)
    # Base cases: plain variables differentiate to constants.
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    # Abstract expectation value: apply the shift-rule machinery.
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    # Compiled expectation values expose their abstract counterpart;
    # differentiate that and re-compile with the same backend arguments.
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # Anything else is treated as a nested Objective.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value with respect to one
    variable: sum the shift-rule contributions of every gate that carries
    that variable.
    :param E: the expectation value <U|H|U> to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: derivative as an Objective
    '''
    H = E.H
    U = E.U
    if not U.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))
    # fast return if the circuit does not depend on the variable at all
    if variable not in U.extract_variables():
        return 0.0
    # accumulate one shift-rule term per (position, gate) pair of the variable
    dO = Objective()
    for position, gate in U._parameter_map[variable]:
        dO += __grad_shift_rule(U, gate, position, variable, H)
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of one directly differentiable gate via its shift rule.
    Expects precompiled circuits.
    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g within unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: Hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # Custom gate constructions may override their own shift decomposition;
    # without one there is nothing we can do (compiler should have run).
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate parameter w.r.t. the variable
    inner = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        replaced = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=replaced, H=hamiltonian)
        result += (weight * inner) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.4/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight record for one Pauli term in intermediate form: a scalar
    coefficient together with the single-qubit operator matrices and the
    qubit positions on which they act.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # stored privately; exposed read-only through the properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of the term."""
        return self._coefficient

    @property
    def operators(self):
        """List of single-qubit operator matrices."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress
    the MPO via SVD.

    Each entry of ``container`` is a rank-4 tensor with index order
    (bond_left, bond_right, physical_out, physical_in); the two physical
    dimensions are always 2.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # Use the builtin complex: the np.complex alias was removed in
        # NumPy 1.24 and raised AttributeError (bug fix).
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                            for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns max (left) bond dimension over all sites of the container """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor at ``qubit``.

        set_at: where to put data; length 2 -> write a full 2x2 matrix at the
        given bond indices, length 4 -> write a single scalar entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array: grow the bond dimensions of the
        tensor at ``qubit`` and write ``add_operator`` into the new
        (last, last) 2x2 block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 only.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # Bug fix: build a NEW list instead of 'update_dir += [0, 0]'
                # so the caller's argument list is not mutated.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # builtin complex (np.complex removed in NumPy 1.24)
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD: a forward sweep followed by a backward
        sweep, keeping only singular values larger than EPS and distributing
        their square roots to both factors.
        """
        n_qubits = len(self.container)
        # Fuse the two physical legs so every tensor is rank 3: (bond, bond, 4)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                    self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Split the fused physical leg back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle MPO-Hamiltonians.

    Converts a tequila QubitHamiltonian into a list of MPOContainer objects
    (a new MPO is started whenever the bond dimension would exceed
    ``maxdim``) and can contract them back into a dense tensor.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object
        Args:
            hamiltonian: tequila QubitHamiltonian (or a text identifier).
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; queried from get_n_qubits if None.
            name: An optional name for the MPO.
            maxdim: bond-dimension threshold at which a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class --
            # confirm it exists elsewhere, otherwise always pass n_qubits.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (MPO sites)."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build self.mpo (a list of MPOContainers) from self.hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of self.hamiltonian into a list of
        SubOperator records (coefficient, operator matrices, positions).
        Only the first term may be a pure identity.
        """
        def get_pauli_matrix(string):
            # builtin complex: the np.complex alias was removed in NumPy 1.24 (bug fix)
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            if first and not paulistring.items():
                # identity term: contributes only through its coefficient
                pass
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            intermediate += [SubOperator(coefficient=coefficient,
                                         operators=operators,
                                         positions=positions)]
            # Bug fix: drop the 'first' flag after ANY term (previously it was
            # only cleared for an identity first term, so a late identity
            # could slip through without raising).
            first = False
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPOContainer starting at term index j of ``intermediate``;
        terms are added until all are consumed or the bond dimension reaches
        ``maxdim``. Returns (mpo, next_j).
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices).
        # The coefficient is spread evenly over all sites as its n-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimension by one block per term
        # until the maximal dimension is reached.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions.
                # Boundary sites only grow along their inner bond.
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # periodic compression keeps the intermediate dimension small
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Consume all intermediate terms, producing a list of MPOContainers."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        '''
        Recover the dense tensor represented by self.mpo (summed over all
        stored MPOs), e.g. to compare with the Hamiltonian matrix from tq.
        Returns a tensor of shape (d, d, d, d) with d = 2**(n_qubits/2).
        '''
        mpo = self.mpo
        # Contract over all bond indices.
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices):
            # left, right, then all upper, then all lower physical legs
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # Bug fix: with a non-torch backend the result is already an
                # ndarray; previously H_m was left undefined on this path.
                H_m = res.tensor
            if first:
                H = H_m
                first = False
            else:
                H = H + H_m
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.4/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Unlike tequila's stock container, this one holds a parametrized
    Hamiltonian and a unitary separately and builds/simulates the
    expectation value freshly on every call.

    Attributes
    ---------
    Hamiltonian:
        parametrized Hamiltonian; called with the variable dictionary to
        produce a concrete tequila Hamiltonian.
    unitary:
        tequila circuit preparing the trial state.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    Ham_derivatives:
        derivatives of the parametrized Hamiltonian (used by _GradContainer).
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        complex:
            expectation value of the Hamiltonian for the circuit at p.
        """
        # Circuit variables keep their raw value; Hamiltonian-only parameters
        # are cast to complex (the parametrized Hamiltonian expects that).
        angles = {}
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Instantiate the Hamiltonian at the current variable values and
        # simulate the fresh expectation value.
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.

    The gradient has two contributions per parameter: the derivative of the
    circuit (via tq.grad) and the derivative of the parametrized Hamiltonian
    (via the precomputed Ham_derivatives).
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient vector (complex64), one entry per parameter key.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        # Same convention as _EvalContainer.__call__: circuit variables stay
        # raw, Hamiltonian-only parameters are cast to complex.
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution 1: derivative of the circuit w.r.t. this parameter
            # (analytic shift rule via tq.grad on the fixed Hamiltonian).
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution 2: derivative of the parametrized Hamiltonian
            # itself, measured against the unchanged circuit.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # 'derivative' is either a plain number (only contribution 2) or a
            # compiled tequila objective that still has to be evaluated.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian parameters plus
            circuit parameters), used to autocomplete initial values.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if numeric: the same value for every variable
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables  # no-op; kept from the adapted tequila base code
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported at the
                # top of this module -- this branch would raise NameError; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): 'warnings' and 'TequilaWarning' are not imported
                # in this module -- this branch would raise NameError; confirm.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into the variables being optimized and the frozen rest
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        The objective is built on the fly from a parametrized Hamiltonian and a
        parametrized circuit: Hamiltonian-coefficient variables and circuit
        variables are optimized jointly.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian; converted via convert_PQH_to_tq_QH and
            differentiated through its _construct_derivatives method.
        unitary:
            parametrized tequila circuit whose variables are optimized together
            with the Hamiltonian variables.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Collect all variables: first the Hamiltonian-coefficient variables,
        # then the circuit variables.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        # Split into variables that scipy optimizes (active) and fixed ones (passive).
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        # E wraps Hamiltonian + circuit into a scipy-callable objective.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # One silent warm-up evaluation to populate E.infostring.
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this method's scope
                # (the signature takes Hamiltonian/unitary) -- this QNG branch
                # would raise NameError if taken; confirm intended usage.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # any other string ('2-point', 'cs', '3-point') is handed
                # straight to scipy for numerical differentiation
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined-`objective` issue as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient container (shift-rule + Hamiltonian derivatives)
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        # Per-iteration callback that records energies/gradients/angles and
        # forwards to a user-supplied callback if one was given.
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): assigns a local that is never used -- presumably
            # meant `callback.real_iterations = ...`; confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and pass down the objective construction
    down
    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian handed through to the optimizer.
    unitary:
        parametrized circuit handed through to the optimizer.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device the backend should run on.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize user-supplied gradient/hessian dictionaries to Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: the second tuple element was previously passed as a list
            # (assign_variable([k[1]])), which produces an invalid variable key.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.4/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param obj (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
    :param variables (list of Variable): parameter with respect to which obj should be differentiated.
        default None: total gradient.
    return: dictionary of Objectives, if called on gate, circuit, exp.value, or objective; if Variable or Transform, returns number.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert (k is not None)
            # NOTE(review): *args/**kwargs are not forwarded into the recursive
            # call -- confirm whether that is intended.
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    # QTensor: differentiate element-wise via numpy.vectorize
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    if variable not in objective.extract_variables():
        # objective does not depend on the variable: derivative is the empty
        # (zero) Objective
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile down to primitive gates for which shift rules exist
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # dispatch on the (compiled) structure
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate an Objective w.r.t. `variable` by the chain rule: for every
    argument, multiply the outer derivative of the transformation (via
    jax/autograd) with the inner derivative of the argument itself.
    Inner derivatives of expectation values are cached to avoid recomputation.
    """
    arg_list = objective.args
    trafo = objective.transformation
    ev_cache = {}
    total = None
    for pos, arg in enumerate(arg_list):
        # outer derivative of the transformation w.r.t. argument `pos`
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(trafo, argnums=pos, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(trafo, argnum=pos)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # trivial transformation -> outer derivative is the constant 1
        if trafo is None or trafo == identity:
            outer = 1.0
        else:
            outer = Objective(args=arg_list, transformation=df)

        if hasattr(arg, "U"):
            # expectation value: reuse a cached inner derivative if available
            if arg not in ev_cache:
                ev_cache[arg] = __grad_inner(arg=arg, variable=variable)
            inner = ev_cache[arg]
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=arg, variable=variable)

        # skip zero contributions so no vanishing expectation values pile up
        if inner == 0.0:
            continue

        contribution = outer * inner
        total = contribution if total is None else total + contribution

    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Recursive base of the gradient machinery: walks derivatives all the way
    down to Variables, returning 1.0 (0.0) when a Variable is (is not)
    identical to `variable`.
    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert (isinstance(variable, Variable))

    # base case: d(var)/d(variable) is 1 iff they match
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0

    # constants have zero derivative
    if isinstance(arg, FixedVariable):
        return 0.0

    # expectation values get the shift-rule treatment
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)

    # pre-compiled objects: differentiate the abstract expectation value,
    # then recompile with the original input arguments
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)

    # anything else is treated as a nested Objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value: sum the shift-rule
    contributions of every gate in the circuit that carries `variable`.
    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which differentiation is performed
    :return: derivative as an Objective
    '''
    H = E.H
    U = E.U
    if not U.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))

    # fast return if the circuit does not depend on the variable at all
    if variable not in U.extract_variables():
        return 0.0

    # accumulate one shift-rule term per parametrized gate carrying `variable`
    dO = Objective()
    for position, gate in U._parameter_map[variable]:
        dO += __grad_shift_rule(U, gate, position, variable, H)

    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a directly differentiable (precompiled) gate via its shift
    rule: build one expectation value per shifted replacement gate and sum
    them with the shift weights times the chain-rule factor.
    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # chain-rule factor d(parameter)/d(variable)
    chain_factor = __grad_inner(g.parameter, variable)

    accumulated = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expval = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * chain_factor) * expval
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.9/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Helper record bundling the coefficient, the operators and the qubit
    positions of a single Hamiltonian term -- an intermediate format on the
    way from a qubit Hamiltonian to an MPO.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # payload is stored privately and exposed read-only via properties
        self._coefficient, self._operators, self._positions = \
            coefficient, operators, positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operators (2x2 matrices) making up this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Container for the site tensors of a matrix product operator (MPO).

    One rank-4 tensor of shape (bond_left, bond_right, 2, 2) is stored per
    qubit. Tensors can be written to (`set_tensor`), grown like a dynamic
    array (`update_container`), and the whole MPO can be compressed via SVD
    sweeps (`compress_mpo`).

    BUGFIX: `np.complex` (a deprecated alias of the builtin `complex`) was
    removed in NumPy 1.24 and raised AttributeError on modern NumPy; all
    occurrences are replaced by the builtin `complex`, which is an exact
    drop-in replacement.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        """
        Allocate one (1, 1, 2, 2) zero tensor per qubit.

        Parameters
        ----------
        n_qubits:
            number of qubits / MPO sites.
        """
        self.n_qubits = n_qubits
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left-bond dimension over all site tensors (>= 1)."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the site tensor of `qubit`.

        Parameters
        ----------
        qubit:
            site index.
        set_at:
            either [i, j] to place a full 2x2 matrix at bond indices (i, j),
            or [i, j, k, l] to set the single scalar entry at that position.
        add_operator:
            2x2 matrix (when len(set_at) == 2) or scalar (when len(set_at) == 4).

        Raises
        ------
        Exception
            if set_at has neither length 2 nor 4.
        """
        if len(set_at) == 2:
            # place a whole 2x2 operator at the given bond indices
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # set a single scalar entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the site tensor of `qubit` like a dynamic array.

        Parameters
        ----------
        qubit:
            site index.
        update_dir:
            e.g. [1, 1, 0, 0] (or just [1, 1]) -- the bond dimensions are
            extended along every axis carrying a 1; the physical (last two)
            dimensions always stay 2x2.
        add_operator:
            2x2 matrix written into the newly created corner block.

        Raises
        ------
        Exception
            if update_dir has an invalid length or touches the physical axes.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # note: extends the caller's list in place (original behavior)
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # copy old values into the top-left block
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # write the new operator into the fresh corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO with a forward and a backward SVD sweep, discarding
        singular values below EPS and distributing sqrt-weights between the
        left and right singular vectors.
        """
        n_qubits = len(self.container)
        # flatten the physical legs: (l, r, 2, 2) -> (l, r, 4)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # forward sweep
        for q in range(n_qubits - 1):
            # apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # count the non-negligible singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # square roots of the kept singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb vh into the next site
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # backward sweep
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb u into the previous site
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # restore the physical legs: (l, r, 4) -> (l, r, 2, 2)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build matrix-product-operator (MPO) representations of a qubit Hamiltonian
    using the tensornetwork package.

    The Hamiltonian is first translated into SubOperator records
    (`openfermion_to_intermediate`), which are assembled into one or more
    compressed MPOs (`intermediate_to_mpo`), each capped at `maxdim` bond
    dimension.

    BUGFIXES: (1) `np.complex` was removed in NumPy 1.24 and raised
    AttributeError; replaced everywhere with the builtin `complex` (an exact
    drop-in). (2) `construct_matrix` left `H_m` undefined when the contracted
    tensor was not a torch.Tensor (NameError with the numpy backend); the
    ndarray is now used directly in that case.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the Hamiltonian to represent (tequila QubitHamiltonian
                or a text identifier).
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork. (Currently unused in this class.)
            n_qubits: number of qubits; derived from the Hamiltonian if omitted.
            name: An optional name for the MPO. (Currently unused.)
            maxdim: maximal bond dimension of one partial MPO before a new
                one is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class --
            # presumably provided elsewhere; confirm before relying on it.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Translate self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert the Hamiltonian's Pauli strings into a list of SubOperator
        records (coefficient, 2x2 Pauli matrices, qubit positions).

        Raises
        ------
        Exception
            if an identity Pauli string occurs anywhere but first.
        """
        def get_pauli_matrix(string):
            # map a Pauli label to its 2x2 matrix
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # store all paulistrings in the intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # only the first term may be a pure identity (empty items)
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # collect operators and the qubits they act on
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Assemble one MPO starting at term `j` of `intermediate`, adding terms
        until all are consumed or the bond dimension reaches maxdim.

        Returns
        -------
        tuple:
            (compressed MPOContainer, index of the first unconsumed term)
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ------------------------------------------------------------------
        # First term: directly fill the (still 1x1) bond block of each site.
        # The coefficient is distributed evenly as its n_qubits-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        root_coeff = complex(my_coefficient) ** (1 / n_qubits)
        for q in range(n_qubits):
            if q in my_positions:
                op = my_operators[my_positions.index(q)]
            else:
                op = np.eye(2)
            mpo.set_tensor(qubit=q, set_at=[0, 0], add_operator=root_coeff * op)
        # ------------------------------------------------------------------
        # Remaining terms: grow the bond dimension by one block per term and
        # compress periodically, until maxdim would be exceeded.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            root_coeff = complex(my_coefficient) ** (1 / n_qubits)
            for q in range(n_qubits):
                # every qubit index appears at most once in my_positions;
                # edge sites only grow one bond, bulk sites grow both
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # place the term's operator if one acts here, else identity
                if q in my_positions:
                    op = my_operators[my_positions.index(q)]
                else:
                    op = np.eye(2)
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=root_coeff * op)
            if not j % 100:
                # intermediate compression keeps the bond dimension manageable
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split `intermediate` into as many partial MPOs as maxdim requires."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # start with 0, then the final count is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Contract self.mpo back into a dense tensor of shape (d, d, d, d) with
        d = 2**(n_qubits/2), e.g. to compare with the Hamiltonian from tequila.
        """
        mpo = self.mpo
        # contract over all bond indices;
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # network layout (== contracted bonds, | dangling physical legs):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # connect neighbouring bond legs
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # collect dangling edges (free indices)
            edges = []
            # left dangling edge
            edges += [nodes[0].get_edge(0)]
            # right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # torch backend yields torch tensors; numpy backend already
            # yields ndarrays (previously H_m was left undefined here)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.9/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    The objective is built lazily on every call: the (parametrized) Hamiltonian
    is evaluated at the current variables, combined with the fixed circuit into
    an expectation value, and simulated.
    Attributes
    ---------
    Hamiltonian:
        callable parametrized Hamiltonian; called with the current variables.
    unitary:
        the parametrized circuit measured against the Hamiltonian.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # infostring is filled on the first __call__ (expectationvalue count)
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        call a wrapped objective.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        numpy.array:
            value of self.objective with p translated into variables, as a numpy array.
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            # circuit parameters stay real; parameters belonging only to the
            # Hamiltonian are cast to complex
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # evaluate the parametrized Hamiltonian at the current variables
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): a complex return value is unusual for scipy objectives
        # (scipy expects a real scalar) -- confirm the cast is intended.
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable wrapper handing the gradient evaluation to scipy while keeping the
    optimization history.  The total derivative has two contributions per
    variable: the circuit derivative (parameter-shift via tq.grad) and the
    derivative of the variable-dependent Hamiltonian itself.

    This class is used by the SciPy optimizer and should not be used elsewhere.
    See _EvalContainer for the shared attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the scipy parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient.
        args
        kwargs

        Returns
        -------
        numpy.ndarray (complex64):
            gradient vector ordered like self.param_keys.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUG FIX: allocate a complex vector. The original real-dtype
        # numpy.zeros(self.N) silently discarded the imaginary part of complex
        # derivatives on assignment (ComplexWarning) before the final cast.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # same split as _EvalContainer: circuit variables real,
            # Hamiltonian-only variables complex
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution 1: derivative of the circuit (parameter shift)
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution 2: derivative of the Hamiltonian coefficients
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # ROBUSTNESS FIX: the original only accepted float/numpy.complex64
            # here; plain complex or complex128 scalars from tq.simulate would
            # fall through and be (wrongly) treated as callables.
            if isinstance(derivative, numbers.Number):
                dE_vec[i] = derivative
            else:
                # still an uncompiled/compiled objective -> evaluate it now
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer that overwrites the expectation-value and gradient
    container objects so that the Hamiltonian itself may depend on variables
    (via the custom _EvalContainer/_GradContainer above).
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            all variables appearing in the Hamiltonian and the circuit.
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables  # no-op; kept from the original implementation
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k: 0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in this
                # module — this branch would raise NameError; confirm imports.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` and TequilaWarning are not imported in
                # this module — would raise NameError if reached; confirm.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (PQH form) to optimize over; converted to
            a tq Hamiltonian internally via convert_PQH_to_tq_QH.
        unitary:
            the (parametrized) circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # variables appearing in the Hamiltonian plus their derivative operators
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value dictionary into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # energy evaluator handed to scipy (does compilation lazily per call)
        E = _EvalContainer(Hamiltonian=H,
                           unitary=unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # silent warm-up call: populates E.infostring with the expectationvalue count
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective`, `get_qng_combos` and `_QngContainer`
                # are undefined in this scope (import commented out at file top);
                # the QNG path would raise NameError — confirm before use.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # scipy numerical gradient strings like '2-point' pass through
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient, dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined-name concern as the QNG branch above.
                combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                                unitary=unitary,
                                Hamiltonian=H,
                                Eval=E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            # silent warm-up call to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        # capture `self` for the nested callback class below
        optimizer_instance = self

        class SciPyCallback:
            # per-iteration bookkeeping filled by scipy's callback hook
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is never used afterwards — looks like a
            # leftover; confirm whether callback.real_iterations was intended.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Entry point: builds the local optimize_scipy optimizer and delegates to it,
    passing the Hamiltonian/unitary pair down so the objective can be
    constructed internally.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian (PQH form) to optimize over.
    unitary:
        the (parametrized) circuit.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUG FIX: the original wrapped k[1] in a list -> assign_variable([k[1]])
            # which either crashes or produces a bogus key; both tuple entries
            # must be converted the same way.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    # NOTE(review): noise is forwarded as `noise_model=` — confirm this matches
    # the OptimizerSciPy constructor signature in the installed tequila version.
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.9/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective (Objective/QTensor): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
                                default None: total gradient (one entry per variable).
    :param no_compile: skip the gradient-mode circuit compilation (caller guarantees
                       the objective is already compiled appropriately).
    :return: dict of Objectives (one per variable) when variable is None;
             otherwise a single gradient Objective (or number for trivial cases).
    '''
    if variable is None:
        # total gradient: recurse once per variable of the objective
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
        if isinstance(objective, QTensor):
            # differentiate elementwise over the tensor of objectives
            f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
            ff = vectorize(f)
            return ff(objective)
        if variable not in objective.extract_variables():
            # derivative of something independent of `variable` is the zero objective
            return Objective()
        if no_compile:
            compiled = objective
        else:
            # compile into shift-rule-differentiable primitives first
            compiler = CircuitCompiler(multitarget=True,
                                       trotterized=True,
                                       hadamard_power=True,
                                       power=True,
                                       controlled_phase=True,
                                       controlled_rotation=True,
                                       gradient_mode=True)
            compiled = compiler(objective, variables=[variable])
        if variable not in compiled.extract_variables():
            raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
        if isinstance(objective, ExpectationValueImpl):
            return __grad_expectationvalue(E=objective, variable=variable)
        elif objective.is_expectationvalue():
            # single-expectationvalue objective: differentiate its last argument
            return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
        elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
            return __grad_objective(objective=compiled, variable=variable)
        else:
            raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over a compiled Objective: for every argument, multiply the
    outer derivative of the transformation by the inner derivative of the
    argument with respect to `variable`, and sum the contributions.
    """
    arguments = objective.args
    trafo = objective.transformation
    total = None
    inner_cache = {}
    for pos, argument in enumerate(arguments):
        # outer derivative of the transformation w.r.t. argument `pos`
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_fn = jax.grad(trafo, argnums=pos, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_fn = jax.grad(trafo, argnum=pos)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # shortcut: identity transformation has outer derivative 1
        if trafo is None or trafo == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_fn)
        if hasattr(argument, "U"):
            # expectation value: memoize its inner derivative to avoid recomputation
            if argument in inner_cache:
                inner = inner_cache[argument]
            else:
                inner = __grad_inner(arg=argument, variable=variable)
                inner_cache[argument] = inner
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=argument, variable=variable)
        if inner == 0.0:
            # skip vanishing contributions instead of piling up zero expectationvalues
            continue
        contribution = outer * inner
        total = contribution if total is None else total + contribution
    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Dispatch helper for inner derivatives: resolves derivatives all the way
    down to Variables, returning 1.0/0.0 when `arg` is (is not) `variable`,
    and delegating to the expectation-value / objective rules otherwise.

    :param arg: a Variable, FixedVariable, ExpectationValue or Objective to differentiate
    :param variable: the Variable with respect to which arg is differentiated
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        # d(x)/d(x) = 1, d(y)/d(x) = 0
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # already-compiled expectation value: differentiate the abstract form,
        # then recompile with the same backend arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value with respect to one
    circuit variable: apply the shift rule at every gate parametrized by it
    and sum the resulting objectives.

    :param E: the expectation value to differentiate
    :param variable: the variable to differentiate with respect to
    :return: the gradient as an Objective
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    dO = Objective()
    # every (position, gate) pair that carries this variable contributes a term
    for position, gate in unitary._parameter_map[variable]:
        dO += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of one directly differentiable gate via its shift rule.
    Expects precompiled circuits.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the measured Hamiltonian of the enclosing expectation value
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # custom gate constructions may override shifted_gates; without it we cannot proceed
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    inner_grad = __grad_inner(g.parameter, variable)
    dOinc = Objective()
    for weight, replacement in g.shifted_gates():
        # substitute the shifted gate into the circuit and weight the expectation value
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        dOinc += (weight * inner_grad) * expectation
    return dOinc
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.5/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Small read-only record used as an intermediate format while building MPOs:
    a scalar coefficient, a list of 2x2 operator matrices, and the qubit
    positions those operators act on.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # keep the three fields together as one immutable record
        self._record = (coefficient, operators, positions)

    @property
    def coefficient(self):
        # scalar prefactor of this Pauli term
        return self._record[0]

    @property
    def operators(self):
        # list of 2x2 operator matrices
        return self._record[1]

    @property
    def positions(self):
        # qubit indices the operators act on
        return self._record[2]
class MPOContainer:
    """
    Holds the per-qubit MPO tensors (shape (Dl, Dr, 2, 2) each), supports
    setting values at given positions, growing the bond dimensions like a
    dynamic array, and SVD-based compression of the MPO.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # BUG FIX: np.complex was removed from NumPy (deprecated 1.20, removed
        # 1.24); the builtin complex gives the same complex128 dtype.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """ Returns the maximal (left) bond dimension over all site tensors. """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of one qubit.

        set_at: either [i, j] to set a full 2x2 block, or [i, j, k, l] to set
                a single entry.
        """
        if len(set_at) == 2:
            # set a 2x2 matrix block
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # set a single scalar entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of one site tensor (dynamic-array style) and
        place add_operator in the new corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
                    the last two (physical) dimensions must stay 2x2.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # BUG FIX: `update_dir += [0, 0]` mutated the caller's list in
                # place; build a new list instead.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values in the freshly created corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of the MPO via two SVD sweeps (left-to-right, then
        right-to-left), truncating zero singular values (threshold EPS) and
        distributing sqrt(singular values) to both sides.
        """
        n_qubits = len(self.container)
        # fuse the two physical legs so each tensor is rank 3: (Dl, Dr, 4)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-padding
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the next site tensor
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the previous site tensor
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # split the fused physical leg back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
"""
Class building up on tensornetwork FiniteMPO to handle
MPO-Hamiltonians
"""
def __init__(self,
hamiltonian: Union[tq.QubitHamiltonian, Text],
# tensors: List[Tensor],
backend: Optional[Union[AbstractBackend, Text]] = None,
n_qubits: Optional[int] = None,
name: Optional[Text] = None,
maxdim: Optional[int] = 10000) -> None:
# TODO: modifiy docstring
"""
Initialize a finite MPO object
Args:
tensors: The mpo tensors.
backend: An optional backend. Defaults to the defaulf backend
of TensorNetwork.
name: An optional name for the MPO.
"""
self.hamiltonian = hamiltonian
self.maxdim = maxdim
if n_qubits:
self._n_qubits = n_qubits
else:
self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        # Read-only view of the qubit count fixed at construction time.
        return self._n_qubits
def make_mpo_from_hamiltonian(self):
intermediate = self.openfermion_to_intermediate()
# for i in range(len(intermediate)):
# print(intermediate[i].coefficient)
# print(intermediate[i].operators)
# print(intermediate[i].positions)
self.mpo = self.intermediate_to_mpo(intermediate)
def openfermion_to_intermediate(self):
# Here, have either a QubitHamiltonian or a file with a of-operator
# Start with Qubithamiltonian
def get_pauli_matrix(string):
pauli_matrices = {
'I': np.array([[1, 0], [0, 1]], dtype=np.complex),
'Z': np.array([[1, 0], [0, -1]], dtype=np.complex),
'X': np.array([[0, 1], [1, 0]], dtype=np.complex),
'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex)
}
return pauli_matrices[string.upper()]
intermediate = []
first = True
# Store all paulistrings in intermediate format
for paulistring in self.hamiltonian.paulistrings:
coefficient = paulistring.coeff
# print(coefficient)
operators = []
positions = []
# Only first one should be identity -> distribute over all
if first and not paulistring.items():
positions += []
operators += []
first = False
elif not first and not paulistring.items():
raise Exception("Only first Pauli should be identity.")
# Get operators and where they act
for k,v in paulistring.items():
positions += [k]
operators += [get_pauli_matrix(v)]
tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
intermediate += [tmp_op]
# print("len intermediate = num Pauli strings", len(intermediate))
return intermediate
def build_single_mpo(self, intermediate, j):
    """Build one MPO starting at term index ``j`` of ``intermediate``.

    Terms are absorbed one by one until either all terms are consumed or the
    bond dimension reaches ``self.maxdim``. The coefficient of every term is
    spread evenly over all sites via an n-th root.

    Args:
        intermediate: list of SubOperator terms.
        j: index of the first term to absorb.

    Returns:
        (MPOContainer, int): the assembled MPO and the index of the first
        term that was NOT absorbed (callers continue from there).
    """
    n_qubits = self._n_qubits
    mpo = MPOContainer(n_qubits=n_qubits)
    # ***********************************************************************
    # First term: fill the initial 1x1 bond blocks (``complex`` replaces the
    # ``np.complex`` alias removed in NumPy 1.24).
    my_coefficient = intermediate[j].coefficient
    my_positions = intermediate[j].positions
    my_operators = intermediate[j].operators
    for q in range(n_qubits):
        if q not in my_positions:
            mpo.set_tensor(qubit=q, set_at=[0, 0],
                           add_operator=complex(my_coefficient)**(1 / n_qubits) *
                           np.eye(2))
        else:
            my_pos_index = my_positions.index(q)
            mpo.set_tensor(qubit=q, set_at=[0, 0],
                           add_operator=complex(my_coefficient)**(1 / n_qubits) *
                           my_operators[my_pos_index])
    # ***********************************************************************
    # Remaining terms: each term grows the bond dimension by one block;
    # compress every 100 terms to keep the dimension in check.
    j += 1
    while j < len(intermediate) and mpo.get_dim() < self.maxdim:
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # It is guaranteed that every index appears only once in positions.
            # Boundary sites only grow along their single inner bond.
            if q == 0:
                update_dir = [0, 1]
            elif q == n_qubits - 1:
                update_dir = [1, 0]
            else:
                update_dir = [1, 1]
            # If there's an operator on this position, add it; else identity.
            if q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=
                                     complex(my_coefficient)**(1 / n_qubits) *
                                     my_operators[my_pos_index])
            else:
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=
                                     complex(my_coefficient)**(1 / n_qubits) *
                                     np.eye(2))
        if not j % 100:
            mpo.compress_mpo()
        j += 1
    mpo.compress_mpo()
    return mpo, j
def intermediate_to_mpo(self, intermediate):
    """Split the intermediate term list into a list of MPOs.

    ``build_single_mpo`` caps each MPO's bond dimension, so several MPOs may
    be needed to cover all terms; each call resumes where the previous one
    stopped.
    """
    n_qubits = self._n_qubits
    mpos = []
    cursor = 0
    built_count = 0  # number of MPOs assembled so far
    while cursor < len(intermediate):
        current, cursor = self.build_single_mpo(intermediate, cursor)
        mpos.append(current)
        built_count += 1
    return mpos
def construct_matrix(self):
# TODO extend to lists of MPOs
''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq '''
# NOTE(review): despite the TODO, the loop below already iterates over
# self.mpo as a list and sums the contribution of each MPO.
mpo = self.mpo
# Contract over all bond indices
# mpo.container has indices [bond, bond, physical, physical]
n_qubits = self._n_qubits
# d = 2**(n_qubits/2): the final reshape into (d,d,d,d) is only exact for
# an even qubit count -- TODO confirm behavior for odd n_qubits.
d = int(2**(n_qubits/2))
first = True
H = None
#H = np.zeros((d,d,d,d), dtype='complex')
# Define network nodes
# | | | |
# -O--O--...--O--O-
# | | | |
for m in mpo:
assert(n_qubits == len(m.container))
# One tensornetwork node per site tensor.
nodes = [tn.Node(m.container[q], name=str(q))
for q in range(n_qubits)]
# Connect network (along double -- above)
for q in range(n_qubits-1):
nodes[q][1] ^ nodes[q+1][0]
# Collect dangling edges (free indices)
edges = []
# Left dangling edge
edges += [nodes[0].get_edge(0)]
# Right dangling edge
edges += [nodes[-1].get_edge(1)]
# Upper dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(2)]
# Lower dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(3)]
# Contract between all nodes along non-dangling edges
res = tn.contractors.auto(nodes, output_edge_order=edges)
# Reshape to get tensor of order 4 (get rid of left- and right open indices
# and combine top&bottom into one)
# NOTE(review): H_m is only assigned when the contraction result is a
# torch tensor (pytorch is the default tensornetwork backend set at
# module import); with another backend H_m would be stale or undefined
# here -- confirm this guard is intended.
if isinstance(res.tensor, torch.Tensor):
H_m = res.tensor.numpy()
if not first:
H += H_m
else:
H = H_m
first = False
return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.5/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
"""
Overwrite the call function
Container Class to access scipy and keep the optimization history.
This class is used by the SciPy optimizer and should not be used elsewhere.
Attributes
---------
objective:
the objective to evaluate.
param_keys:
the dictionary mapping parameter keys to positions in a numpy array.
samples:
the number of samples to evaluate objective with.
save_history:
whether or not to save, in a history, information about each time __call__ occurs.
print_level
dictates the verbosity of printing during call.
N:
the length of param_keys.
history:
if save_history, a list of energies received from every __call__
history_angles:
if save_history, a list of angles sent to __call__.
"""
def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
print_level: int = 3):
self.Hamiltonian = Hamiltonian
self.unitary = unitary
self.samples = samples
self.param_keys = param_keys
self.N = len(param_keys)
self.save_history = save_history
self.print_level = print_level
self.passive_angles = passive_angles
# Eval and Ham_derivatives are stored for subclasses/callers; neither is
# read by this class's own __call__ (the subclass _GradContainer uses
# Ham_derivatives).
self.Eval = Eval
self.infostring = None
self.Ham_derivatives = Ham_derivatives
# NOTE(review): history attributes only exist when save_history=True;
# any unconditional access elsewhere would raise AttributeError.
if save_history:
self.history = []
self.history_angles = []
def __call__(self, p, *args, **kwargs):
"""
call a wrapped objective.
Parameters
----------
p: numpy array:
Parameters with which to call the objective.
args
kwargs
Returns
-------
numpy.array:
value of self.objective with p translated into variables, as a numpy array.
"""
angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
# Circuit parameters are kept as-is (real); all remaining parameters
# (Hamiltonian coefficients) are cast to complex before substitution.
for i in range(self.N):
if self.param_keys[i] in self.unitary.extract_variables():
angles[self.param_keys[i]] = p[i]
else:
angles[self.param_keys[i]] = complex(p[i])
if self.passive_angles is not None:
angles = {**angles, **self.passive_angles}
vars = format_variable_dictionary(angles)
# Evaluate the parameterized Hamiltonian at the current variable values.
Hamiltonian = self.Hamiltonian(vars)
#print(Hamiltonian)
#print(self.unitary)
#print(vars)
Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
#print(Expval)
# Simulate on the qulacs backend; ``samples`` controls shot-based sampling.
E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
if self.print_level > 2:
print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
elif self.print_level > 1:
print("E={:+2.8f}".format(E))
if self.save_history:
self.history.append(E)
self.history_angles.append(angles)
return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
"""
Overwrite the call function
Container Class to access scipy and keep the optimization history.
This class is used by the SciPy optimizer and should not be used elsewhere.
see _EvalContainer for details.
"""
def __call__(self, p, *args, **kwargs):
"""
call the wrapped qng.
Parameters
----------
p: numpy array:
Parameters with which to call gradient
args
kwargs
Returns
-------
numpy.array:
value of self.objective with p translated into variables, as a numpy array.
"""
Ham_derivatives = self.Ham_derivatives
Hamiltonian = self.Hamiltonian
unitary = self.unitary
# NOTE(review): dE_vec is real-valued (numpy.zeros defaults to float64);
# storing a complex derivative into it drops the imaginary part before
# the final complex64 cast below -- confirm this is intended.
dE_vec = numpy.zeros(self.N)
memory = dict()
#variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
# Same real/complex split as in _EvalContainer.__call__.
for i in range(len(self.param_keys)):
if self.param_keys[i] in self.unitary.extract_variables():
variables[self.param_keys[i]] = p[i]
else:
variables[self.param_keys[i]] = complex(p[i])
if self.passive_angles is not None:
variables = {**variables, **self.passive_angles}
vars = format_variable_dictionary(variables)
expvals = 0
# Each parameter's derivative has (up to) two contributions:
# (1) through the circuit: analytic tequila gradient of <H(p)> w.r.t.
#     the circuit parameter, and
# (2) through the Hamiltonian: expectation value of dH/dp.
for i in range(self.N):
derivative = 0.0
if self.param_keys[i] in list(unitary.extract_variables()):
Ham = Hamiltonian(vars)
Expval = tq.ExpectationValue(H=Ham, U=unitary)
# ``derivative`` becomes a compiled tequila objective here.
temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
expvals += temp_derivative.count_expectationvalues()
derivative += temp_derivative
if self.param_keys[i] in list(Ham_derivatives.keys()):
#print(self.param_keys[i])
Ham = Ham_derivatives[self.param_keys[i]]
Ham = convert_PQH_to_tq_QH(Ham)
H = Ham(vars)
#print(H)
#raise Exception("testing")
Expval = tq.ExpectationValue(H=H, U=unitary)
expvals += Expval.count_expectationvalues()
derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
#print(derivative)
#print(type(H))
# Plain numbers can be stored directly; compiled objectives must still
# be evaluated at the current variable values.
if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
dE_vec[i] = derivative
else:
dE_vec[i] = derivative(variables=variables, samples=self.samples)
memory[self.param_keys[i]] = dE_vec[i]
self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
# NOTE(review): self.history only exists when save_history=True (see
# _EvalContainer.__init__); this unconditional append would raise
# AttributeError otherwise -- confirm save_history is always True here.
self.history.append(memory)
return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
def initialize_variables(self, all_variables, initial_values, variables):
"""
Convenience function to format the variables of some objective recieved in calls to optimzers.
Parameters
----------
objective: Objective:
the objective being optimized.
initial_values: dict or string:
initial values for the variables of objective, as a dictionary.
if string: can be `zero` or `random`
if callable: custom function that initializes when keys are passed
if None: random initialization between 0 and 2pi (not recommended)
variables: list:
the variables being optimized over.
Returns
-------
tuple:
active_angles, a dict of those variables being optimized.
passive_angles, a dict of those variables NOT being optimized.
variables: formatted list of the variables being optimized.
"""
# bring into right format
variables = format_variable_list(variables)
initial_values = format_variable_dictionary(initial_values)
all_variables = all_variables
if variables is None:
variables = all_variables
if initial_values is None:
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
elif hasattr(initial_values, "lower"):
if initial_values.lower() == "zero":
initial_values = {k:0.0 for k in all_variables}
elif initial_values.lower() == "random":
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
else:
# NOTE(review): TequilaOptimizerException is not imported in this
# module; reaching this branch would raise NameError -- confirm/import.
raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
elif callable(initial_values):
initial_values = {k: initial_values(k) for k in all_variables}
elif isinstance(initial_values, numbers.Number):
initial_values = {k: initial_values for k in all_variables}
else:
# autocomplete initial values, warn if you did
detected = False
for k in all_variables:
if k not in initial_values:
initial_values[k] = 0.0
detected = True
if detected and not self.silent:
# NOTE(review): neither ``warnings`` nor ``TequilaWarning`` is
# imported in this module -- this path would raise NameError.
warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
active_angles = {}
for v in variables:
active_angles[v] = initial_values[v]
passive_angles = {}
for k, v in initial_values.items():
if k not in active_angles.keys():
passive_angles[k] = v
return active_angles, passive_angles, variables
def __call__(self, Hamiltonian, unitary,
variables: typing.List[Variable] = None,
initial_values: typing.Dict[Variable, numbers.Real] = None,
gradient: typing.Dict[Variable, Objective] = None,
hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
reset_history: bool = True,
*args,
**kwargs) -> SciPyResults:
"""
Perform optimization using scipy optimizers.
Parameters
----------
objective: Objective:
the objective to optimize.
variables: list, optional:
the variables of objective to optimize. If None: optimize all.
initial_values: dict, optional:
a starting point from which to begin optimization. Will be generated if None.
gradient: optional:
Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
hessian: optional:
Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
reset_history: bool: Default = True:
whether or not to reset all history before optimizing.
args
kwargs
Returns
-------
ScipyReturnType:
the results of optimization.
"""
# The optimization variables are the Hamiltonian's own parameters plus
# the circuit parameters of ``unitary``.
H = convert_PQH_to_tq_QH(Hamiltonian)
Ham_variables, Ham_derivatives = H._construct_derivatives()
#print("hamvars",Ham_variables)
all_variables = copy.deepcopy(Ham_variables)
#print(all_variables)
for var in unitary.extract_variables():
all_variables.append(var)
#print(all_variables)
infostring = "{:15} : {}\n".format("Method", self.method)
#infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
if self.save_history and reset_history:
self.reset_history()
active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
#print(active_angles, passive_angles, variables)
# Transform the initial value directory into (ordered) arrays
param_keys, param_values = zip(*active_angles.items())
param_values = numpy.array(param_values)
# process and initialize scipy bounds
bounds = None
if self.method_bounds is not None:
bounds = {k: None for k in active_angles}
for k, v in self.method_bounds.items():
if k in bounds:
bounds[k] = v
infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
names, bounds = zip(*bounds.items())
assert (names == param_keys) # make sure the bounds are not shuffled
#print(param_keys, param_values)
# do the compilation here to avoid costly recompilation during the optimization
#compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
E = _EvalContainer(Hamiltonian = H,
unitary = unitary,
Eval=None,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level)
# One silent evaluation so E.infostring gets populated.
E.print_level = 0
(E(param_values))
E.print_level = self.print_level
infostring += E.infostring
if gradient is not None:
infostring += "{:15} : {}\n".format("grad instr", gradient)
if hessian is not None:
infostring += "{:15} : {}\n".format("hess_instr", hessian)
compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
compile_hessian = self.method in self.hessian_based_methods
dE = None
ddE = None
# detect if numerical gradients shall be used
# switch off compiling if so
if isinstance(gradient, str):
if gradient.lower() == 'qng':
compile_gradient = False
if compile_hessian:
raise TequilaException('Sorry, QNG and hessian not yet tested together.')
# NOTE(review): ``objective``, ``get_qng_combos`` and ``_QngContainer``
# are not defined/imported in this module -- the 'qng' path would
# raise NameError here. Confirm whether it is still supported.
combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
samples=self.samples, noise=self.noise)
dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
infostring += "{:15} : QNG {}\n".format("gradient", dE)
else:
dE = gradient
compile_gradient = False
if compile_hessian:
compile_hessian = False
if hessian is None:
hessian = gradient
infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
if isinstance(gradient,dict):
if gradient['method'] == 'qng':
func = gradient['function']
compile_gradient = False
if compile_hessian:
raise TequilaException('Sorry, QNG and hessian not yet tested together.')
combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
samples=self.samples, noise=self.noise)
dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
infostring += "{:15} : QNG {}\n".format("gradient", dE)
if isinstance(hessian, str):
ddE = hessian
compile_hessian = False
if compile_gradient:
dE =_GradContainer(Ham_derivatives = Ham_derivatives,
unitary = unitary,
Hamiltonian = H,
Eval= E,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level)
dE.print_level = 0
(dE(param_values))
dE.print_level = self.print_level
infostring += dE.infostring
if self.print_level > 0:
print(self)
print(infostring)
print("{:15} : {}\n".format("active variables", len(active_angles)))
Es = []
optimizer_instance = self
# Records per-iteration data; also forwards angles to a user callback
# supplied via the optimizer's kwargs.
class SciPyCallback:
energies = []
gradients = []
hessians = []
angles = []
real_iterations = 0
def __call__(self, *args, **kwargs):
self.energies.append(E.history[-1])
self.angles.append(E.history_angles[-1])
if dE is not None and not isinstance(dE, str):
self.gradients.append(dE.history[-1])
if ddE is not None and not isinstance(ddE, str):
self.hessians.append(ddE.history[-1])
self.real_iterations += 1
if 'callback' in optimizer_instance.kwargs:
optimizer_instance.kwargs['callback'](E.history_angles[-1])
callback = SciPyCallback()
# ``args=(Es,)`` is forwarded into E.__call__/dE.__call__ via *args.
res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
args=(Es,),
method=self.method, tol=self.tol,
bounds=bounds,
constraints=self.method_constraints,
options=self.method_options,
callback=callback)
# failsafe since callback is not implemented everywhere
if callback.real_iterations == 0:
# NOTE(review): this assigns an unused local; presumably it was meant
# to update callback.real_iterations -- confirm.
real_iterations = range(len(E.history))
if self.save_history:
self.history.energies = callback.energies
self.history.energy_evaluations = E.history
self.history.angles = callback.angles
self.history.angles_evaluations = E.history_angles
self.history.gradients = callback.gradients
self.history.hessians = callback.hessians
if dE is not None and not isinstance(dE, str):
self.history.gradients_evaluations = dE.history
if ddE is not None and not isinstance(ddE, str):
self.history.hessians_evaluations = ddE.history
# some methods like "cobyla" do not support callback functions
if len(self.history.energies) == 0:
self.history.energies = E.history
self.history.angles = E.history_angles
# some scipy methods always give back the last value and not the minimum (e.g. cobyla)
ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
E_final = ea[0][0]
angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
angles_final = {**angles_final, **passive_angles}
return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy function instead and passes the objective
    construction down to it.

    Parameters
    ----------
    Hamiltonian:
        parameterized Hamiltonian; converted to a tequila Hamiltonian downstream.
    unitary:
        circuit whose expectation value of Hamiltonian is minimized.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # Normalize a custom gradient given as {variable: Objective}.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    # Normalize a custom hessian given as {(variable, variable): Objective}.
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # Bugfix: the second key element was previously wrapped in a list
            # (assign_variable([k[1]])); a list is not a valid variable key.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.5/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
'''
wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
:param obj (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
:param variables (list of Variable): parameter with respect to which obj should be differentiated.
default None: total gradient.
return: dictionary of Objectives, if called on gate, circuit, exp.value, or objective; if Variable or Transform, returns number.
'''
if variable is None:
# None means that all components are created
# Recurse once per variable and return the total gradient as a dict.
variables = objective.extract_variables()
result = {}
if len(variables) == 0:
raise TequilaException("Error in gradient: Objective has no variables")
for k in variables:
assert (k is not None)
result[k] = grad(objective, k, no_compile=no_compile)
return result
else:
variable = assign_variable(variable)
if isinstance(objective, QTensor):
# Differentiate a QTensor elementwise via numpy.vectorize.
f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
ff = vectorize(f)
return ff(objective)
if variable not in objective.extract_variables():
return Objective()
if no_compile:
compiled = objective
else:
# Compile gates into shift-rule-differentiable form before
# differentiating (gradient_mode=True).
compiler = CircuitCompiler(multitarget=True,
trotterized=True,
hadamard_power=True,
power=True,
controlled_phase=True,
controlled_rotation=True,
gradient_mode=True)
compiled = compiler(objective, variables=[variable])
if variable not in compiled.extract_variables():
raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
if isinstance(objective, ExpectationValueImpl):
return __grad_expectationvalue(E=objective, variable=variable)
elif objective.is_expectationvalue():
return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
return __grad_objective(objective=compiled, variable=variable)
else:
raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
# Chain rule over the objective's transformation f(args): for each arg,
# multiply the outer derivative df/darg (via jax/autograd) by the inner
# derivative darg/dvariable, and sum the products.
args = objective.args
transformation = objective.transformation
dO = None
processed_expectationvalues = {}
for i, arg in enumerate(args):
if __AUTOGRAD__BACKEND__ == "jax":
# holomorphic=True: differentiate as a complex-analytic function --
# NOTE(review): assumes args are evaluated with complex values; confirm.
df = jax.grad(transformation, argnums=i, holomorphic=True)
elif __AUTOGRAD__BACKEND__ == "autograd":
df = jax.grad(transformation, argnum=i)
else:
raise TequilaException("Can't differentiate without autograd or jax")
# We can detect one simple case where the outer derivative is const=1
if transformation is None or transformation == identity:
outer = 1.0
else:
outer = Objective(args=args, transformation=df)
if hasattr(arg, "U"):
# save redundancies
# Expectation values are cached so each inner gradient is built once.
if arg in processed_expectationvalues:
inner = processed_expectationvalues[arg]
else:
inner = __grad_inner(arg=arg, variable=variable)
processed_expectationvalues[arg] = inner
else:
# this means this inner derivative is purely variable dependent
inner = __grad_inner(arg=arg, variable=variable)
if inner == 0.0:
# don't pile up zero expectationvalues
continue
if dO is None:
dO = outer * inner
else:
dO = dO + outer * inner
if dO is None:
raise TequilaException("caught None in __grad_objective")
return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """Differentiate a leaf argument with respect to ``variable``.

    Variables differentiate to 1.0 (match) or 0.0 (no match), fixed
    variables to 0.0; expectation values and composite objectives are
    delegated to the dedicated gradient routines.
    """
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        # d(arg)/d(variable) is 1 exactly when arg IS the target variable.
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # Compiled expectation value: differentiate the abstract one and
        # re-compile with the original compilation arguments.
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # Anything else is treated as a composite objective.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """Analytic partial derivative of an expectation value.

    Applies the shift rule to every gate of E.U that depends on
    ``variable`` and sums the contributions into one Objective.
    """
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    dO = Objective()
    for position, gate in unitary._parameter_map[variable]:
        dO += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """Gradient of one directly differentiable gate via its shift rule.

    ``g`` (at position ``i`` in ``unitary``) supplies its own shifted-gate
    decomposition; each shifted circuit is measured against ``hamiltonian``
    and weighted by the chain-rule factor d(parameter)/d(variable).
    Expects precompiled circuits.
    """
    # possibility for overwride in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    chain_factor = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * chain_factor) * expectation
    return result
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.8/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Immutable intermediate container for one Pauli term.

    Bundles the term's scalar coefficient, its dense single-qubit operator
    matrices, and the qubit positions those operators act on.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        # Scalar prefactor of the term.
        return self._coefficient

    @property
    def operators(self):
        # Dense operator matrices, aligned with ``positions``.
        return self._operators

    @property
    def positions(self):
        # Qubit indices the operators act on.
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress
    the MPO via SVD.

    Each entry of ``container`` is a rank-4 tensor with index order
    [bond_left, bond_right, physical, physical]; the two physical
    dimensions are always 2 (qubits).
    """

    def __init__(self, n_qubits: int):
        self.n_qubits = n_qubits
        # FIX: use the builtin ``complex`` as dtype. ``np.complex`` was
        # deprecated in NumPy 1.20 and removed in 1.24, so the old code
        # crashes with AttributeError on current NumPy.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal (left) bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write ``add_operator`` into the tensor of ``qubit``.

        set_at: either ``[i, j]`` to place a full 2x2 matrix at bond
        indices (i, j), or ``[i, j, k, l]`` to set a single scalar entry.

        Raises:
            Exception: if ``set_at`` has a length other than 2 or 4.
        """
        if len(set_at) == 2:
            # Set a whole 2x2 matrix
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # Set one specific value
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of ``qubit`` like a dynamic array.

        update_dir: e.g. ``[1, 1, 0, 0]`` -> extend the bond dimension along
        every axis carrying a 1. A length-2 list is padded with ``[0, 0]``;
        the last two (physical) dimensions must never grow.
        ``add_operator`` is written into the newly created corner slot.

        Raises:
            Exception: on a malformed ``update_dir``.
        """
        old_shape = self.container[qubit].shape
        if len(update_dir) != 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # FIX: builtin ``complex`` instead of the removed ``np.complex``.
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values into the fresh corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO bond dimensions via SVD: one forward and one
        backward sweep, dropping singular values below EPS and distributing
        sqrt(s) onto both neighbouring sites.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so every site is rank-3.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards (left to right)
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Undo the permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards (right to left)
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the merged physical index back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle
    MPO-Hamiltonians: converts a qubit Hamiltonian into a list of
    bond-dimension-limited MPOs and can reconstruct the dense matrix.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: tequila QubitHamiltonian (or its text form).
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork. (Not used by the visible code.)
            n_qubits: number of qubits; if falsy, derived from the
                Hamiltonian via ``get_n_qubits``.
            name: An optional name for the MPO. (Not used by the visible code.)
            maxdim: maximal bond dimension per MPO; remaining terms spill
                into additional MPOs.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): ``get_n_qubits`` is not defined on this class in
            # the visible code, so constructing without n_qubits raises
            # AttributeError -- confirm whether the helper was lost.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Translate the Hamiltonian's Pauli strings into SubOperator records."""

        def get_pauli_matrix(string):
            # FIX: builtin ``complex`` dtype -- ``np.complex`` was removed
            # in NumPy 1.24 and crashed here on current NumPy.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be a pure identity (empty items()).
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index ``j`` and keep absorbing terms
        until the bond dimension reaches self.maxdim or terms run out.

        Returns:
            (MPOContainer, int): the MPO and the index of the first
            unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices).
        # The coefficient is distributed evenly over all sites via its
        # n_qubits-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               # FIX: builtin ``complex`` replaces np.complex
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimension one term at a time.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions.
                # Edge sites only grow one bond; bulk sites grow both.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # Periodic compression keeps intermediate bond dimensions small.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split all terms into a list of MPOs, each kept below maxdim."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        '''
        Recover the dense matrix, e.g. to compare with the Hamiltonian
        obtained from tequila. Sums the full contraction of every MPO in
        self.mpo and returns a tensor reshaped to (d, d, d, d) with
        d = 2**(n_qubits/2).
        '''
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # FIX: previously H_m was only assigned for torch tensors, so a
            # numpy-backed contraction raised NameError; fall back to the
            # raw tensor for any other backend.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.8/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Objective-evaluation container handed to scipy.optimize.minimize.

    Unlike tequila's stock container, this one rebuilds the Hamiltonian
    from the current variable values on every call (the Hamiltonian itself
    is parametrized), then measures it against the fixed circuit.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a QubitHamiltonian.
    unitary:
        the parametrized circuit whose expectation value is evaluated.
    param_keys:
        ordered keys mapping positions in the scipy parameter array to variables.
    samples:
        the number of samples to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    passive_angles:
        variables held fixed during the optimization (merged into every call).
    Ham_derivatives:
        derivatives of the Hamiltonian w.r.t. its own variables (used by
        the gradient subclass).
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # Filled on each __call__ with a summary line for logging.
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the objective at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered like
            ``param_keys``.
        args
        kwargs

        Returns
        -------
        complex:
            the measured energy (returned as builtin complex because jax
            types confuse the scipy optimizers).
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            # Circuit variables stay real; variables that only appear in the
            # Hamiltonian are promoted to complex before substitution.
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Rebuild the Hamiltonian for the current variable values, then
        # measure it against the (fixed) circuit.
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient container handed to scipy.optimize.minimize as ``jac``.

    Combines two gradient contributions per variable: (a) the circuit
    gradient via tequila's parameter-shift machinery, and (b) the
    Hamiltonian's own derivative (from ``Ham_derivatives``) measured
    against the unchanged circuit.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    see _EvalContainer for attribute details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered like
            ``param_keys``.
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient vector as a complex64 array.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated real, so assigning a complex
        # derivative below silently drops the imaginary part even though
        # the function returns a complex64 array -- confirm intended.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            # Same real/complex split as _EvalContainer.__call__.
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # (a) contribution from differentiating the circuit.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # (b) contribution from differentiating the Hamiltonian itself.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # ``derivative`` is either already a number, or a compiled
            # objective that still needs to be evaluated with the variables.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    OptimizerSciPy variant that takes a parametrized Hamiltonian plus a
    circuit (instead of a prebuilt Objective) and overwrites the
    expectation and gradient container objects accordingly.

    NOTE(review): several names used below (TequilaOptimizerException,
    TequilaException, warnings, TequilaWarning, get_qng_combos,
    _QngContainer) are not imported in this file's visible header --
    presumably pulled in via ``from vqe_utils import *``; confirm.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            every variable appearing in the Hamiltonian or circuit.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k: 0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # Split into optimized (active) and fixed (passive) variables.
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            parametrized circuit to optimize against.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Collect variables from both the Hamiltonian and the circuit.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())

        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)

        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up call so E.infostring is populated for logging
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring

        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)

        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods

        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): ``objective`` is undefined in this scope
                # (this signature takes Hamiltonian/unitary) -- the 'qng'
                # path would raise NameError. TODO confirm/port.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # scipy-numerical gradient instruction, e.g. '2-point'
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined ``objective`` as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)

        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False

        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up call, mirrors the energy container above
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring

        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))

        Es = []

        optimizer_instance = self

        class SciPyCallback:
            # Records per-iteration data; scipy calls this once per iteration.
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)

        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local that is never used --
            # looks like a leftover failsafe; confirm intent.
            real_iterations = range(len(E.history))

        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history

            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles

        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]  #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}

        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and pass down the
    Hamiltonian/unitary pair for objective construction.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian to optimize (measured against ``unitary``).
    unitary:
        parametrized circuit.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
         List of Variables to optimize
    samples: int, optional:
         samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
         max iters to use.
    backend: str, optional:
         Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
         Additional options for the backend
         Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
         a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
         device the backend should run on.
    method: str : (Default = "BFGS"):
         Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
         Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
         Dictionary of options
         (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
         (see scipy documentation
    silent: bool :
         No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize user-supplied gradient/hessian dictionaries
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # NOTE(review): ``assign_variable([k[1]])`` wraps the second key
            # in a list -- likely meant ``assign_variable(k[1])``; confirm.
            hessian = {(assign_variable(k[0]), assign_variable([k[1]])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.8/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.

    :param objective (Objective/QTensor/ExpectationValue): structure to be differentiated
    :param variable (Variable or str): parameter with respect to which objective should be differentiated.
        default None: total gradient (a dict over all variables).
    :param no_compile: if True, skip the gradient-mode compilation pass (caller
        guarantees the objective is already compiled into shiftable primitives).
    return: dictionary of Objectives if variable is None, otherwise a single
        Objective (or a vectorized result for QTensor inputs).
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable.
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    if isinstance(objective, QTensor):
        # Differentiate each tensor entry independently via numpy.vectorize.
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    if variable not in objective.extract_variables():
        # Independent of the variable -> gradient is the (empty) zero Objective.
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # Compile down to primitives whose shift rules are known
        # (gradient_mode=True keeps the structure differentiable).
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # Dispatch on the (compiled) structure's type.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # single-expectationvalue objective: differentiate its last argument
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Differentiate an Objective w.r.t. a single variable via the chain rule.

    Each argument of the objective contributes outer * inner, where the
    outer factor is the derivative of the objective's transformation
    w.r.t. that argument (computed with jax/autograd) and the inner
    factor is the derivative of the argument itself w.r.t. the variable.

    :param objective: the Objective to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: the accumulated derivative as an Objective
    """
    arguments = objective.args
    trafo = objective.transformation
    accumulated = None
    # cache inner derivatives of expectation values (arguments may repeat)
    ev_cache = {}
    for position, argument in enumerate(arguments):
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_fn = jax.grad(trafo, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_fn = jax.grad(trafo, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # shortcut: for a missing/identity transformation the outer derivative is 1
        if trafo is None or trafo == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_fn)
        if hasattr(argument, "U"):
            # expectation value: reuse a previously computed inner derivative
            if argument not in ev_cache:
                ev_cache[argument] = __grad_inner(arg=argument, variable=variable)
            inner = ev_cache[argument]
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=argument, variable=variable)
        if inner == 0.0:
            # skip vanishing contributions
            continue
        contribution = outer * inner
        accumulated = contribution if accumulated is None else accumulated + contribution
    if accumulated is None:
        raise TequilaException("caught None in __grad_objective")
    return accumulated
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """Dispatch the derivative of a single objective argument.

    Recurses all the way down to variables: returns 1.0/0.0 for a plain
    variable that does (not) match, 0.0 for fixed variables, and defers
    to the expectation-value / objective differentiators otherwise.

    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated
    """
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """Analytic partial derivative of an expectation value w.r.t. one variable.

    Applies the shift rule to every gate of the unitary that is
    parametrized by ``variable`` and sums the contributions.

    :param E: the expectation value <H>_U to differentiate
    :param variable: the Variable with respect to which differentiation is performed
    :return: the derivative as an Objective (0.0 if U does not depend on variable)
    """
    H = E.H
    U = E.U
    if not U.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))
    # fast return if possible
    if variable not in U.extract_variables():
        return 0.0
    total = Objective()
    for position, gate in U._parameter_map[variable]:
        total += __grad_shift_rule(U, gate, position, variable, H)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """Gradient of a single, directly differentiable gate via its shift rule.

    Expects precompiled circuits: the gate must expose ``shifted_gates``
    (custom gate constructions may override the shift rule through it).

    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    """
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    inner_grad = __grad_inner(g.parameter, variable)
    result = Objective()
    # each shifted replacement contributes weight * inner_grad * <H>_{U with g replaced}
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * inner_grad) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.5/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Lightweight record for one Pauli term in an intermediate format.

    Bundles a scalar coefficient with the single-qubit operators of the
    term and the qubit positions those operators act on.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coeff = coefficient
        self._ops = operators
        self._pos = positions

    @property
    def coefficient(self):
        # scalar prefactor of the term
        return self._coeff

    @property
    def operators(self):
        # list of single-qubit operator matrices
        return self._ops

    @property
    def positions(self):
        # qubit indices the operators act on
        return self._pos
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress
    the MPO via SVD.

    Each tensor in ``container`` has index order
    [left bond, right bond, physical out, physical in]; the physical
    dimensions are fixed to 2 (qubits).
    """

    def __init__(self, n_qubits: int):
        self.n_qubits = n_qubits
        # One trivial (bond dimension 1) tensor per qubit.
        # Fix: ``np.complex`` was deprecated in NumPy 1.20 and removed in
        # 1.24 — use the builtin ``complex`` (same dtype: complex128).
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """ Returns max (left) bond dimension over all container tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the existing tensor of ``qubit``.

        set_at: where to put data — either [left, right] to set a whole 2x2
        block, or [left, right, phys_out, phys_in] for a single entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of ``qubit`` like a dynamic array and place
        ``add_operator`` in the newly created corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and must stay 0.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # builtin complex instead of the removed np.complex alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values into the last (freshly grown) block
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD: a left-to-right sweep followed by a
        right-to-left sweep. Singular values below EPS are truncated and
        the square roots of the kept ones are distributed symmetrically
        onto both neighbouring tensors.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices so each tensor is rank 3
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Converts a qubit Hamiltonian into a list of MPOs (matrix product
    operators) with bounded bond dimension.

    The Hamiltonian is first flattened into an intermediate list of Pauli
    terms (SubOperator), which is then folded term by term into
    MPOContainer objects, compressing via SVD along the way. A new MPO is
    started whenever the bond dimension would exceed ``maxdim``.
    """

    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: a tequila QubitHamiltonian (a file path is accepted
                by the signature but not handled yet — TODO confirm intent).
            backend: an optional tensornetwork backend (currently unused).
            n_qubits: number of qubits; inferred from the Hamiltonian if None.
            name: an optional name for the MPO (currently unused).
            maxdim: maximal bond dimension per MPO before a new one is started.
        """
        # NOTE: external types in the annotations above are quoted so the
        # class is definable even when tequila/tensornetwork are absent.
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        return self._n_qubits

    def get_n_qubits(self):
        """Infer the qubit count as (largest qubit index used) + 1.

        Fix: this method was called from __init__ but never defined, so
        constructing without an explicit n_qubits raised AttributeError.
        Assumes ``self.hamiltonian`` provides tq-style ``paulistrings``
        (not a file path) — TODO confirm the Text branch is ever used.
        """
        highest = -1
        for paulistring in self.hamiltonian.paulistrings:
            for qubit, _ in paulistring.items():
                highest = max(highest, qubit)
        return highest + 1

    def make_mpo_from_hamiltonian(self):
        """Build ``self.mpo`` (a list of MPOContainer) from the Hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Flatten the Hamiltonian's Pauli strings into SubOperator records."""

        def get_pauli_matrix(string):
            # builtin complex instead of np.complex (removed in NumPy 1.24)
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty Pauli string)
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """Fold terms ``intermediate[j:]`` into one MPO until maxdim is hit.

        The coefficient of each term is spread evenly over the qubits as
        coeff**(1/n_qubits). Returns the MPOContainer and the index of the
        first unprocessed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # First term: write directly into the (1,1,2,2) seed tensors
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # Remaining terms: grow the bond dimension by one block per term
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # boundary tensors only grow along their single bond index
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on this position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # periodic SVD compression keeps the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as ``maxdim`` requires."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """Recover the dense operator (as a rank-4 tensor) from ``self.mpo``,
        e.g. to compare with the Hamiltonian matrix obtained from tequila."""
        mpo = self.mpo
        # Contract over all bond indices.
        # mpo.container has indices [bond, bond, physical, physical].
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Network layout:
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect bond indices between neighbours
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # pytorch backend returns torch tensors; convert for accumulation
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.5/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Scipy-facing wrapper that evaluates the energy of a parametrized
    Hamiltonian / ansatz pair and keeps the optimization history.

    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tq QubitHamiltonian.
    unitary:
        the ansatz circuit whose expectation value is evaluated.
    param_keys:
        ordered variable keys matching positions in the scipy parameter array.
    Ham_derivatives:
        derivatives of the parametrized Hamiltonian (unused here; consumed
        by the _GradContainer subclass).
    Eval:
        optional companion evaluation container (stored, not used here).
    passive_angles:
        variables held fixed; merged into every evaluation.
    samples:
        the number of samples (shots) to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # set on every __call__; summarizes expectation-value counts
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at the given parameter vector.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs

        Returns
        -------
        complex:
            expectation value of the Hamiltonian at p.
        """
        angles = {}
        for i in range(self.N):
            # circuit variables stay real; non-circuit (Hamiltonian) parameters
            # are cast to complex
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # build the Hamiltonian at the current parameters, then measure it
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): scipy expects a real scalar objective; returning
        # complex(E) relies on E being effectively real — confirm.
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Scipy-facing wrapper that evaluates the gradient of the energy.

    For each active parameter the gradient has up to two contributions:
    the analytic derivative of the circuit (via tq.grad) and the
    derivative of the parametrized Hamiltonian itself (Ham_derivatives).

    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the given parameter vector.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient.
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient w.r.t. the active parameters, as a complex64 array.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # Fix: allocate complex storage up front. The previous real-valued
        # numpy.zeros(self.N) silently discarded imaginary parts on
        # assignment, before the final complex cast on return.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # circuit variables stay real; Hamiltonian parameters are complex
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit parameter (analytic gradient)
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian coefficient
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # Fix: accept any numeric scalar. The old check only matched
            # float and numpy.complex64, so a builtin complex would have
            # been called like an Objective and crashed.
            if isinstance(derivative, numbers.Number):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that builds its own evaluation and gradient
    containers (_EvalContainer / _GradContainer) from a parametrized
    Hamiltonian and an ansatz circuit, instead of a precompiled objective.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables:
            all variables of the problem (Hamiltonian + circuit).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module — this branch would raise NameError; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings / TequilaWarning are not imported in
                # this module — confirm before relying on this branch.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            ansatz circuit whose expectation value is minimized.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # variables of the Hamiltonian itself plus those of the circuit
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # energy evaluator; one warm-up call (silenced) fills its infostring
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is undefined in this scope and
                # get_qng_combos/_QngContainer are not imported — the qng
                # path would raise NameError; confirm before use.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # numerical scheme string ('2-point', 'cs', '3-point') is
                # passed through to scipy unchanged
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined-name concern as the qng branch above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient evaluator; warm-up call (silenced) as for E
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # per-iteration snapshots taken from the container histories
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward to a user-supplied callback if one was given
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is never used afterwards — the
            # failsafe appears incomplete; confirm intended behavior.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Minimize the expectation value of a (parametrized) Hamiltonian over a
    parametrized unitary, using the local optimize_scipy wrapper instead of
    tequila's stock scipy optimizer so that the objective can be rebuilt on
    every evaluation.

    Parameters
    ----------
    Hamiltonian:
        the (parametrized) Hamiltonian whose expectation value is minimized.
    unitary:
        the parametrized circuit preparing the trial state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation
        (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default).
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        same conventions as gradient, with tuple-of-variables keys.
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        starting point as {variable: value}; zeros/random if None.
    variables: typing.List[typing.Hashable], optional:
        subset of variables to optimize (all if None).
    samples: int, optional:
        shots per circuit evaluation (None = exact wavefunction simulation).
    maxiter: int : (Default value = 100):
        maximum number of optimizer iterations.
    backend: str, optional:
        simulator backend; chosen automatically if None.
    backend_options: dict, optional:
        extra options forwarded to the compiled objective on every call.
    noise: NoiseModel, optional:
        noise model applied to all expectation values.
    device: str, optional:
        device the backend should run on.
    method: str : (Default = "BFGS"):
        scipy optimization method.
    tol: float : (Default = 1.e-3):
        scipy convergence tolerance.
    method_options: dict, optional:
        scipy method options.
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation).
    method_constraints: optional:
        scipy constraints.
    silent: bool:
        no printout if True.
    save_history: bool:
        record the optimization history.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # normalize user-supplied gradient/hessian dictionaries to Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # bug fix: the second key element used to be passed as a one-element
            # list (assign_variable([k[1]])), producing a malformed Variable key;
            # both tuple entries must be converted individually.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.5/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Entry point for differentiating tequila objects.

    Builds derivative Objectives for Objectives, ExpectationValues and
    QTensors. With no variable given, returns the full gradient as a
    dictionary {variable: derivative}; otherwise returns the single
    partial derivative with respect to ``variable``.
    '''
    if variable is None:
        # no variable specified: build the derivative for every variable
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for var in all_vars:
            assert (var is not None)
            gradients[var] = grad(objective, var, no_compile=no_compile)
        return gradients
    variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate every tensor entry independently
        per_entry = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return per_entry(objective)
    if variable not in objective.extract_variables():
        # independent of the variable: derivative is the empty Objective
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # bring the circuit into a shift-rule-compatible gate set first
        gradient_compiler = CircuitCompiler(multitarget=True,
                                            trotterized=True,
                                            hadamard_power=True,
                                            power=True,
                                            controlled_phase=True,
                                            controlled_rotation=True,
                                            gradient_mode=True)
        compiled = gradient_compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate a (compiled) Objective with respect to a single variable
    by applying the chain rule over the objective's transformation.

    For each argument of the transformation, the outer derivative is taken
    with autograd/jax and the inner derivative recursively via __grad_inner;
    the products are summed into one Objective.

    Raises TequilaException if no autodiff backend is available or the
    accumulated derivative is None.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values: the same arg may appear
    # in several positions of the transformation
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # outer derivative of the transformation w.r.t. its i-th argument
        # (jax and autograd spell the argument-index keyword differently)
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # arg is an expectation value: save redundancies via the cache
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner derivative dispatch: recurse until the leaves (Variables) are
    reached and return 1.0 / 0.0 depending on whether the leaf is the
    requested variable.

    :param arg: a transform, expectation value, or variable object to differentiate
    :param variable: the Variable with respect to which arg is differentiated
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        # leaf: derivative is 1 exactly when the leaf is the target variable
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract object and
        # re-compile the derivative with the original compilation arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # anything else is treated as a nested Objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <H>_U with respect
    to one variable: sums the shift-rule contributions of every gate in U
    that is parametrized by that variable.

    :param E: the expectation value (Hamiltonian H, unitary U) to differentiate
    :param variable: the variable with respect to which differentiation is performed
    :return: the derivative as an Objective (sum over per-gate shift rules)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all (position, gate) pairs in U that depend on this variable
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        # shift-rule contribution of gate g at circuit position idx
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Shift-rule derivative of one directly differentiable gate inside an
    expectation value. Expects precompiled circuits.

    :param unitary: the QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: Hamiltonian measured against the circuit
    :return: an Objective evaluating to d<H>/d(variable) contributed by g
    '''
    # custom gate constructions can override shifted_gates
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # inner derivative of the gate parameter w.r.t. the variable
    inner = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace g by its shifted counterpart and measure the Hamiltonian
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * inner) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.8/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight record for one Pauli string in the intermediate MPO format:
    a scalar coefficient, the list of 2x2 operator matrices, and the qubit
    positions on which those operators act.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # store everything read-only behind properties
        self._coefficient, self._operators, self._positions = (
            coefficient, operators, positions)

    @property
    def coefficient(self):
        """Scalar prefactor of this Pauli string."""
        return self._coefficient

    @property
    def operators(self):
        """List of 2x2 operator matrices, aligned with ``positions``."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Container for the per-qubit tensors of a matrix product operator (MPO).

    Each entry of ``container`` is a rank-4 numpy array indexed as
    ``[left bond, right bond, physical, physical]`` with 2x2 physical
    dimensions (qubits). Bond dimensions grow dynamically through
    :meth:`update_container` (wannabe-equivalent to a dynamic array) and
    are reduced again by the SVD sweeps in :meth:`compress_mpo`.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # one trivial (1,1,2,2) tensor per qubit to start from.
        # bug fix: ``np.complex`` was deprecated in NumPy 1.20 and removed
        # in NumPy 1.24; the builtin ``complex`` is the replacement.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                          for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max (left) bond dimension over all tensors of container """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write ``add_operator`` into the tensor of ``qubit``.

        set_at: [bond_l, bond_r] to place a full 2x2 matrix, or
                [bond_l, bond_r, phys_row, phys_col] to set a single entry.

        Raises:
            Exception: if ``set_at`` has neither length 2 nor 4.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of ``qubit``'s tensor like a dynamic array
        and write ``add_operator`` into the newly created corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and must stay 0.

        Raises:
            Exception: on a malformed ``update_dir``.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # bug fix: np.complex -> complex (removed from NumPy)
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the fresh bottom-right block
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compress the MPO bond dimensions via two SVD sweeps
        (left-to-right, then right-to-left), dropping singular values
        below EPS and distributing sqrt(s) to both factors.
        """
        n_qubits = len(self.container)
        # merge the two physical legs so each tensor is rank 3 for the SVDs
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb vh into the neighbour on the right
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb u into the neighbour on the left
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # restore the two 2x2 physical legs
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Converts a qubit Hamiltonian into a list of matrix product operators
    (MPOs). Whenever the bond dimension of the MPO under construction
    would reach ``maxdim``, the remaining Pauli terms are collected in a
    fresh MPO, so the Hamiltonian is represented as a sum of MPOs.
    """
    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 # tensors: List[Tensor],
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the Hamiltonian to encode (tq.QubitHamiltonian or text).
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; if falsy, it is read off the
                Hamiltonian via :meth:`get_n_qubits`.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension per single MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()
    def get_n_qubits(self):
        """Number of qubits of the stored Hamiltonian."""
        # bug fix: this method was referenced in __init__ but missing from
        # the original class, so constructing without n_qubits raised
        # AttributeError. Assumes ``self.hamiltonian`` is a
        # tq.QubitHamiltonian exposing ``n_qubits`` -- TODO confirm.
        return self.hamiltonian.n_qubits
    @property
    def n_qubits(self):
        """Number of qubits this MPO acts on."""
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Build ``self.mpo`` (a list of MPOContainer) from the Hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
    def openfermion_to_intermediate(self):
        """
        Translate ``self.hamiltonian``'s Pauli strings into a list of
        SubOperator records (coefficient, operator matrices, positions).

        Raises:
            Exception: if an identity term appears anywhere but first.
        """
        def get_pauli_matrix(string):
            # bug fix: np.complex -> builtin complex (removed in NumPy 1.24)
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate
    def build_single_mpo(self, intermediate, j):
        """
        Consume Pauli terms of ``intermediate`` starting at index ``j`` into
        one MPOContainer until the bond dimension would reach ``self.maxdim``.

        Returns:
            (mpo, j): the filled container and the index of the first
            unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: spread coefficient^(1/n_qubits) over all sites; sites
        # without an explicit operator get the identity (typically the
        # leading identity term of the Hamiltonian).
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # Remaining terms: extend the bond dimension by one block per term
        # and compress periodically, while the bond dimension stays below
        # self.maxdim.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            if not j % 100:
                # keep intermediate bond dimensions manageable
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j
    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as ``maxdim`` requires."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list
    def construct_matrix(self):
        """
        Contract the stored MPO list back into a dense tensor of shape
        (d,d,d,d) with d = 2**(n_qubits/2), e.g. to compare with the
        Hamiltonian matrix obtained from tequila.
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Network layout per MPO in the sum:
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along the bond indices)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # bug fix: H_m was left unbound for non-torch backends,
                # raising NameError below
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.8/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Scipy-facing energy evaluator for a parameter-dependent Hamiltonian.

    Overwrites the call function of tequila's _EvalContainer: here the
    Hamiltonian itself is a function of the optimization parameters, so on
    every __call__ it is re-instantiated from the current parameter vector
    before the expectation value is simulated.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a qubit Hamiltonian.
    unitary:
        the parametrized circuit whose variables stay real-valued.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # human-readable summary of the last evaluation, set in __call__
        self.infostring = None
        # derivatives of the Hamiltonian w.r.t. its own parameters
        # (used by _GradContainer, which inherits from this class)
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        complex:
            expectation value of the re-instantiated Hamiltonian in the
            state prepared by the unitary at angles p.
        """
        # split p: circuit variables stay as given, Hamiltonian-only
        # variables are cast to complex before the Hamiltonian is rebuilt
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # re-instantiate the Hamiltonian for the current parameter values
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Scipy-facing gradient evaluator; overwrites the call function.

    The total derivative w.r.t. each parameter is the sum of (a) the
    circuit derivative of <H>_U (via tequila's shift rules) when the
    parameter appears in the unitary, and (b) the expectation value of
    dH/d(parameter) when the Hamiltonian depends on it.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient vector, one entry per entry of param_keys.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with a real dtype, so any
        # imaginary part assigned below is dropped before the final cast
        # to complex64 -- TODO confirm this is intended
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        # same real/complex split of p as in _EvalContainer.__call__
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        # count expectation values evaluated, for the infostring
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # (a) circuit contribution: d<H>/d(theta_i) via shift rules
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # (b) Hamiltonian contribution: <dH/d(theta_i)>_U
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # derivative is either already a number (only case (b) fired)
            # or a compiled objective that still needs to be evaluated
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
def initialize_variables(self, all_variables, initial_values, variables):
"""
Convenience function to format the variables of some objective recieved in calls to optimzers.
Parameters
----------
objective: Objective:
the objective being optimized.
initial_values: dict or string:
initial values for the variables of objective, as a dictionary.
if string: can be `zero` or `random`
if callable: custom function that initializes when keys are passed
if None: random initialization between 0 and 2pi (not recommended)
variables: list:
the variables being optimized over.
Returns
-------
tuple:
active_angles, a dict of those variables being optimized.
passive_angles, a dict of those variables NOT being optimized.
variables: formatted list of the variables being optimized.
"""
# bring into right format
variables = format_variable_list(variables)
initial_values = format_variable_dictionary(initial_values)
all_variables = all_variables
if variables is None:
variables = all_variables
if initial_values is None:
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
elif hasattr(initial_values, "lower"):
if initial_values.lower() == "zero":
initial_values = {k:0.0 for k in all_variables}
elif initial_values.lower() == "random":
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
else:
raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
elif callable(initial_values):
initial_values = {k: initial_values(k) for k in all_variables}
elif isinstance(initial_values, numbers.Number):
initial_values = {k: initial_values for k in all_variables}
else:
# autocomplete initial values, warn if you did
detected = False
for k in all_variables:
if k not in initial_values:
initial_values[k] = 0.0
detected = True
if detected and not self.silent:
warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
active_angles = {}
for v in variables:
active_angles[v] = initial_values[v]
passive_angles = {}
for k, v in initial_values.items():
if k not in active_angles.keys():
passive_angles[k] = v
return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Unlike the stock tequila optimizer, this variant takes a parametrized
        Hamiltonian plus a unitary (instead of a single pre-built Objective)
        and optimizes the variables of both jointly.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (project format); converted internally
            to a tequila QubitHamiltonian via convert_PQH_to_tq_QH.
        unitary:
            the parametrized circuit whose expectation value is minimized.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of the objective.
            Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of the objective.
            Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # Build the tequila Hamiltonian and collect both its own variables and
        # the circuit variables into one joint optimization space.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        # Split into actively optimized angles and fixed (passive) ones.
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # Energy evaluation wrapper; one silent call initializes the infostring
        # (expectation-value count) before verbose optimization starts.
        E = _EvalContainer(Hamiltonian=H,
                           unitary=unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        # Only compile analytic derivatives if the chosen scipy method uses them.
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this scope; this
                # QNG branch looks broken/dead here -- confirm before using
                # gradient='qng' with this optimizer.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # e.g. '2-point'/'3-point': let scipy do numerical differentiation
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient, dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` issue as above.
                combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytic gradient container (circuit gradients + Hamiltonian derivatives).
            dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                                unitary=unitary,
                                Hamiltonian=H,
                                Eval=E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            # Per-iteration bookkeeping; scipy invokes this once per accepted step.
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                # record the most recent evaluation as this iteration's result
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward to a user-supplied callback, if any
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is never used afterwards; the intent was
            # probably to patch the iteration count -- confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Construct an optimize_scipy instance and run it on the given
    (parametrized Hamiltonian, unitary) pair.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian (project format) to minimize the expectation of.
    unitary:
        parametrized circuit preparing the trial state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation
        (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default).
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation,
        dictionary (keys: tuple of variables, values: tequila objective),
        None for automatic construction (default).
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and
        floating point numbers. If None they will all be set to zero.
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize.
    samples: int, optional:
        samples/shots per circuit evaluation (None: full wavefunction simulation).
    maxiter: int : (Default value = 100):
        max iterations to use.
    backend: str, optional:
        Simulator backend, automatically chosen if None.
    backend_options: dict, optional:
        Additional options passed to the compiled objective in every call.
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods').
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation).
    method_options: dict, optional:
        Dictionary of scipy method options.
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation).
    method_constraints: optional:
        (see scipy documentation).
    silent: bool :
        No printout if True.
    save_history: bool:
        Save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # Normalize user-supplied analytic gradients/hessians to Variable-keyed dicts.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: previously the second key component was wrapped in a list
            # (assign_variable([k[1]])), which is not a valid variable key and
            # broke hessian lookups.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.8/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which objective should be differentiated.
        default None: total gradient (one entry per extracted variable).
    :param no_compile: if True, skip the gradient-mode compilation pass
        (caller guarantees the objective is already compiled).
    return: dictionary of Objectives if variable is None; otherwise a single
        Objective (or number for trivial cases).
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate elementwise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # objective does not depend on the variable at all
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # gradient_mode compiles gates into shift-rule-differentiable primitives
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # Dispatch order matters: a raw ExpectationValueImpl is handled before the
    # generic Objective case.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # objective wraps exactly one expectation value: differentiate its
        # (compiled) last argument directly
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over the arguments of a transformed objective:
    d f(E_1, ..., E_n) / dv = sum_i (df/dE_i) * (dE_i/dv).

    The outer derivatives df/dE_i come from the autodiff backend (jax or
    autograd, selected by the global __AUTOGRAD__BACKEND__); the inner
    derivatives dE_i/dv are delegated to __grad_inner.

    :param objective: compiled Objective with .args and .transformation
    :param variable: the Variable to differentiate with respect to
    :return: the derivative as an Objective
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values so repeated args are
    # differentiated only once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # arg is an expectation value: save redundancies via the cache
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the chain rule: differentiate a single argument with
    respect to ``variable``.

    Returns 1.0/0.0 when the argument is itself a (fixed) variable, and
    otherwise recurses into the appropriate expectation-value or objective
    gradient routine.

    :param arg: a transform, variable or expectation-value object to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated
    '''
    assert isinstance(variable, Variable)
    # plain variable: derivative is the Kronecker delta
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    # fixed values never depend on anything
    if isinstance(arg, FixedVariable):
        return 0.0
    # raw expectation value: shift-rule differentiation
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    # backend-wrapped expectation value: differentiate the abstract one,
    # then recompile with the original backend arguments
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # anything else is treated as a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U' H U> with respect
    to ``variable``: every gate in U that carries the variable contributes one
    shift-rule term, and the contributions are summed.

    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which differentiation is performed
    :return: dE/d(variable) as an Objective
    '''
    H = E.H
    U = E.U
    if not U.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))
    # fast return if possible: no dependence, no gradient
    if variable not in U.extract_variables():
        return 0.0
    total = Objective()
    # one shift-rule contribution per gate carrying the variable
    for position, gate in U._parameter_map[variable]:
        total += __grad_shift_rule(U, gate, position, variable, H)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly differentiable gate via its shift rule.
    Expects precompiled circuits (gates exposing ``shifted_gates``).

    :param unitary: the QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the Variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # possibility for overwride in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate parameter w.r.t. the variable
    inner_grad = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace the gate by its shifted version and measure
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * inner_grad) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.9/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Helper record bundling one term of a Pauli-string expansion in an
    intermediate format: a scalar coefficient, the local 2x2 operators,
    and the qubit positions on which they act.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # store everything privately; exposed read-only below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    # read-only views on the stored data
    coefficient = property(lambda self: self._coefficient,
                           doc="Scalar prefactor of this term.")
    operators = property(lambda self: self._operators,
                         doc="Local operators, aligned with ``positions``.")
    positions = property(lambda self: self._positions,
                         doc="Qubit indices on which ``operators`` act.")
class MPOContainer:
    """
    Handles the site tensors of a matrix-product operator (MPO).

    Each qubit holds a tensor of shape (left_bond, right_bond, 2, 2).
    The container can set values at given positions, grow the bond
    dimensions on demand (wannabe-equivalent to dynamic arrays) and
    compress the MPO via SVD.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # One trivial (1, 1, 2, 2) tensor per qubit; bond dims grow on demand.
        # BUGFIX: ``np.complex`` was removed in NumPy >= 1.24; the builtin
        # ``complex`` is the documented replacement (same complex128 dtype).
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal (left) bond dimension over all site tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of ``qubit``.

        set_at: where to put data; either [left, right] to set a full 2x2
            operator block, or [left, right, row, col] to set a single entry.
        add_operator: 2x2 matrix (length-2 case) or scalar (length-4 case).
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of the tensor on ``qubit`` (this should
        mimick a dynamic array) and place ``add_operator`` in the new
        bottom-right corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
            the last two dimensions are always 2x2 only and must stay zero.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # BUGFIX: dtype=complex instead of removed np.complex alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of the MPO via two SVD sweeps (left-to-right, then
        right-to-left), truncating singular values below EPS and splitting
        the singular weights evenly between neighbouring tensors.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices so each site is an order-3 tensor
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds a matrix-product-operator (MPO) representation of a qubit
    Hamiltonian. Terms are absorbed into an MPOContainer until its bond
    dimension would exceed ``maxdim``; overflow terms spill into additional
    MPOs, so the Hamiltonian is represented as a list of MPOs.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: tequila QubitHamiltonian (or textual form) to encode.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred if None.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension for a single MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this chunk --
            # presumably provided elsewhere; confirm before relying on it.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (MPO sites)."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert ``self.hamiltonian`` into ``self.mpo`` (list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of ``self.hamiltonian`` into a list of
        SubOperator records (coefficient, 2x2 matrices, qubit positions).
        Only the first Pauli string may be the identity (empty items).
        """
        # Here, have either a QubitHamiltonian or a file with a of-operator
        # Start with Qubithamiltonian
        def get_pauli_matrix(string):
            # BUGFIX: np.complex was removed in NumPy >= 1.24; the builtin
            # complex gives the same complex128 dtype.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term ``j`` of ``intermediate``, absorbing
        terms until either all are consumed or the bond dimension reaches
        ``self.maxdim``. Returns (mpo, next_j).
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: every site tensor is a plain 2x2 block; the coefficient
        # is distributed evenly over the sites via its n_qubits-th (complex)
        # root. BUGFIX: complex(...) instead of removed np.complex(...).
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimensions term by term, compress
        # periodically to keep intermediate bond dimensions small.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions.
                # Boundary sites only grow one bond; bulk sites grow both.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         np.eye(2))
            # compress every 100 absorbed terms
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split ``intermediate`` into as many MPOs as the maxdim cap requires."""
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator by contracting each MPO and summing,
        e.g. to compare with the Hamiltonian that we get from tequila.
        Returns a tensor of shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        # Contract over all bond indices;
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Reshape to get tensor of order 4 (get rid of left- and right open
            # indices and combine top & bottom into one)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # BUGFIX: previously H_m was only assigned for torch tensors,
                # raising NameError for any other tensornetwork backend.
                H_m = np.asarray(res.tensor)
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.9/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function

    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Unlike tequila's stock container, this variant rebuilds the Hamiltonian
    from the current variable values on every call, so Hamiltonian parameters
    can be optimized alongside circuit angles.

    Attributes
    ---------
    Hamiltonian:
        callable parametrized Hamiltonian; evaluated with the current variables.
    unitary:
        the circuit whose expectation value is evaluated.
    param_keys:
        the ordered sequence mapping positions in a numpy array to variables.
    Ham_derivatives:
        derivatives of the Hamiltonian w.r.t. its variables (used by _GradContainer).
    samples:
        the number of samples to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective (ordered as param_keys).
        args
        kwargs

        Returns
        -------
        complex:
            expectation value of the rebuilt Hamiltonian with the unitary.
        """
        angles = {}
        for i in range(self.N):
            # circuit angles stay as given; Hamiltonian parameters are
            # cast to complex before being fed into the Hamiltonian
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # rebuild the Hamiltonian with the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient of the objective at parameter vector p.

        For each parameter key the derivative has up to two contributions:
        the circuit (parameter-shift) derivative when the key appears in the
        unitary, and the Hamiltonian derivative when the key appears in
        ``self.Ham_derivatives``.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient, ordered like self.param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient vector as complex64 (but see review note on dE_vec below).
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with a real dtype, yet complex
        # derivatives are assigned into it and the result is cast to complex64
        # on return — imaginary parts are dropped at assignment. Confirm intended.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            # Same real/complex split as in _EvalContainer.__call__.
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution 1: circuit derivative via tq.grad (parameter shift).
            # Note: adding the compiled objective turns `derivative` into a
            # callable Objective, handled by the isinstance branch below.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution 2: derivative of the variable Hamiltonian itself,
            # evaluated immediately as an expectation value.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # Plain number -> store directly; otherwise `derivative` is a
            # compiled Objective and must be evaluated at the current variables.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that overwrites the expectation and gradient
    container objects (_EvalContainer/_GradContainer) so that the objective
    can carry a variable-dependent Hamiltonian.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective received in calls to optimizers.

        Parameters
        ----------
        all_variables: list:
            all variables appearing in the objective (Hamiltonian + circuit).
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in this
                # module — this branch would raise NameError. Confirm import.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` and TequilaWarning are not imported
                # in this module — confirm imports before relying on this path.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into actively optimized vs. fixed (passive) variables
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            parametrized circuit whose variables are optimized jointly with
            the Hamiltonian variables.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Collect all variables: Hamiltonian variables first, then circuit ones.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up call so E.infostring is populated
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is undefined in this scope and the
                # _QngContainer import is commented out at the top of the file —
                # this 'qng' branch would raise NameError. Confirm before use.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # '2-point'/'3-point'/'cs': hand the string straight to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` issue as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient via the customized container
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # class-level mutable attributes: shared across instances, but only
            # one instance is created per __call__
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns an unused local; presumably intended to
            # set callback.real_iterations — confirm against upstream tequila.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy function instead and passes the objective
    construction down

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian handed to optimize_scipy.
    unitary:
        parametrized circuit handed to optimize_scipy.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # Normalize user-supplied gradient/hessian dictionaries to Variable keys.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: previously the second key was wrapped in a list,
            # i.e. assign_variable([k[1]]) — assign_variable expects a single
            # hashable key (as for k[0]), not a list.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.9/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param objective (Objective/QTensor): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
        default None: total gradient (a dictionary over all variables).
    :param no_compile: skip the gradient-mode circuit compilation (caller guarantees compiled input)
    return: dictionary of Objectives if variable is None; otherwise the derivative
        objective with respect to the single given variable.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert (k is not None)
            # recurse once per variable to build the full gradient dictionary
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    if isinstance(objective, QTensor):
        # elementwise gradient over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    if variable not in objective.extract_variables():
        # no dependence: empty (zero) objective
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile to gates with known shift rules before differentiating
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # dispatch: raw expectation value, single-expectationvalue objective,
    # or a general (transformed) objective
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Differentiate a transformed Objective w.r.t. *variable* via the chain rule.

    For every argument of the objective's transformation, multiply the outer
    derivative (obtained from the autodiff backend) with the inner derivative
    of the argument itself, and sum the contributions.
    """
    arguments = objective.args
    transform = objective.transformation
    total = None
    inner_cache = {}
    for position, argument in enumerate(arguments):
        # Outer derivative d(transform)/d(arg) at this argument position.
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transform, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transform, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # Shortcut: for the identity transformation the outer derivative is constant 1.
        outer = 1.0 if (transform is None or transform == identity) else Objective(args=arguments, transformation=df)
        if hasattr(argument, "U"):
            # Expectation values are cached so repeated arguments are only
            # differentiated once.
            if argument not in inner_cache:
                inner_cache[argument] = __grad_inner(arg=argument, variable=variable)
            inner = inner_cache[argument]
        else:
            # Purely variable-dependent argument.
            inner = __grad_inner(arg=argument, variable=variable)
        if inner == 0.0:
            # Skip vanishing contributions instead of piling up zero terms.
            continue
        contribution = outer * inner
        total = contribution if total is None else total + contribution
    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Dispatch the inner derivative d(arg)/d(variable) by the type of arg.
    Returns 1.0/0.0 for (non-)matching Variables, 0.0 for FixedVariables,
    and recurses into expectation values or nested objectives otherwise.
    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        # d(v)/d(v) = 1, d(w)/d(v) = 0
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # Compiled expectation value: differentiate the abstract one, then
        # recompile with the same input arguments.
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # Anything else is treated as a nested Objective.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value with respect to one
    variable: one parameter-shift contribution per gate carrying the variable.
    :param E: the expectation value whose gradient should be obtained
    :param variable: the Variable with respect to which differentiation is performed.
    :return: the derivative as an Objective
    '''
    hamiltonian = E.H
    circuit = E.U
    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))
    # fast return if possible: no dependence means zero derivative
    if variable not in circuit.extract_variables():
        return 0.0
    total = Objective()
    # accumulate one shift-rule term per (position, gate) pair for this variable
    for position, gate in circuit._parameter_map[variable]:
        total += __grad_shift_rule(circuit, gate, position, variable, hamiltonian)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Parameter-shift derivative of a single gate inside an expectation value.
    Expects precompiled circuits (gates must expose shifted_gates()).
    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain-rule factor from the gate's parameter w.r.t. the variable
    chain_factor = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expval = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * chain_factor) * expval
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_3.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight record of one Pauli term in an intermediate format:
    a scalar coefficient plus the single-qubit operators and the qubit
    positions on which they act.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # Stored privately; exposed read-only through the properties below.
        self._coefficient = coefficient
        self._positions = positions
        self._operators = operators

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operators (one per entry of ``positions``)."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the MPO.

    Each entry of ``container`` is one site tensor with index layout
    (left bond, right bond, physical, physical); all tensors start as
    (1,1,2,2) zero tensors.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # FIX: np.complex was removed in NumPy 1.24 — use the builtin
        # `complex` (equivalent: complex128 dtype).
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                           for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns max (left) bond dimension over all site tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the site tensor of ``qubit``.

        set_at: where to put data — either [left, right] to set a full 2x2
        physical matrix, or [left, right, row, col] to set a single value.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array: grow the bond dimensions of the
        site tensor and place ``add_operator`` in the new corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and must stay 0.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # NOTE(review): += mutates the caller's list in place — callers
                # in this file pass fresh literals, so this is currently benign.
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # FIX: np.complex -> builtin complex (removed in NumPy 1.24).
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values in the newly created corner
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO bond dimensions via SVD: a left-to-right sweep
        followed by a right-to-left sweep, truncating singular values below EPS.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each site is a 3-index tensor.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site.
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site.
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Split the merged physical index back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building on tensornetwork to handle MPO representations of
    qubit Hamiltonians. Converts a tequila QubitHamiltonian into a list
    of MPOContainer objects (each capped at bond dimension ``maxdim``).
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize an MPO wrapper for the given Hamiltonian.

        Args:
          hamiltonian: the tq.QubitHamiltonian to represent.
          backend: an optional tensornetwork backend (currently unused here).
          n_qubits: number of qubits; inferred if not given.
          name: an optional name for the MPO.
          maxdim: maximum bond dimension per single MPO before a new one is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class as visible
            # here — constructing without n_qubits would fail. Confirm.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOs, stored in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Translate the Hamiltonian's Pauli strings into SubOperator records."""
        def get_pauli_matrix(string):
            # FIX: np.complex dtype alias was removed in NumPy 1.24 — use builtin complex.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term is allowed to be the identity
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """Build one MPO starting from term index j, consuming terms until
        the bond dimension would exceed self.maxdim. Returns (mpo, next_j)."""
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: fills the (0,0) corner of every site tensor.
        # The coefficient is distributed evenly as its n_qubits-th root.
        # FIX (throughout): np.complex(...) -> complex(...) (NumPy 1.24 removal).
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other terms: grow each site tensor by one bond slot and place
        # the term's operator (or identity) into the new corner.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions.
                # Edge sites only grow one bond; bulk sites grow both.
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # Periodic compression keeps intermediate bond dimensions in check.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Consume all intermediate terms into a list of bounded-dimension MPOs."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        ''' Recover the dense operator (as a rank-4 tensor), e.g. to compare
        with the Hamiltonian matrix obtained directly from tequila. Sums the
        contributions of all MPOs in self.mpo. '''
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        # | | | |
        # -O--O--...--O--O-
        # | | | |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # FIX: previously H_m was only assigned for torch tensors, leaving
            # it unbound (NameError / stale value) under the numpy backend.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_3.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Wraps a (parametrized) Hamiltonian and unitary so that scipy can call the
    energy like a plain function of a parameter vector, while keeping the
    optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tq Hamiltonian.
    unitary:
        the (parametrized) circuit used in the expectation value.
    param_keys:
        ordered variable keys mapping positions in the scipy parameter
        vector to tequila variables.
    samples:
        the number of samples to evaluate the expectation value with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        # variables that are held fixed during the optimization
        self.passive_angles = passive_angles
        self.Eval = Eval
        # human-readable summary, refreshed on every __call__
        self.infostring = None
        # dict of Hamiltonian derivatives, used by _GradContainer
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        call a wrapped objective.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        complex:
            the sampled expectation value <U(p)| H(p) |U(p)>.
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            # circuit parameters are passed through as-is; all remaining
            # (Hamiltonian) parameters are cast to complex
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # build the Hamiltonian at the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient companion of _EvalContainer: evaluates the derivative of the
    expectation value with respect to every active parameter, combining
    circuit gradients (via tq.grad) with Hamiltonian derivatives.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        call the wrapped qng.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient vector (complex64), ordered like param_keys.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with a real dtype but may receive
        # complex derivatives below -- imaginary parts appear to be dropped
        # before the final complex64 cast; confirm this is intended.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # circuit contribution: analytic gradient of the expectation value
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian contribution: expectation value of dH/dtheta
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # a plain number means no compiled objective is left to evaluate;
            # otherwise the compiled gradient objective must still be simulated
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that builds the objective from a parametrized
    Hamiltonian plus a unitary and overwrites the expectation and gradient
    container objects with the local _EvalContainer/_GradContainer.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            all variables of the Hamiltonian and the unitary.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module -- taking this branch would raise a NameError.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            # NOTE(review): warnings / TequilaWarning are not imported here.
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (convertible via convert_PQH_to_tq_QH).
        unitary:
            the parametrized circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        # variables of the Hamiltonian plus those of the circuit
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up evaluation to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this scope --
                # the qng path would raise a NameError; confirm before use.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # scipy's own numerical differentiation ('2-point' etc.)
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` issue as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # callback that records per-iteration data from the containers;
        # a fresh class object is created on every optimizer call, so the
        # class-level lists are not shared across calls
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assigns a throwaway local -- it has no effect;
        # presumably callback.real_iterations was meant.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and pass down the objective construction
    down
    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian (convertible via convert_PQH_to_tq_QH)
    unitary:
        the parametrized circuit
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUG FIX: the second key component was previously wrapped in a
            # list (assign_variable([k[1]])); assign the hashable key itself.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_3.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param objective (Objective or QTensor): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
        default None: total gradient.
    :param no_compile (bool): skip the gradient-mode compilation pass (objective is assumed to be compiled already)
    return: dictionary of Objectives (one per variable) if variable is None, otherwise a single derivative Objective.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # elementwise gradient over a tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # gradient_mode compilation rewrites gates into shift-rule friendly form
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Differentiate a (compiled) Objective with respect to one variable.

    Chain rule over the objective's transformation: for every argument the
    outer derivative is obtained with autograd/jax and multiplied by the
    inner derivative of the argument itself (via __grad_inner).
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values that appear more than once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the chain rule: dispatch on the type of arg and return
    its derivative with respect to variable.  Plain variables differentiate
    to 1 (if identical to variable) or 0; fixed variables to 0; expectation
    values and nested objectives are delegated to the dedicated routines.
    :param arg: a transform, variable or expectation value to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        # d(arg)/d(variable) is 1 for the variable itself, 0 otherwise
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U|H|U> with respect
    to one variable: the sum of shift-rule contributions of every gate in U
    that depends on that variable.
    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which differentiation is performed
    :return: the derivative as an Objective (0.0 if U does not depend on variable)
    '''
    hamiltonian = E.H
    circuit = E.U
    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))
    # fast return if possible
    if variable not in circuit.extract_variables():
        return 0.0
    derivative = Objective()
    for position, gate in circuit._parameter_map[variable]:
        derivative += __grad_shift_rule(circuit, gate, position, variable, hamiltonian)
    assert derivative is not None
    return derivative
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly differentiable gate via its shift rule.
    Expects precompiled circuits: the gate must expose shifted_gates()
    (possibility for override in custom gate construction).
    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position in unitary at which g appears
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian the surrounding expectation value is measured with
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate parameter w.r.t. the variable
    chain = __grad_inner(g.parameter, variable)
    total = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        total += (weight * chain) * expectation
    return total
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight helper record holding one term of an operator sum in an
    intermediate format: a scalar coefficient, the list of single-site
    operators and the qubit positions those operators act on.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # keep the three pieces together; exposed read-only via properties
        self._coefficient, self._operators, self._positions = (
            coefficient, operators, positions)

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Single-site operator matrices of this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Container for the tensors of a matrix-product operator (MPO).

    Holds one rank-4 tensor per qubit with index layout
    (left bond, right bond, physical-out, physical-in); the last two
    dimensions are always 2x2.  Supports setting entries, growing the
    bond dimensions (wannabe-equivalent to dynamic arrays) and
    compressing the MPO via SVD sweeps.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        """Initialize an MPO over `n_qubits` sites with bond dimension 1."""
        self.n_qubits = n_qubits
        # np.complex was removed in NumPy 1.24; use the builtin `complex`.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: where to put the data; length 2 -> a whole 2x2 matrix at
            the given bond indices, length 4 -> a single scalar entry.
        add_operator: the 2x2 matrix (or scalar for length-4 set_at) to write.
        Raises if set_at has any other length.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend the bond dimension along every
            direction carrying a 1; the last two (physical) dimensions are
            always 2x2 and must not grow.
        add_operator: 2x2 block placed at the new bottom-right bond position.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # Build an extended copy instead of mutating the caller's list
                # (`update_dir += [0, 0]` would modify it in place).
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # np.complex was removed in NumPy 1.24; use the builtin `complex`.
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values at the freshly created bond position
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD.

        Sweeps forwards then backwards, truncating singular values below EPS
        and distributing sqrt(singular values) to both neighbouring tensors.
        """
        n_qubits = len(self.container)
        # Fuse the two physical indices so each tensor is rank 3.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the fused physical index back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle MPO-Hamiltonians.

    Converts a qubit Hamiltonian (a sum of Pauli strings) into one or more
    MPOs whose bond dimension is capped by `maxdim`.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian to represent.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.  (Currently unused.)
            n_qubits: number of qubits; derived from the Hamiltonian if omitted.
            name: An optional name for the MPO.  (Currently unused.)
            maxdim: bond-dimension cap; a new MPO is started once exceeded.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class;
            # presumably provided elsewhere -- confirm before relying on it.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOs, stored in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Translate the Hamiltonian's Pauli strings into SubOperator records."""
        # Here, have either a QubitHamiltonian or a file with an of-operator.
        # Start with QubitHamiltonian.
        def get_pauli_matrix(string):
            # np.complex was removed in NumPy 1.24; use the builtin `complex`.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all sites
            if first and not paulistring.items():
                positions += []
                operators += []
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            # BUGFIX: clear the flag after every term (previously only cleared
            # in the identity branch, so a late identity after a non-identity
            # first term would slip through the check above).
            first = False
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index `j`, adding terms until the bond
        dimension would reach self.maxdim.  Returns (mpo, next term index).
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        # The coefficient is spread evenly over all sites via an n-th root.
        # (np.complex was removed in NumPy 1.24; use the builtin `complex`.)
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: keep appending terms while the bond dimension
        # stays below the cap.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # Periodically compress to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as needed to respect maxdim."""
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        ''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq '''
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #   |  |       |  |
        #  -O--O--...--O--O-
        #   |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Convert the result to a numpy array.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # BUGFIX: H_m was previously undefined (NameError) whenever the
                # contraction result was not a torch.Tensor.
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        # Reshape to get a tensor of order 4 (drop left/right open indices,
        # combine top & bottom into one).
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    Hamiltonian:
        variable-dependent Hamiltonian; called with the current variables in __call__.
    unitary:
        the circuit preparing the state whose expectation value is evaluated.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        # variables that are held fixed during the optimization
        self.passive_angles = passive_angles
        self.Eval = Eval
        # filled by __call__ with a summary of expectation-value counts
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        call a wrapped objective.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        complex:
            energy expectation value at p (cast to complex; see note at return).
        """
        # Split the flat parameter vector: circuit variables stay real,
        # Hamiltonian-only variables are cast to complex.
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)  # NOTE(review): shadows builtin `vars`
        # Evaluate the variable-dependent Hamiltonian at the current angles
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        call the wrapped gradient.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            the gradient vector at p, one entry per active parameter.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # gradient vector, one entry per active parameter
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)  # NOTE(review): shadows builtin `vars`
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Circuit contribution: differentiate <H(vars)> w.r.t. the gate angle.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian contribution: expectation value of dH/dparam.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # A plain number can be stored directly; a still-compiled objective
            # must be evaluated at the current variables first.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        # NOTE(review): dE_vec is a real array yet is returned cast to
        # complex64 -- imaginary parts of derivatives are dropped above.
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that overwrites the expectation and gradient
    container objects (_EvalContainer/_GradContainer) so that the
    Hamiltonian itself may depend on variables.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective received in calls to optimizers.
        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian + circuit).
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module -- confirm this branch is reachable without a NameError.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings / TequilaWarning are not imported here -- confirm.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian whose expectation value is minimized.
        unitary:
            circuit preparing the state.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Collect variables from the Hamiltonian and from the circuit.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up evaluation to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective`, get_qng_combos, _QngContainer and
                # TequilaException are not defined/imported in this module;
                # this qng branch would raise NameError if taken -- confirm.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # '2-point', 'cs' or '3-point': let scipy do numerical gradients
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined names as the qng branch above -- confirm.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # one silent warm-up call to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        # NOTE(review): Es is passed to scipy as an extra positional arg below
        # but never read by the containers -- confirm it is intentional.
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # records the per-iteration state out of the containers' histories
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local that is never used -- the
            # failsafe currently has no effect; confirm intended behaviour.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy function instead and passes the objective
    construction down
    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian whose expectation value is minimized
    unitary:
        the circuit preparing the state
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: k[1] was previously wrapped in a list (assign_variable([k[1]])),
            # which produces an invalid variable key for the hessian dictionary.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues, Unitaries (including single gates), and Transforms.
    :param objective (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
    :param variable (Variable): parameter with respect to which the objective should be differentiated.
        default None: total gradient (a dictionary over all variables).
    :param no_compile: if True, skip the gradient-mode compilation pass.
    return: dictionary of Objectives, if called on gate, circuit, exp.value, or objective; if Variable or Transform, returns number.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate elementwise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile down to gates with known shift rules before differentiating
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate an Objective w.r.t. one variable via the chain rule:
    sum over all args of (outer derivative of the transformation w.r.t.
    that arg) * (inner derivative of the arg w.r.t. the variable).
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values to avoid recomputation
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # outer derivative of the transformation w.r.t. argument i
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Differentiate a single leaf of an objective tree with respect to one variable.

    Returns 1.0 when arg is the variable itself, 0.0 for other variables and
    fixed values, and recurses into expectation values or nested objectives
    otherwise.

    :param arg: a Variable, FixedVariable, ExpectationValueImpl or nested
        objective to be differentiated.
    :param variable: the Variable with respect to which arg is differentiated.
    """
    assert (isinstance(variable, Variable))
    # plain variables: derivative is the Kronecker delta
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    # constants carry no variable dependence
    if isinstance(arg, FixedVariable):
        return 0.0
    # expectation values are handled by the shift-rule machinery
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    # compiled expectation values expose their abstract counterpart
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # anything else is treated as a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """
    Analytic partial derivative of an expectation value <H>_U with respect to
    a single variable, assembled gate by gate via the shift rule.

    :param E: the expectation value (hamiltonian + unitary) to differentiate.
    :param variable: the Variable with respect to which to differentiate.
    :return: an Objective encoding d<H>_U/d(variable), or 0.0 when the
        unitary carries no dependence on variable.
    """
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if the circuit does not depend on this variable at all
    if variable not in unitary.extract_variables():
        return 0.0
    dO = Objective()
    # one shift-rule contribution per gate that consumes the variable
    for position, gate in unitary._parameter_map[variable]:
        dO += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """
    Gradient of one directly differentiable gate inside a circuit, built from
    the gate's own shift-rule decomposition. Expects precompiled circuits.

    :param unitary: QCircuit: the circuit containing the gate to differentiate
    :param g: the parametrized gate being differentiated
    :param i: Int: the position inside unitary at which g appears
    :param variable: Variable or String: the differentiation variable
    :param hamiltonian: the hamiltonian measured against unitary, in the case
        that unitary is contained within an ExpectationValue
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    """
    # custom gate constructions may override the decomposition via shifted_gates
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate parameter w.r.t. the variable
    inner_grad = __grad_inner(g.parameter, variable)
    dOinc = Objective()
    for weight, shifted_gate in g.shifted_gates():
        replaced = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=replaced, H=hamiltonian)
        dOinc += (weight * inner_grad) * expectation
    return dOinc
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight read-only record bundling a Pauli-term coefficient with its
    single-qubit operators and the qubit positions they act on. Serves as an
    intermediate format between the Hamiltonian and the MPO builder.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        # stored privately; exposed through read-only properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Single-qubit operator matrices of this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on, aligned with `operators`."""
        return self._positions
class MPOContainer:
    """
    Container handling the tensors of a matrix product operator (MPO).

    Holds one rank-4 tensor per qubit with index order
    (left bond, right bond, physical out, physical in). Supports setting
    entries, growing the bond dimensions like a dynamic array, and SVD-based
    compression of the bond dimension.
    """

    def __init__(self, n_qubits: int):
        self.n_qubits = n_qubits
        # one (1, 1, 2, 2) tensor per qubit; bond dims grow via update_container.
        # Bugfix: np.complex was removed in NumPy 1.24 -- the builtin `complex`
        # is the documented replacement and is equivalent here.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left bond dimension across all qubit tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        :param set_at: either [left, right] to set a full 2x2 physical block,
            or [left, right, out, in] to set one scalar entry.
        :param add_operator: 2x2 array (len(set_at) == 2) or scalar (len == 4).
        :raises Exception: if set_at has an unsupported length.
        """
        if len(set_at) == 2:
            # set a whole 2x2 operator block
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # set one specific entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of the tensor at `qubit` (dynamic-array style)
        and place `add_operator` in the newly created corner block.

        :param update_dir: growth per index, e.g. [1, 1] or [1, 1, 0, 0];
            the two physical (2x2) dimensions must not grow.
        :raises Exception: on malformed update_dir.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # Bugfix: np.complex removed in NumPy 1.24; builtin complex is equivalent
        new_tensor = np.zeros(new_shape, dtype=complex)
        # copy old values into the upper-left block
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # place the new operator in the bottom-right corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO bond dimensions via two SVD sweeps (left-to-right,
        then right-to-left), discarding singular values below the module
        threshold EPS and distributing sqrt(s) weights to both factors.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices so each tensor is rank 3
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # forward sweep
        for q in range(n_qubits - 1):
            # permute [0 1 2] -> [0 2 1] and flatten for the SVD
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to the 'econ' SVD
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # count the non-negligible singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # split the singular-value weight evenly between both factors
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            # undo the permutation and push vh into the next tensor
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # backward sweep
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # restore the two physical indices
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds a list of matrix product operators (MPOs) from a tequila
    QubitHamiltonian, starting a new MPO whenever the bond dimension would
    exceed `maxdim`, and can reconstruct the dense operator for verification.
    """

    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        # NOTE: the third-party annotations above are quoted (PEP 484 forward
        # references) so the class definition does not evaluate them eagerly.
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian (or a text handle) to encode.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; derived from the Hamiltonian if None.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension before a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class --
            # presumably provided elsewhere; verify before calling without
            # an explicit n_qubits.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into MPOs, stored as a list in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of self.hamiltonian into a list of
        SubOperator records (coefficient, operator matrices, positions).

        :raises Exception: if an identity term appears anywhere but first.
        """
        def get_pauli_matrix(string):
            # Bugfix: np.complex was removed in NumPy 1.24; the builtin
            # `complex` is the documented, equivalent replacement.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # store all Pauli strings in the intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # only the first Pauli string may be the identity (empty items)
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # collect the operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index j, absorbing further terms until
        either the list is exhausted or the bond dimension reaches maxdim.

        :return: (mpo, j) where j indexes the first unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # first term: plain 2x2 blocks; the coefficient is spread evenly over
        # all qubits via its n_qubits-th root
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               my_operators[my_pos_index])
        # remaining terms: each grows the inner bond dimensions by one
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # boundary tensors only grow along their single inner bond
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # an operator on this position, otherwise an identity
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         my_operators[my_pos_index])
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         np.eye(2))
            # compress periodically to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the full term list into as many MPOs as maxdim requires."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator by contracting all MPOs in self.mpo,
        e.g. to compare with the Hamiltonian matrix obtained from tequila.

        :return: array of shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # network layout (contract along the horizontal bond indices):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # connect the bond indices between neighbours
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # collect dangling (free) edges: left, right, top row, bottom row
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # contract between all nodes along the non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Bugfix: previously H_m was only assigned for torch tensors,
            # raising NameError for numpy-backed networks.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = np.asarray(res.tensor)
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable energy wrapper handed to scipy.optimize.minimize.

    Each __call__ maps the scipy parameter vector p onto tequila variables,
    evaluates the parametrized Hamiltonian at those values and returns the
    sampled expectation value <H(p)>_U(p), optionally recording the history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        parametrized Hamiltonian; calling it with a variable dict yields the
        concrete Hamiltonian to measure.
    unitary:
        the (parametrized) circuit prepared before measuring.
    param_keys:
        ordered list of variable keys; position i corresponds to p[i].
    Ham_derivatives:
        derivatives of the Hamiltonian w.r.t. its own variables
        (consumed by _GradContainer, unused in this class).
    samples:
        the number of samples (shots) to evaluate expectation values with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy for the given parameter vector.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective
            (ordered according to param_keys).
        args
        kwargs

        Returns
        -------
        complex:
            the (sampled) expectation value of the Hamiltonian, with p
            translated into variables.
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # circuit variables are passed through as-is; variables appearing only
        # in the Hamiltonian are promoted to complex
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        # merge in the variables that are held fixed during optimization
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # evaluate the parametrized Hamiltonian at the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable gradient wrapper handed to scipy.optimize.minimize (jac=...).

    For every active variable the derivative has up to two contributions:
    the circuit derivative (via tq.grad on the expectation value) and the
    derivative of the parametrized Hamiltonian itself (from Ham_derivatives).
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for attribute details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient vector at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient
            (ordered according to param_keys).
        args
        kwargs

        Returns
        -------
        numpy.array:
            dE/dp as a complex64 numpy array.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is a real (float64) array, yet complex values
        # may be assigned below and the result is cast to complex64 on return;
        # imaginary parts are silently dropped at the assignment -- verify
        # this is intended.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # same variable-promotion scheme as in _EvalContainer.__call__
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit's dependence on this variable
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian's own dependence on this variable
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # plain numbers can be stored directly; compiled objectives still
            # need to be evaluated at the current variable values
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer specialised to a parametrized Hamiltonian plus a
    parametrized unitary: overwrites the expectation and gradient container
    objects so both the circuit variables and the Hamiltonian's own variables
    are optimized together.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            all variables of the combined Hamiltonian + unitary problem.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module -- this branch would raise NameError; verify.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            # NOTE(review): `warnings` and TequilaWarning are not imported in
            # this module -- the warn call would raise NameError; verify.
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into the variables to optimize and the ones held fixed
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            the parametrized Hamiltonian (PQH) to optimize; converted to a
            tequila QubitHamiltonian internally.
        unitary:
            the parametrized circuit measured against the Hamiltonian.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # combine the Hamiltonian's own variables with the circuit variables
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up call to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                # NOTE(review): `objective`, get_qng_combos, _QngContainer and
                # TequilaException are undefined in this scope (the import at
                # the top of the file is commented out) -- this path would
                # raise NameError; verify against upstream tequila.
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                # NOTE(review): same undefined names as the 'qng' branch above.
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # one silent warm-up call to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # collects per-iteration data from the containers; note these are
            # class-level (shared) attributes in the original design
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assignment binds an unused local and has no
        # effect on the callback object -- looks like dead code; verify.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy function and passes the objective
    construction down to it.

    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian to optimize.
    unitary:
        the parametrized circuit measured against the Hamiltonian.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize user-supplied gradient/hessian dictionaries to Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # Bugfix: k[1] was previously wrapped in a list
            # (assign_variable([k[1]])), producing an invalid second key
            # component for the hessian dictionary.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which objective should be
        differentiated; default None means: differentiate with respect to all
        variables and return a dictionary {variable: gradient}
    :param no_compile: if True, skip the gradient-mode compilation pass
        (objective must then already be in a differentiable gate set)
    :return: dictionary of Objectives if variable is None, otherwise a single
        gradient Objective (plain numbers for purely variable-dependent input)
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    # QTensor input: differentiate elementwise and return a tensor of gradients
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    # fast return: objective does not depend on the requested variable
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile to a gate set for which parameter-shift rules are available
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # single-expectationvalue objective: differentiate its compiled core
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate a compiled Objective with respect to `variable` via the
    chain rule: sum over args of (outer derivative of the transformation with
    respect to that arg) times (inner derivative of the arg itself).
    """
    args = objective.args
    transformation = objective.transformation
    dO = None

    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # NOTE(review): `jax` here presumably aliases autograd when the
        # backend is "autograd" (see tequila.autograd_imports) — confirm.
        if __AUTOGRAD__BACKEND__ == "jax":
            # holomorphic=True: transformation arguments may be complex-valued
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # arg is an expectation value: cache its inner derivative to
            # save redundancies (the same arg may occur in several slots)
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner

    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Base case of the recursive differentiation: resolve the derivative of a
    single argument all the way down to plain variables.

    Returns 1.0 / 0.0 when `arg` is (is not) the requested variable, delegates
    expectation values to `__grad_expectationvalue`, and recurses into nested
    objectives otherwise.

    :param arg: a transform, expectation value or variable to differentiate
    :param variable: the Variable with respect to which `arg` is differentiated
    """
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        d_abstract = __grad_expectationvalue(abstract, variable=variable)
        return compile(d_abstract, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """
    Analytic partial derivative of an expectation value's unitary with respect
    to `variable`, assembled gate by gate via the shift rule.

    :param E: the expectation value whose circuit is differentiated
    :param variable: the variable to differentiate with respect to
    :return: the gradient as an Objective (0.0 if the circuit is independent
        of `variable`)
    """
    H = E.H
    U = E.U
    if not U.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))

    # fast return if possible
    if variable not in U.extract_variables():
        return 0.0

    total = Objective()
    # one shift-rule contribution per gate that carries this variable
    for position, gate in U._parameter_map[variable]:
        total += __grad_shift_rule(U, gate, position, variable, H)

    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """
    Gradient contribution of one directly differentiable gate via its shift
    rule. Expects precompiled circuits.

    :param unitary: the QCircuit containing the gate to differentiate
    :param g: the parametrized gate being differentiated
    :param i: position of `g` inside `unitary`
    :param variable: the variable with respect to which `g` is differentiated
    :param hamiltonian: the Hamiltonian measured against `unitary`
    :return: an Objective whose evaluation yields d<H>/d(variable) for gate g
    """
    # custom gate constructions may override `shifted_gates`
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # chain-rule factor: derivative of the gate parameter w.r.t. the variable
    chain_factor = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * chain_factor) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.2/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Lightweight record holding one Pauli term in an intermediate format.

    Bundles the term's scalar coefficient with its single-qubit operator
    matrices and the qubit positions they act on. All fields are exposed
    read-only through properties.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        # stored under private names; read access goes through the properties
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """List of single-qubit operator matrices."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on, aligned with ``operators``."""
        return self._positions
class MPOContainer:
    """
    Holds the per-qubit MPO tensors. Supports writing values at given
    positions, growing the bond dimensions (wannabe-equivalent to dynamic
    arrays) and compressing the MPO via SVD sweeps.
    """

    def __init__(self, n_qubits: int):
        """
        Parameters
        ----------
        n_qubits:
            number of sites; one rank-4 tensor indexed
            [left bond, right bond, row, col] is allocated per site,
            initially with bond dimension 1.
        """
        self.n_qubits = n_qubits
        # FIX: `complex` replaces the numpy alias `np.complex`, which was
        # deprecated in NumPy 1.20 and removed in 1.24; both mean complex128.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for _ in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left bond dimension over all site tensors."""
        d = 1
        for tensor in self.container:
            d = max(d, tensor.shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write `add_operator` into the site tensor of `qubit`.

        set_at: [left, right] writes a full 2x2 block; [left, right, row, col]
        writes a single entry.

        Raises
        ------
        Exception: if `set_at` has neither length 2 nor 4.
        """
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the site tensor of `qubit` like a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend the bond dimension along each
        axis carrying a 1; the last two (physical) dimensions always stay 2x2.
        The 2x2 block `add_operator` is written at the newly created corner.

        Raises
        ------
        Exception: if `update_dir` has the wrong length or requests growth of
            a physical dimension.
        """
        old_shape = self.container[qubit].shape
        if len(update_dir) != 4:
            if len(update_dir) == 2:
                # FIX: the original `update_dir += [0, 0]` mutated the
                # caller's list in place; build a new list instead.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # FIX: `complex` instead of removed `np.complex` (see __init__).
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values at the extended corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO with a forward and a backward SVD sweep, discarding
        singular values below EPS and splitting sqrt(s) between neighbouring
        sites.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each site is a rank-3 tensor.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Undo the permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            # RHS still reads the pre-update shape of container[q]
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the merged physical index back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds an MPO representation of a tequila QubitHamiltonian, splitting it
    into a list of partial MPOs each capped at bond dimension `maxdim`.
    """

    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the Hamiltonian to convert.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork. (Annotation is quoted so the class can be
                defined without the tensornetwork import resolved.)
            n_qubits: number of qubits; derived from the Hamiltonian if omitted.
            name: An optional name for the MPO.
            maxdim: bond-dimension cap per partial MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): `get_n_qubits` is not defined on this class in the
            # visible source, so this branch raises AttributeError unless the
            # method is provided elsewhere — TODO confirm.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits / MPO sites."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainers)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert the Pauli strings of self.hamiltonian (a QubitHamiltonian)
        into a list of SubOperator records (coefficient, matrices, positions).
        """
        def get_pauli_matrix(string):
            # FIX: `complex` replaces the numpy alias `np.complex`
            # (deprecated NumPy 1.20, removed 1.24).
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in the intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            if not paulistring.items():
                # identity term: contributes no operators; its coefficient is
                # later spread over all sites. Only the first term may be it.
                # NOTE(review): `first` is only cleared here, so an identity
                # appearing after a non-identity FIRST term would slip through
                # unnoticed — TODO confirm term ordering guarantees.
                if first:
                    first = False
                else:
                    raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            intermediate += [SubOperator(coefficient=coefficient,
                                         operators=operators,
                                         positions=positions)]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term `j` of `intermediate`; stop when all
        terms are consumed or the bond dimension reaches self.maxdim.

        Returns
        -------
        (MPOContainer, int): the MPO and the index of the first unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # First term: write directly into the (1,1,2,2) seed tensors. The
        # coefficient is spread evenly as coeff**(1/n_qubits) per site.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # FIX: `complex(...)` replaces the removed `np.complex(...)`.
            site_factor = complex(my_coefficient) ** (1 / n_qubits)
            if q in my_positions:
                op = my_operators[my_positions.index(q)]
            else:
                op = np.eye(2)
            mpo.set_tensor(qubit=q, set_at=[0, 0], add_operator=site_factor * op)
        # Remaining terms: grow the bond dimension by one per term and write
        # the new operator block at the extended corner.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # boundary sites only grow the inward-facing bond
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                site_factor = complex(my_coefficient) ** (1 / n_qubits)
                if q in my_positions:
                    op = my_operators[my_positions.index(q)]
                else:
                    op = np.eye(2)
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=site_factor * op)
            # periodic compression keeps the intermediate bond dimension small
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split `intermediate` into as many MPOs as needed under maxdim."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator represented by self.mpo as a rank-4 tensor
        of shape (d, d, d, d) with d = 2**(n_qubits/2), e.g. to compare with
        the Hamiltonian matrix obtained from tequila.
        """
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        H = None
        # Contract each partial MPO over its bond indices and sum the results.
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in self.mpo:
            assert n_qubits == len(m.container)
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect neighbouring bond indices (along the double line above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices): left and right boundary
            # bonds, then the upper and the lower physical legs.
            edges = [nodes[0].get_edge(0), nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # FIX: with a non-torch tensornetwork backend the original left
            # H_m unbound (NameError); fall back to the raw tensor.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            H = H_m if H is None else H + H_m
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.2/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Adapter that lets scipy call a parametrized Hamiltonian/circuit pair as a
    plain function of a parameter vector, while recording the optimization
    history. Used exclusively by the SciPy optimizer.

    Attributes
    ----------
    Hamiltonian:
        parametrized Hamiltonian; called with the formatted variable dict.
    unitary:
        the parametrized circuit.
    param_keys:
        ordered variable keys matching the positions in the scipy vector.
    N:
        number of entries in param_keys.
    samples:
        number of shots per expectation-value evaluation.
    save_history:
        whether each __call__ records its energy and angles.
    print_level:
        verbosity of the per-call printout.
    history / history_angles:
        recorded energies / angle dictionaries (only if save_history).
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None,
                 passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.Ham_derivatives = Ham_derivatives
        self.Eval = Eval
        self.passive_angles = passive_angles
        self.samples = samples
        self.save_history = save_history
        self.print_level = print_level
        self.infostring = None
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at the parameter vector `p`.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.

        Returns
        -------
        complex:
            the sampled expectation value, cast to plain complex.
        """
        circuit_variables = self.unitary.extract_variables()
        angles = {}
        for i, key in enumerate(self.param_keys):
            # circuit angles stay real; Hamiltonian parameters become complex
            angles[key] = p[i] if key in circuit_variables else complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        fvars = format_variable_dictionary(angles)
        Expval = tq.ExpectationValue(H=self.Hamiltonian(fvars), U=self.unitary)
        E = tq.simulate(Expval, fvars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Container bridging scipy's `jac` callback and the tequila gradient
    machinery, keeping the optimization history. Used by the SciPy optimizer
    only. See _EvalContainer for the attribute documentation.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the parameter vector `p`.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.

        Returns
        -------
        numpy.array:
            complex gradient vector, one entry per parameter key.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # FIX: allocate complex from the start. `numpy.zeros(self.N)` is
        # float64, and assigning a complex derivative into it raises
        # "can't convert complex to float".
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # circuit angles stay real; Hamiltonian parameters become complex
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        fvars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the parametrized circuit
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(fvars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]),
                                             backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the parametrized Hamiltonian
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = convert_PQH_to_tq_QH(Ham_derivatives[self.param_keys[i]])
                Expval = tq.ExpectationValue(H=Ham(fvars), U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, fvars, backend='qulacs', samples=self.samples)
            # ROBUSTNESS FIX: accept any numeric scalar. The original only
            # checked `float` and `numpy.complex64` and would try to *call*
            # e.g. a numpy.complex128 value.
            if isinstance(derivative, (numbers.Number, numpy.number)):
                dE_vec[i] = derivative
            else:
                # still a compiled objective: evaluate it now
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that overwrites the expectation and gradient
    container objects so that the Hamiltonian itself may carry variables
    (the energy is re-assembled from `Hamiltonian(vars)` on every call).
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables received in calls to the
        optimizer.

        Parameters
        ----------
        all_variables: list:
            every variable of the problem (Hamiltonian + circuit).
        initial_values: dict or string or callable or number:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if number: that value for every variable
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over (None -> all).

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        # NOTE(review): self-assignment is a no-op kept from the original.
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this file's visible import block — presumably provided via
                # `from vqe_utils import *`; TODO confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` and `TequilaWarning` are not in the
                # visible imports either — TODO confirm they resolve at runtime.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into variables that scipy will move and those held fixed
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization of <unitary|Hamiltonian|unitary> using scipy
        optimizers, where both the circuit and the Hamiltonian may carry
        variables.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (PQH format, converted internally).
        unitary:
            the parametrized circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be
            generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to
            None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to
            None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # convert the parametrized Hamiltonian and collect ALL variables
        # (Hamiltonian variables first, then circuit variables)
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # energy container scipy will call; evaluate once silently so that
        # the infostring (expectationvalue count) is populated
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is undefined in this scope (the
                # signature takes Hamiltonian/unitary) and `get_qng_combos` /
                # `_QngContainer` are not in the visible imports — this branch
                # would raise NameError if reached; TODO confirm.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # pass the string (e.g. '2-point') straight through to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined-name concern as the qng branch
                # above; TODO confirm.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient container; evaluate once silently to populate
            # its infostring (expectationvalue count)
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # NOTE(review): class-level mutable attributes are shared between
            # instances; a fresh instance is created per optimization below,
            # but the lists persist on the class object itself.
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                # record the latest evaluation of each container per iteration
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is never used afterwards — kept as-is.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Front-end for the local `optimize_scipy` optimizer: formats user-supplied
    gradient/hessian dictionaries and initial values, constructs the optimizer
    and runs it on the given parametrized Hamiltonian and unitary.

    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian whose expectation value is minimized.
    unitary:
        the parametrized circuit preparing the trial state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None]: (Default value = None)
        '2-point', 'cs' or '3-point' for numerical gradient evaluation
        (does not work in combination with all optimizers),
        a dictionary of variables and tequila objectives to define your own gradient,
        None for automatic construction (default),
        or 'qng' for the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        same options as gradient; dictionary keys are tuples of variables.
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        starting point of the optimization; generated if None.
    variables: typing.List[typing.Hashable], optional:
        list of variables to optimize (all variables if None).
    samples: int, optional:
        samples/shots per circuit evaluation (None = exact simulation).
    maxiter: int: (Default value = 100)
        maximum number of iterations.
    backend: str, optional:
        simulator backend, automatically chosen if None.
    backend_options: dict, optional:
        additional options passed to the compiled objective on every call.
    noise: NoiseModel, optional:
        a NoiseModel applied to all expectation values.
    device: str, optional:
        device the backend should run on.
    method: str: (Default = "BFGS")
        scipy optimization method.
    tol: float: (Default = 1.e-3)
        scipy convergence tolerance.
    method_options: dict, optional:
        scipy method options.
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        variable bounds (see scipy documentation).
    method_constraints: optional:
        scipy constraints.
    silent: bool:
        no printout if True.
    save_history: bool:
        save the optimization history if True.

    Returns
    -------
    SciPyResults:
        the results of the optimization.
    """
    # User-supplied gradients/hessians come as {variable: Objective} maps;
    # normalize keys to tequila Variables.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUGFIX: assign_variable was previously called with a one-element
            # list ([k[1]]) instead of the variable itself, producing malformed
            # hessian keys.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.2/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    """
    Differentiate tequila structures (Objectives, ExpectationValues, QTensors).

    Parameters
    ----------
    objective:
        the structure to be differentiated.
    variable:
        the Variable with respect to which to differentiate;
        None (default) computes the total gradient.
    no_compile:
        if True, skip the gradient-mode compilation pass.

    Returns
    -------
    The derivative as an Objective, or a dict of them (one per variable)
    when variable is None.
    """
    # Total gradient: recurse once for every variable of the objective.
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for var in all_vars:
            assert (var is not None)
            gradients[var] = grad(objective, var, no_compile=no_compile)
        return gradients

    variable = assign_variable(variable)

    # Tensors of objectives are differentiated elementwise.
    if isinstance(objective, QTensor):
        differentiate_entry = lambda entry: grad(objective=entry, variable=variable, *args, **kwargs)
        return vectorize(differentiate_entry)(objective)

    # Fast return: no dependence on the requested variable.
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # Bring every parametrized gate into a shift-rule-compatible form.
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # Dispatch on the structure we actually received.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over the arguments of an Objective: d(f(E_1,...,E_n))/dv =
    sum_i (df/dE_i) * (dE_i/dv). Inner derivatives of expectation values are
    cached so repeated arguments are only differentiated once.
    """
    arguments = objective.args
    transform = objective.transformation
    accumulated = None
    inner_cache = {}
    for position, argument in enumerate(arguments):
        # Outer derivative df/dE_i via the configured autodiff backend.
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_fn = jax.grad(transform, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_fn = jax.grad(transform, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # Trivial outer derivative when the transformation is the identity.
        if transform is None or transform == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_fn)

        if hasattr(argument, "U"):
            # Expectation value: reuse its inner derivative if already computed.
            if argument not in inner_cache:
                inner_cache[argument] = __grad_inner(arg=argument, variable=variable)
            inner = inner_cache[argument]
        else:
            # Purely variable-dependent argument.
            inner = __grad_inner(arg=argument, variable=variable)

        # Skip vanishing contributions so no zero expectation values pile up.
        if inner == 0.0:
            continue
        contribution = outer * inner
        accumulated = contribution if accumulated is None else accumulated + contribution

    if accumulated is None:
        raise TequilaException("caught None in __grad_objective")
    return accumulated
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Inner derivative d(arg)/d(variable): 1 or 0 for plain variables,
    the analytic derivative for expectation values, and a recursive
    chain-rule step for nested objectives.

    :param arg: a transform, variable, or expectation value to differentiate
    :param variable: the Variable with respect to which to differentiate
    """
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        # d(v)/d(v) = 1, d(w)/d(v) = 0 for w != v.
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # Compiled expectation value: differentiate the abstract one and
        # recompile with the original backend arguments.
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # Nested objective: recurse via the chain rule.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """
    Analytic partial derivative of an expectation value <H>_U with respect
    to one variable: sum the shift-rule contributions of every gate in U
    that is parametrized by that variable.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which to differentiate
    :return: dE/d(variable) as an Objective
    """
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # Fast return if the circuit does not depend on the variable.
    if variable not in unitary.extract_variables():
        return 0.0

    accumulated = Objective()
    # One shift-rule term per gate parametrized by this variable.
    for position, gate in unitary._parameter_map[variable]:
        accumulated += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert accumulated is not None
    return accumulated
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """
    Shift-rule derivative of a single directly differentiable gate.
    Expects a precompiled circuit.

    :param unitary: the QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: the position of g inside unitary
    :param variable: the variable with respect to which to differentiate
    :param hamiltonian: the Hamiltonian measured against unitary
    :return: an Objective evaluating d<hamiltonian>/d(variable) for this gate
    """
    # Custom gates may override this hook; without it there is no shift rule.
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # Chain rule: derivative of the gate parameter w.r.t. the variable.
    inner_grad = __grad_inner(g.parameter, variable)
    total = Objective()
    for weight, replacement in g.shifted_gates():
        # Replace the gate at position i by its shifted counterpart.
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        total += (weight * inner_grad) * expectation
    return total
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.6/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight record for one Hamiltonian term in intermediate format:
    a scalar coefficient together with the local operators and the
    positions (qubit indices) on which they act.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # Stash the raw inputs; read-only access goes through the properties.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Local operators (e.g. 2x2 Pauli matrices) of this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Container for the tensors of a matrix product operator (MPO).

    Each qubit holds one rank-4 tensor of shape (left bond, right bond, 2, 2).
    The class can set values at given positions, grow bond dimensions of
    individual tensors (wannabe-equivalent to dynamic arrays) and compress
    the MPO via SVD sweeps.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # BUGFIX: np.complex (alias of builtin complex) was deprecated in
        # NumPy 1.20 and removed in 1.24; use the builtin `complex` instead.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                            for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns max (left) bond dimension over all tensors in the container """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: [i, j] writes a 2x2 operator block at bond indices (i, j);
                [i, j, k, l] writes a single scalar entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                    add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` by one along each flagged bond dimension
        and place add_operator in the new corner block; mimics a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 only.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # BUGFIX: use `+` instead of `+=` so the caller's list is
                # not mutated as a side effect.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the freshly created corner block
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO bond dimensions with a forward and a backward SVD
        sweep, discarding singular values below EPS.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each tensor is rank 3.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                    self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the neighbouring tensor.
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the neighbouring tensor.
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Split the merged physical index back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build and handle MPO representations of qubit Hamiltonians
    (tensornetwork-style), splitting into multiple MPOs when the
    bond dimension would exceed `maxdim`.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian (or a file identifier) to convert.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred via get_n_qubits() if None.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension of one MPO before a new one is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOContainers (self.mpo)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert self.hamiltonian (a tq.QubitHamiltonian) into a list of
        SubOperator records (coefficient, operators, positions).
        """
        def get_pauli_matrix(string):
            # BUGFIX: np.complex was removed in NumPy 1.24; the builtin
            # `complex` is the equivalent dtype.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (acts on no qubit)
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term j of `intermediate`, adding terms until
        either all are consumed or the bond dimension reaches self.maxdim.
        Returns the MPOContainer and the index of the first unconsumed term.
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity. The coefficient is distributed
        # evenly over all sites via its n_qubits-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                                            np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                                            my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the container by one bond dimension per term.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions.
                # Boundary tensors only grow along their single inner bond.
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # Compress periodically to keep the bond dimension in check.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Consume all intermediate terms, producing a list of MPOs."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator (as a rank-4 tensor of shape (d,d,d,d)
        with d = 2**(n_qubits/2)), e.g. to compare with the Hamiltonian
        obtained from tequila. Sums the contributions of all MPOs.
        """
        mpo = self.mpo
        # Contract over all bond indices.
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #    |  |       |  |
        #   -O--O--...--O--O-
        #    |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # BUGFIX: previously H_m was only assigned for torch tensors,
            # raising NameError for other backends.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.6/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
"""
Overwrite the call function
Container Class to access scipy and keep the optimization history.
This class is used by the SciPy optimizer and should not be used elsewhere.
Attributes
---------
objective:
the objective to evaluate.
param_keys:
the dictionary mapping parameter keys to positions in a numpy array.
samples:
the number of samples to evaluate objective with.
save_history:
whether or not to save, in a history, information about each time __call__ occurs.
print_level
dictates the verbosity of printing during call.
N:
the length of param_keys.
history:
if save_history, a list of energies received from every __call__
history_angles:
if save_history, a list of angles sent to __call__.
"""
def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
print_level: int = 3):
self.Hamiltonian = Hamiltonian
self.unitary = unitary
self.samples = samples
self.param_keys = param_keys
self.N = len(param_keys)
self.save_history = save_history
self.print_level = print_level
self.passive_angles = passive_angles
self.Eval = Eval
self.infostring = None
self.Ham_derivatives = Ham_derivatives
if save_history:
self.history = []
self.history_angles = []
def __call__(self, p, *args, **kwargs):
"""
call a wrapped objective.
Parameters
----------
p: numpy array:
Parameters with which to call the objective.
args
kwargs
Returns
-------
numpy.array:
value of self.objective with p translated into variables, as a numpy array.
"""
angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
for i in range(self.N):
if self.param_keys[i] in self.unitary.extract_variables():
angles[self.param_keys[i]] = p[i]
else:
angles[self.param_keys[i]] = complex(p[i])
if self.passive_angles is not None:
angles = {**angles, **self.passive_angles}
vars = format_variable_dictionary(angles)
Hamiltonian = self.Hamiltonian(vars)
#print(Hamiltonian)
#print(self.unitary)
#print(vars)
Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
#print(Expval)
E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
if self.print_level > 2:
print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
elif self.print_level > 1:
print("E={:+2.8f}".format(E))
if self.save_history:
self.history.append(E)
self.history_angles.append(angles)
return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable wrapper handing the gradient evaluation to scipy while keeping
    the optimization history. Used by the SciPy optimizer only.

    The gradient of the energy has two contributions per parameter:
    the circuit derivative (via tq.grad of the expectation value) and the
    Hamiltonian derivative (self.Ham_derivatives), since both the unitary
    and the Hamiltonian may depend on the same variables.
    See _EvalContainer for the shared attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the scipy parameter vector p.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.

        Returns
        -------
        numpy.array:
            gradient vector, cast to numpy.complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with a real dtype but may be
        # assigned complex-valued derivatives below — confirm intended.
        dE_vec = numpy.zeros(self.N)
        # memory maps parameter key -> gradient component for the history.
        memory = dict()
        # Circuit variables stay real; Hamiltonian-only variables become complex
        # (mirrors _EvalContainer.__call__).
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        # expvals counts evaluated expectation values for the infostring.
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution 1: derivative of the circuit at fixed Hamiltonian.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                # derivative becomes a compiled objective here (evaluated below).
                derivative += temp_derivative
            # Contribution 2: derivative of the Hamiltonian at fixed circuit.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # Plain numbers are stored directly; compiled objectives are
            # evaluated at the current variables first.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        # NOTE(review): scipy generally expects a real-valued jacobian;
        # confirm the complex64 cast is handled downstream.
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Format the variables and initial values received in optimizer calls.

        Parameters
        ----------
        all_variables: list:
            all variables of the problem (circuit plus Hamiltonian variables).
        initial_values: dict or str or callable or number:
            initial values for the variables, as a dictionary;
            if str: can be `zero` or `random`;
            if callable: custom function evaluated on each variable key;
            if number: the same value for every variable;
            if None: random initialization between 0 and 2pi (not recommended).
        variables: list:
            the variables being optimized over (all_variables if None).

        Returns
        -------
        tuple:
            active_angles, a dict of the variables being optimized;
            passive_angles, a dict of the variables NOT being optimized;
            variables, the formatted list of variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in this
                # file's visible import block — presumably provided via
                # `from vqe_utils import *`; verify.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` and TequilaWarning are likewise not
                # imported in the visible import block — verify availability.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # Split into the variables being optimized (active) and the rest (passive).
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
def __call__(self, Hamiltonian, unitary,
             variables: typing.List[Variable] = None,
             initial_values: typing.Dict[Variable, numbers.Real] = None,
             gradient: typing.Dict[Variable, Objective] = None,
             hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
             reset_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Perform optimization using scipy optimizers.

    Unlike stock tequila, the objective is not passed in directly: it is
    rebuilt on every evaluation from a parametrized Hamiltonian and a
    parametrized circuit, so that Hamiltonian coefficients can be optimized
    jointly with the circuit angles.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian; converted with convert_PQH_to_tq_QH.
    unitary:
        parametrized circuit whose expectation value of Hamiltonian is minimized.
    variables: list, optional:
        the variables of objective to optimize. If None: optimize all.
    initial_values: dict, optional:
        a starting point from which to begin optimization. Will be generated if None.
    gradient: optional:
        Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
    hessian: optional:
        Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
    reset_history: bool: Default = True:
        whether or not to reset all history before optimizing.
    args
    kwargs

    Returns
    -------
    ScipyReturnType:
        the results of optimization.
    """
    # Build the tequila Hamiltonian and collect its own variables; these are
    # optimized together with the circuit variables.
    H = convert_PQH_to_tq_QH(Hamiltonian)
    Ham_variables, Ham_derivatives = H._construct_derivatives()
    #print("hamvars",Ham_variables)
    all_variables = copy.deepcopy(Ham_variables)
    #print(all_variables)
    for var in unitary.extract_variables():
        all_variables.append(var)
    #print(all_variables)
    infostring = "{:15} : {}\n".format("Method", self.method)
    #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
    if self.save_history and reset_history:
        self.reset_history()
    # Split variables into those being optimized (active) and those held fixed.
    active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
    #print(active_angles, passive_angles, variables)
    # Transform the initial value directory into (ordered) arrays
    param_keys, param_values = zip(*active_angles.items())
    param_values = numpy.array(param_values)
    # process and initialize scipy bounds
    bounds = None
    if self.method_bounds is not None:
        bounds = {k: None for k in active_angles}
        for k, v in self.method_bounds.items():
            if k in bounds:
                bounds[k] = v
        infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
        names, bounds = zip(*bounds.items())
        assert (names == param_keys)  # make sure the bounds are not shuffled
    #print(param_keys, param_values)
    # do the compilation here to avoid costly recompilation during the optimization
    #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
    # E is the scipy-callable objective; it rebuilds the Hamiltonian from the
    # current variables on every call.
    E = _EvalContainer(Hamiltonian=H,
                       unitary=unitary,
                       Eval=None,
                       param_keys=param_keys,
                       samples=self.samples,
                       passive_angles=passive_angles,
                       save_history=self.save_history,
                       print_level=self.print_level)
    # one silent warm-up evaluation to populate E.infostring
    E.print_level = 0
    (E(param_values))
    E.print_level = self.print_level
    infostring += E.infostring
    if gradient is not None:
        infostring += "{:15} : {}\n".format("grad instr", gradient)
    if hessian is not None:
        infostring += "{:15} : {}\n".format("hess_instr", hessian)
    compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
    compile_hessian = self.method in self.hessian_based_methods
    dE = None
    ddE = None
    # detect if numerical gradients shall be used
    # switch off compiling if so
    if isinstance(gradient, str):
        if gradient.lower() == 'qng':
            compile_gradient = False
            if compile_hessian:
                raise TequilaException('Sorry, QNG and hessian not yet tested together.')
            # NOTE(review): `objective` is not defined in this scope (the
            # signature takes Hamiltonian/unitary) — the QNG branch looks
            # unported from stock tequila and would raise NameError; confirm.
            combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                    samples=self.samples, noise=self.noise)
            dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
            infostring += "{:15} : QNG {}\n".format("gradient", dE)
        else:
            # string such as '2-point' is forwarded to scipy for numerical gradients
            dE = gradient
            compile_gradient = False
            if compile_hessian:
                compile_hessian = False
                if hessian is None:
                    hessian = gradient
            infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
            infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
    if isinstance(gradient, dict):
        if gradient['method'] == 'qng':
            func = gradient['function']
            compile_gradient = False
            if compile_hessian:
                raise TequilaException('Sorry, QNG and hessian not yet tested together.')
            # NOTE(review): same undefined `objective` as in the branch above.
            combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                    samples=self.samples, noise=self.noise)
            dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
            infostring += "{:15} : QNG {}\n".format("gradient", dE)
    if isinstance(hessian, str):
        ddE = hessian
        compile_hessian = False
    if compile_gradient:
        # analytic gradient container; also differentiates through the
        # Hamiltonian coefficients via Ham_derivatives
        dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                            unitary=unitary,
                            Hamiltonian=H,
                            Eval=E,
                            param_keys=param_keys,
                            samples=self.samples,
                            passive_angles=passive_angles,
                            save_history=self.save_history,
                            print_level=self.print_level)
        dE.print_level = 0
        (dE(param_values))
        dE.print_level = self.print_level
        infostring += dE.infostring
    if self.print_level > 0:
        print(self)
        print(infostring)
        print("{:15} : {}\n".format("active variables", len(active_angles)))
    Es = []
    # capture self for use inside the callback closure below
    optimizer_instance = self
    class SciPyCallback:
        # per-iteration records (note: class attributes, shared across instances)
        energies = []
        gradients = []
        hessians = []
        angles = []
        real_iterations = 0
        def __call__(self, *args, **kwargs):
            self.energies.append(E.history[-1])
            self.angles.append(E.history_angles[-1])
            if dE is not None and not isinstance(dE, str):
                self.gradients.append(dE.history[-1])
            if ddE is not None and not isinstance(ddE, str):
                self.hessians.append(ddE.history[-1])
            self.real_iterations += 1
            # forward to a user-supplied callback, if any
            if 'callback' in optimizer_instance.kwargs:
                optimizer_instance.kwargs['callback'](E.history_angles[-1])
    callback = SciPyCallback()
    res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                  args=(Es,),
                                  method=self.method, tol=self.tol,
                                  bounds=bounds,
                                  constraints=self.method_constraints,
                                  options=self.method_options,
                                  callback=callback)
    # failsafe since callback is not implemented everywhere
    # NOTE(review): this local assignment is dead code (never read afterwards);
    # it presumably mirrors stock tequila — confirm before removing.
    if callback.real_iterations == 0:
        real_iterations = range(len(E.history))
    if self.save_history:
        self.history.energies = callback.energies
        self.history.energy_evaluations = E.history
        self.history.angles = callback.angles
        self.history.angles_evaluations = E.history_angles
        self.history.gradients = callback.gradients
        self.history.hessians = callback.hessians
        if dE is not None and not isinstance(dE, str):
            self.history.gradients_evaluations = dE.history
        if ddE is not None and not isinstance(ddE, str):
            self.history.hessians_evaluations = ddE.history
        # some methods like "cobyla" do not support callback functions
        if len(self.history.energies) == 0:
            self.history.energies = E.history
            self.history.angles = E.history_angles
    # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
    ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
    E_final = ea[0][0]
    angles_final = ea[0][1]  #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
    angles_final = {**angles_final, **passive_angles}
    return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy function instead and passes the objective
    construction down

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian to minimize the expectation value of.
    unitary:
        parametrized circuit preparing the trial state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize user-supplied analytic gradients/hessians to Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUG FIX: k[1] was wrapped in a list (assign_variable([k[1]])),
            # which is not a valid variable key; pass the bare key instead.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.6/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.
    :param objective (Objective or QTensor): structure to be differentiated
    :param variable (Variable): parameter with respect to which obj should be differentiated.
        default None: total gradient (one entry per variable).
    :param no_compile (bool): if True, skip the gradient-mode compilation pass
        (the caller guarantees the objective is already compiled appropriately).
    return: dictionary of Objectives if variable is None; otherwise the
        derivative Objective with respect to the single given variable.
    '''
    if variable is None:
        # None means that derivatives for all variables are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate elementwise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # derivative of an expression independent of the variable is 0
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile to gates for which shift rules are known
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch on the (compiled) structure
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # single expectation value wrapped in an Objective: differentiate the inner one
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over a transformed Objective: for every argument, multiply the
    outer derivative of the transformation by the inner derivative of the
    argument with respect to `variable`, and sum the contributions.
    """
    arguments = objective.args
    trafo = objective.transformation
    ev_cache = {}  # avoid differentiating the same expectation value twice
    total = None
    for pos, argument in enumerate(arguments):
        # outer derivative of the transformation w.r.t. argument `pos`
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_df = jax.grad(trafo, argnums=pos, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_df = jax.grad(trafo, argnum=pos)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # trivial outer derivative when there is no (or the identity) transformation
        if trafo is None or trafo == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_df)
        if hasattr(argument, "U") and argument in ev_cache:
            inner = ev_cache[argument]
        else:
            inner = __grad_inner(arg=argument, variable=variable)
            if hasattr(argument, "U"):
                ev_cache[argument] = inner
        # don't pile up zero expectationvalues
        if inner == 0.0:
            continue
        contribution = outer * inner
        total = contribution if total is None else total + contribution
    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner derivative dispatch: recurses through transforms and variables and
    bottoms out at 1.0 (0.0) when a Variable is (is not) identical to `variable`.
    :param arg: a transform, variable or expectation value to be differentiated
    :param variable: the Variable with respect to which arg is differentiated.
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        # constants have zero derivative
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # already-compiled expectation value: differentiate the abstract one
        # and recompile with the same backend arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value with respect to one
    variable: sums the shift-rule contributions of every gate that carries
    the variable.
    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which differentiation is performed.
    :return: dE/d(variable) as an Objective
    '''
    H = E.H
    U = E.U
    if not U.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))
    # fast return if the circuit does not depend on the variable
    if variable not in U.extract_variables():
        return 0.0
    total = Objective()
    # one shift-rule term per gate parametrized by `variable`
    for position, gate in U._parameter_map[variable]:
        total += __grad_shift_rule(U, gate, position, variable, H)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly differentiable gate via its shift rule.
    Expects precompiled circuits.
    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position in `unitary` at which g appears
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian measured against `unitary`
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # custom gate constructions may override shifted_gates; without it we
    # cannot differentiate (guard clause instead of if/else)
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # inner (chain-rule) derivative of the gate parameter w.r.t. the variable
    chain_factor = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * chain_factor) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_3.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Helper record holding one Pauli term in an intermediate format:
    a scalar coefficient, the list of single-qubit operators, and the
    qubit positions they act on. All fields are read-only properties.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coeff = coefficient
        self._ops = operators
        self._pos = positions

    @property
    def coefficient(self):
        """Scalar prefactor of the term."""
        return self._coeff

    @property
    def operators(self):
        """List of 2x2 operator matrices (one per position)."""
        return self._ops

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._pos
class MPOContainer:
    """
    Class that handles the MPO tensors. Is able to set values at certain
    positions, grow tensors (wannabe-equivalent to dynamic arrays) and
    compress the MPO via SVD.

    One rank-4 tensor per qubit with index order
    (left bond, right bond, physical out, physical in); physical dims are 2.
    """

    def __init__(self, n_qubits: int):
        self.n_qubits = n_qubits
        # np.complex was removed in NumPy 1.24; the builtin `complex` is the
        # documented replacement and is equivalent as a dtype.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor at `qubit`.

        set_at: where to put the data;
            length 2 -> [left, right] bond indices, add_operator is a 2x2 matrix
            length 4 -> full index, add_operator is a scalar
        """
        if len(set_at) == 2:
            # set a whole 2x2 physical matrix at one bond position
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # set one specific entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor at `qubit` like a dynamic array and place
        `add_operator` in the new bottom-right bond corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
            the last two (physical) dimensions are always 2x2 and must stay 0.
        """
        old_shape = self.container[qubit].shape
        if len(update_dir) != 4:
            if len(update_dir) == 2:
                # pad with zeros for the physical dims WITHOUT mutating the
                # caller's list (the original `update_dir += [0, 0]` modified
                # the argument in place, surprising callers that reuse it)
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # add new values in the freshly created corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO bond dimensions with a forward and a backward SVD
        sweep, dropping singular values below EPS.
        """
        n_qubits = len(self.container)
        # flatten physical indices for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # go forwards
        for q in range(n_qubits - 1):
            # apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-padded vectors
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # count the non-negligible singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # distribute sqrt of the singular weights to both sides
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            # undo the permutation on the left factor
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the next site
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the previous site
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # restore the 2x2 physical indices
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building a matrix-product-operator (MPO) representation of a qubit
    Hamiltonian given as a sum of Pauli strings. The sum is split into several
    MPOs, each SVD-compressed and capped at bond dimension `maxdim`.
    """

    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 # tensors: List[Tensor],
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: tequila QubitHamiltonian (or text form) to encode.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; if None it is derived from the
                Hamiltonian.
            name: An optional name for the MPO.
            maxdim: bond-dimension cap per MPO; when exceeded, a new MPO is
                started for the remaining terms.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class in the
            # visible source — this branch would raise AttributeError unless
            # it is provided elsewhere; always pass n_qubits explicitly.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of compressed MPOs (self.mpo)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Hamiltonian's Pauli strings into a list of SubOperator
        records (coefficient, operator matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # np.complex was removed in NumPy 1.24; builtin complex is equivalent
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # store all paulistrings in the intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # only the first term may be a pure identity (no items)
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # collect operators and the qubits they act on
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index `j`, absorbing further terms
        until the bond dimension would exceed self.maxdim.

        Returns (mpo, next_j) where next_j is the first unconsumed term index.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # -------------------------------------------------------------------
        # First term: fills the (0,0) bond slot on every site; the scalar
        # coefficient is distributed evenly as coeff**(1/n_qubits) per site.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # -------------------------------------------------------------------
        # Remaining terms: grow the bond dimension by one per term until the
        # cap is reached; compress periodically to keep dimensions small.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # boundary sites only grow one bond; interior sites grow both
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # periodic compression every 100 terms
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as needed and return them."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator (e.g. to compare with the Hamiltonian from
        tequila) by contracting every MPO and summing the results.

        Returns a rank-4 array of shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # network layout per MPO:
        #  | | | |
        # -O--O--...--O--O-
        #  | | | |
        for m in mpo:
            assert n_qubits == len(m.container)
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # connect the bond indices (the horizontal -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # collect dangling edges (free indices): left, right, then the
            # physical rows and columns
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # contract all non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # BUG FIX: previously H_m was left undefined for non-torch
                # backends (NameError); fall back to a numpy view of the tensor.
                H_m = np.asarray(res.tensor)
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_3.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Container class to interface scipy and keep the optimization history.
    Overwrites the __call__ of tequila's stock container: the Hamiltonian is
    rebuilt from the current variable values on every evaluation, so that
    Hamiltonian coefficients can themselves be optimization variables.

    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        parametrized Hamiltonian; called with the current variable dictionary
        to produce the concrete Hamiltonian for this evaluation.
    unitary:
        the circuit whose expectation value of Hamiltonian is evaluated.
    param_keys:
        ordered variable keys mapping positions in scipy's parameter vector
        to tequila variables.
    samples:
        the number of samples to evaluate the objective with (default 1024,
        unlike tequila's default of exact simulation).
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__.
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        # variables held fixed during the optimization; merged into every call
        self.passive_angles = passive_angles
        # energy container reused by the gradient container (_GradContainer)
        self.Eval = Eval
        self.infostring = None
        # analytic derivatives of the Hamiltonian coefficients (used by _GradContainer)
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate <U(p)|H(p)|U(p)> for scipy.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs

        Returns
        -------
        complex:
            value of the expectation value with p translated into variables.
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            # circuit variables stay real; Hamiltonian-coefficient variables
            # are cast to complex so the parametrized Hamiltonian accepts them
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        # NOTE(review): `vars` shadows the builtin of the same name (local scope only)
        vars = format_variable_dictionary(angles)
        # rebuild the concrete Hamiltonian for the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # jax types confuses optimizers
        # NOTE(review): scipy.optimize expects a real scalar here; returning
        # complex appears deliberate for complex Hamiltonian coefficients —
        # confirm the chosen scipy method tolerates it.
        return complex(E)
class _GradContainer(_EvalContainer):
    """
    Gradient evaluator handed to scipy as the `jac` callable.

    Combines two gradient contributions per variable:
    (1) the parameter-shift gradient of the circuit for variables appearing
        in the unitary, and
    (2) the expectation value of the Hamiltonian derivative for variables
        appearing in the Hamiltonian coefficients.
    See _EvalContainer for the shared attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient vector at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered as param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient of the energy w.r.t. each active parameter (complex64).
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with the default (real) dtype, so
        # assigning complex derivatives below silently drops the imaginary
        # part even though the return is cast to complex64 -- confirm intended.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution from the circuit: analytic tequila gradient.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution from the Hamiltonian: <U| dH/dtheta |U>.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # A plain number means only the Hamiltonian part contributed;
            # otherwise `derivative` is a compiled objective to evaluate.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer specialised for variable-dependent Hamiltonians.

    Overrides the expectation and gradient container objects of tequila's
    OptimizerSciPy so the Hamiltonian can be rebuilt at every evaluation
    (its coefficients may depend on optimization variables).
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables received in optimizer calls.

        Parameters
        ----------
        all_variables:
            all variables of the problem (Hamiltonian + circuit).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over (None: all of them).

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module's visible imports -- presumably re-exported by
                # `from vqe_utils import *`; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization of <U|H|U> using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the (parametrized) tequila circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to
            None: build the analytic _GradContainer.
        hessian: optional:
            Information or object used to calculate the hessian.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # Collect Hamiltonian variables + circuit variables as the full set.
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up evaluation to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this method (the
                # signature takes Hamiltonian/unitary) -- this qng branch would
                # raise NameError if taken; confirm before using gradient='qng'.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # pass the string ('2-point', ...) straight through to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # per-iteration callback recording energies/gradients/hessians/angles
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assignment is a dead store (local never read);
        # presumably it was meant to patch callback.real_iterations -- confirm.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Entry point: construct the local optimize_scipy optimizer and run it on
    the (parametrized-Hamiltonian, unitary) pair.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian whose expectation value is minimized.
    unitary:
        the tequila circuit U of the expectation value.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation
        (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default).
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation,
        dictionary (keys: tuple of variables, values: tequila objective)
        to define own hessian, None for automatic construction (default).
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and
        floating point numbers. If None they will all be set to zero.
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize.
    samples: int, optional:
        samples/shots per quantum-circuit run (None: full wavefunction).
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, automatically chosen if None.
    backend_options: dict, optional:
        Additional options passed to the compiled objective in every call.
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run/emulate the circuits.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods').
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation).
    method_options: dict, optional:
        Dictionary of method options (see scipy documentation).
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation).
    method_constraints: optional:
        (see scipy documentation).
    silent: bool :
        No printout if True.
    save_history: bool:
        Save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # Normalize the keys of user-supplied gradient/hessian dictionaries to
    # tequila Variables so lookups inside the optimizer succeed.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: the second tuple entry used to be wrapped in a list
            # (assign_variable([k[1]])), which assign_variable cannot convert;
            # both entries of the key tuple must be converted identically.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults and build the optimizer
    # NOTE(review): `noise_model=` is forwarded as-is; tequila's own minimize
    # passes `noise=` -- confirm against the installed OptimizerSciPy version.
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_3.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.
    :param objective: structure to be differentiated (Objective or QTensor;
        QTensors are differentiated elementwise).
    :param variable: parameter with respect to which objective should be
        differentiated. default None: total gradient, returned as a dictionary
        mapping every variable of objective to its partial derivative.
    :param no_compile: if True, skip the gradient-mode circuit compilation and
        differentiate objective as given.
    :return: dict of Objectives when variable is None; otherwise a single
        Objective representing d(objective)/d(variable).
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate every entry of the tensor independently
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile down to gates for which shift rules are available
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate a (compiled) Objective with respect to one variable via the
    chain rule: for each argument of the objective's transformation, the outer
    derivative is produced by jax/autograd and multiplied by the inner
    derivative of that argument (an expectation value or variable expression).
    :param objective: the compiled Objective to differentiate
    :param variable: the Variable to differentiate with respect to
    :return: an Objective representing the derivative
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of repeated expectation values
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner derivative d(arg)/d(variable) used by __grad_objective; recurses all
    the way down to variables, returning 1 or 0 when a variable is (isn't)
    identical to variable.
    :param arg: a Variable, FixedVariable, ExpectationValue or nested
        transform object, to be differentiated
    :param variable: the Variable with respect to which arg should be
        differentiated.
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        if arg == variable:
            return 1.0
        else:
            return 0.0
    elif isinstance(arg, FixedVariable):
        # constants differentiate to zero
        return 0.0
    elif isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    elif hasattr(arg, "abstract_expectationvalue"):
        # already-compiled expectation value: differentiate its abstract form,
        # then recompile with the original compilation arguments
        E = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(E, variable=variable)
        return compile(dE, **arg._input_args)
    else:
        # nested objective/transform: recurse via the chain rule
        return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear
    in an expectation value, by applying the shift rule to every gate of U that
    depends on the variable.
    :param E: the expectation value <U|H|U> whose gradient should be obtained
    :param variable: the variable with respect to which differentiation should
        be performed.
    :return: an Objective representing d<U|H|U>/d(variable)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all (position, gate) pairs in U that carry this variable
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for override in custom gate construction
    if hasattr(g, "shifted_gates"):
        # chain rule: derivative of the gate parameter w.r.t. the variable
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        # sum the weighted shifted expectation values (parameter-shift rule)
        for x in shifted:
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.2/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Read-only record bundling one Pauli-string term in an intermediate
    format: its scalar coefficient, the list of 2x2 operator matrices,
    and the qubit positions those operators act on.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # keep the three pieces together in a single tuple
        self._term = (coefficient, operators, positions)
    @property
    def coefficient(self):
        """Scalar prefactor of this Pauli-string term."""
        return self._term[0]
    @property
    def operators(self):
        """List of 2x2 operator matrices of this term."""
        return self._term[1]
    @property
    def positions(self):
        """List of qubit indices the operators act on."""
        return self._term[2]
class MPOContainer:
    """
    Mutable container holding one MPO tensor per qubit.

    Each entry of ``container`` is a rank-4 complex tensor of shape
    (left_bond, right_bond, 2, 2); the physical dimensions are always 2x2.
    Tensors can be grown dynamically (update_container, mimicking a dynamic
    array) and the whole MPO can be compressed via a two-sweep SVD
    (compress_mpo).

    FIX: ``np.complex`` (deprecated in NumPy 1.20, removed in 1.24) has been
    replaced with ``np.complex128`` for dtype arguments.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # one 1x1 bond-dimension tensor per qubit, initially all zeros
        self.container = [ np.zeros((1,1,2,2), dtype=np.complex128)
                           for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max (left) bond dimension over all container tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of one qubit.

        set_at: where to put data --
            length 2: [left, right] bond indices, add_operator is a 2x2 matrix;
            length 4: full index, add_operator is a scalar.
        Raises:
            Exception: if set_at has any other length.
        """
        if len(set_at) == 2:
            # set a full 2x2 physical matrix at one bond position
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        elif len(set_at) == 4:
            # set a single scalar entry
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow one qubit's tensor like a dynamic array and place add_operator
        in the new corner block.

        update_dir: e.g. [1,1,0,0] -> extend the bond dimension along every
            axis carrying a 1; the last two (physical) dimensions must stay 2x2.
        Raises:
            Exception: if update_dir has a bad length or touches the last
            two (physical) dimensions.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # pad without mutating the caller's list (was `+=` before)
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=np.complex128)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the fresh corner block
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compress the MPO with a forward and a backward SVD sweep, discarding
        singular values below EPS and splitting the remaining weights evenly
        between the left and right factors.
        """
        n_qubits = len(self.container)
        # flatten the two physical legs into one for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb vh into the next site
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb u into the previous site
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # restore the two physical legs
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
"""
Class building up on tensornetwork FiniteMPO to handle
MPO-Hamiltonians
"""
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize an MPO wrapper around a qubit Hamiltonian.

        Args:
            hamiltonian: the tq.QubitHamiltonian (or its text form) to be
                converted into an MPO.
            backend: An optional backend. Not stored or used here.
            n_qubits: number of qubits; if None it is inferred.
            name: An optional name for the MPO. Not stored or used here.
            maxdim: maximal allowed MPO bond dimension during construction.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this part of the
            # file -- presumably implemented further down; confirm.
            self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        """Number of qubits this MPO acts on."""
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into an MPO and store it as self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        # for i in range(len(intermediate)):
        #     print(intermediate[i].coefficient)
        #     print(intermediate[i].operators)
        #     print(intermediate[i].positions)
        self.mpo = self.intermediate_to_mpo(intermediate)
def openfermion_to_intermediate(self):
# Here, have either a QubitHamiltonian or a file with a of-operator
# Start with Qubithamiltonian
def get_pauli_matrix(string):
pauli_matrices = {
'I': np.array([[1, 0], [0, 1]], dtype=np.complex),
'Z': np.array([[1, 0], [0, -1]], dtype=np.complex),
'X': np.array([[0, 1], [1, 0]], dtype=np.complex),
'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex)
}
return pauli_matrices[string.upper()]
intermediate = []
first = True
# Store all paulistrings in intermediate format
for paulistring in self.hamiltonian.paulistrings:
coefficient = paulistring.coeff
# print(coefficient)
operators = []
positions = []
# Only first one should be identity -> distribute over all
if first and not paulistring.items():
positions += []
operators += []
first = False
elif not first and not paulistring.items():
raise Exception("Only first Pauli should be identity.")
# Get operators and where they act
for k,v in paulistring.items():
positions += [k]
operators += [get_pauli_matrix(v)]
tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
intermediate += [tmp_op]
# print("len intermediate = num Pauli strings", len(intermediate))
return intermediate
def build_single_mpo(self, intermediate, j):
# Set MPO Container
n_qubits = self._n_qubits
mpo = MPOContainer(n_qubits=n_qubits)
# ***********************************************************************
# Set first entries (of which we know that they are 2x2-matrices)
# Typically, this is an identity
my_coefficient = intermediate[j].coefficient
my_positions = intermediate[j].positions
my_operators = intermediate[j].operators
for q in range(n_qubits):
if not q in my_positions:
mpo.set_tensor(qubit=q, set_at=[0,0],
add_operator=np.complex(my_coefficient)**(1/n_qubits)*
np.eye(2))
elif q in my_positions:
my_pos_index = my_positions.index(q)
mpo.set_tensor(qubit=q, set_at=[0,0],
add_operator=np.complex(my_coefficient)**(1/n_qubits)*
my_operators[my_pos_index])
# ***********************************************************************
# All other entries
# while (j smaller than number of intermediates left) and mpo.dim() <= self.maxdim
# Re-write this based on positions keyword!
j += 1
while j < len(intermediate) and mpo.get_dim() < self.maxdim:
# """
my_coefficient = intermediate[j].coefficient
my_positions = intermediate[j].positions
my_operators = intermediate[j].operators
for q in range(n_qubits):
# It is guaranteed that every index appears only once in positions
if q == 0:
update_dir = [0,1]
elif q == n_qubits-1:
update_dir = [1,0]
else:
update_dir = [1,1]
# If there's an operator on my position, add that
if q in my_positions:
my_pos_index = my_positions.index(q)
mpo.update_container(qubit=q, update_dir=update_dir,
add_operator=
np.complex(my_coefficient)**(1/n_qubits)*
my_operators[my_pos_index])
# Else add an identity
else:
mpo.update_container(qubit=q, update_dir=update_dir,
add_operator=
np.complex(my_coefficient)**(1/n_qubits)*
np.eye(2))
if not j % 100:
mpo.compress_mpo()
#print("\t\tAt iteration ", j, " MPO has dimension ", mpo.get_dim())
j += 1
mpo.compress_mpo()
#print("\tAt final iteration ", j-1, " MPO has dimension ", mpo.get_dim())
return mpo, j
def intermediate_to_mpo(self, intermediate):
n_qubits = self._n_qubits
# TODO Change to multiple MPOs
mpo_list = []
j_global = 0
num_mpos = 0 # Start with 0, then final one is correct
while j_global < len(intermediate):
current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
mpo_list += [current_mpo]
num_mpos += 1
return mpo_list
def construct_matrix(self):
# TODO extend to lists of MPOs
''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq '''
mpo = self.mpo
# Contract over all bond indices
# mpo.container has indices [bond, bond, physical, physical]
n_qubits = self._n_qubits
d = int(2**(n_qubits/2))
first = True
H = None
#H = np.zeros((d,d,d,d), dtype='complex')
# Define network nodes
# | | | |
# -O--O--...--O--O-
# | | | |
for m in mpo:
assert(n_qubits == len(m.container))
nodes = [tn.Node(m.container[q], name=str(q))
for q in range(n_qubits)]
# Connect network (along double -- above)
for q in range(n_qubits-1):
nodes[q][1] ^ nodes[q+1][0]
# Collect dangling edges (free indices)
edges = []
# Left dangling edge
edges += [nodes[0].get_edge(0)]
# Right dangling edge
edges += [nodes[-1].get_edge(1)]
# Upper dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(2)]
# Lower dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(3)]
# Contract between all nodes along non-dangling edges
res = tn.contractors.auto(nodes, output_edge_order=edges)
# Reshape to get tensor of order 4 (get rid of left- and right open indices
# and combine top&bottom into one)
if isinstance(res.tensor, torch.Tensor):
H_m = res.tensor.numpy()
if not first:
H += H_m
else:
H = H_m
first = False
return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.2/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    objective:
        the objective to evaluate.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # infostring is rebuilt on every __call__ and read by the optimizer
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        call a wrapped objective.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        numpy.array:
            value of self.objective with p translated into variables, as a numpy array.
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # Circuit variables keep their raw value; variables that only appear
        # in the (parametrized) Hamiltonian are cast to complex.
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # The Hamiltonian is parametrized: evaluate it at the current angles
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        call the wrapped qng.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            value of self.objective with p translated into variables, as a numpy array.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with a real dtype but may receive
        # complex-valued derivatives below (the function finally returns
        # complex64) -- confirm numpy does not discard imaginary parts here.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution from the parametrized circuit (shift rule via tq.grad)
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution from the parametrized Hamiltonian (dH/dtheta term)
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                # derivative is still an abstract (compiled) objective: evaluate it
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    overwrite the expectation and gradient container objects

    Variant of tequila's OptimizerSciPy that optimizes a *parametrized*
    Hamiltonian together with a parametrized circuit, using the local
    _EvalContainer/_GradContainer classes defined above.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective received in calls to optimizers.
        Parameters
        ----------
        objective: Objective:
            the objective being optimized.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        # NOTE(review): self-assignment below is a no-op kept from upstream
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in this
                # file (the tequila container import at the top is commented out)
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings / TequilaWarning are not imported here
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        objective: Objective:
            the objective to optimize.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Convert the parametrized Hamiltonian and collect its variables
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up evaluation to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is undefined in this scope and
                # get_qng_combos/_QngContainer are not imported -- this branch
                # would fail at runtime; verify before using gradient='qng'.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # collects per-iteration data from the containers' histories
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a never-used local -- kept as-is
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]  #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and pass down the objective construction
    down
    Parameters
    ----------
    objective: Objective :
        The tequila objective to optimize
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUG FIX: assign_variable was called on the one-element list
            # [k[1]] instead of the key k[1] itself (matches the gradient
            # handling and upstream tequila).
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.2/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param obj (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
    :param variables (list of Variable): parameter with respect to which obj should be differentiated.
        default None: total gradient.
    return: dictionary of Objectives, if called on gate, circuit, exp.value, or objective; if Variable or Transform, returns number.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # elementwise gradient over a tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile gates into a form whose shift rules are known
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over the arguments of an Objective:
    d f(args) / d var = sum_i (df/darg_i) * (darg_i/dvar).
    The outer factor df/darg_i is built with jax/autograd; the inner factor
    is delegated to __grad_inner.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values to avoid recomputation
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # NOTE(review): holomorphic=True requires complex-valued in- and
            # outputs of the transformation -- confirm this holds for all
            # objectives passed through here.
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the chain rule: differentiate *arg* with respect to
    *variable*.
    A Variable differentiates to 1.0 if it is the variable itself, else 0.0;
    a FixedVariable always to 0.0; expectation values are delegated to
    __grad_expectationvalue; everything else is treated as an Objective.
    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(abstract, variable=variable)
        return compile(dE, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param unitary: the unitary whose gradient should be obtained
    :param variables (list, dict, str): the variables with respect to which differentiation should be performed.
    :return: vector (as dict) of dU/dpi as Objective (without hamiltonian)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all gates of the circuit that are parametrized by this variable
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    # sum one shift-rule contribution per occurrence of the variable
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for overwride in custom gate construction
    if hasattr(g, "shifted_gates"):
        # inner derivative of the gate's parameter w.r.t. the variable
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        for x in shifted:
            w, g = x
            # circuit with the gate at position i replaced by its shifted version
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.2/test/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight read-only record bundling one Hamiltonian term in an
    intermediate format: a scalar coefficient, the list of 2x2 operator
    matrices, and the qubit positions those operators act on.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # store the three fields together; exposed via read-only properties
        self._data = (coefficient, operators, positions)

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._data[0]

    @property
    def operators(self):
        """List of 2x2 operator matrices."""
        return self._data[1]

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._data[2]
class MPOContainer:
    """
    Mutable container for the site tensors of a matrix product operator (MPO).

    Supports setting values at given positions, growing the bond dimensions
    (wannabe-equivalent to dynamic arrays) and compressing the MPO via SVD.
    Each site tensor has index order [bond_left, bond_right, phys_out, phys_in];
    the two physical dimensions are always 2 (qubits).

    Fixes vs. the original: `np.complex` (removed in NumPy 1.24) replaced by
    the equivalent builtin `complex`, and `update_container` no longer mutates
    the caller's `update_dir` list.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # One rank-4 tensor per qubit; both bond dimensions start at 1.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """ Returns max (left) bond dimension over all site tensors. """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write `add_operator` into the tensor of `qubit`.

        set_at: [bond_l, bond_r] to place a full 2x2 matrix, or
                [bond_l, bond_r, phys_out, phys_in] to place a single scalar.

        Raises a generic Exception for any other length of `set_at`.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the site tensor of `qubit` like a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend the bond dimension along every
        axis marked with a 1; the trailing two (physical) dimensions must stay
        2x2.  The 2x2 `add_operator` is written into the new bottom-right
        bond corner.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # Rebind instead of `+=` so the caller's list is not mutated.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values into the freshly created corner
        new_tensor[new_shape[0]-1, new_shape[1]-1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of the MPO via two SVD sweeps (left-to-right, then
        right-to-left), dropping singular values below EPS and distributing
        sqrt(singular values) to both sides.
        """
        n_qubits = len(self.container)
        # Fuse the two physical legs so each tensor is rank 3: (bond, bond, 4)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb vh into the next site tensor
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb u into the previous site tensor
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3), (1, -2)])
        # Split the fused physical leg back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds (a list of) matrix product operators from a tequila
    QubitHamiltonian, term by term, compressing on the fly and starting a new
    MPO whenever the bond dimension would exceed `maxdim`.

    Fixes vs. the original: `np.complex` (removed in NumPy 1.24) replaced by
    builtin `complex`; `get_n_qubits` is now actually defined (the original
    called it without defining it); `construct_matrix` no longer leaves `H_m`
    unbound when the tensornetwork backend is not pytorch.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: tequila QubitHamiltonian (or a text identifier).
            backend: An optional backend. Defaults to the default backend
              of TensorNetwork.
            n_qubits: number of qubits; inferred from the Hamiltonian if omitted.
            name: An optional name for the MPO.
            maxdim: bond-dimension cap; a new MPO is started once exceeded.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        return self._n_qubits

    def get_n_qubits(self):
        """Infer the qubit count from the Hamiltonian when n_qubits is not given."""
        # NOTE: the original class called this method without defining it,
        # raising AttributeError whenever n_qubits was omitted.
        # tq.QubitHamiltonian exposes the qubit count directly.
        if hasattr(self.hamiltonian, "n_qubits"):
            return self.hamiltonian.n_qubits
        raise ValueError("n_qubits not given and cannot be inferred from the Hamiltonian")

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Convert the Hamiltonian's Pauli strings into a list of SubOperator."""
        # Here, have either a QubitHamiltonian or a file with an of-operator.
        # Start with QubitHamiltonian.
        def get_pauli_matrix(string):
            # 2x2 Pauli matrices; builtin complex replaces removed np.complex.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity -> distributed over all qubits
            if first and not paulistring.items():
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index `j`, absorbing terms until either
        the intermediate list is exhausted or the bond dimension reaches
        self.maxdim.  Returns (mpo, next_j).
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices).
        # The coefficient is spread evenly over all sites via its n-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimension by one per term, then
        # periodically compress.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions.
                # Boundary sites only grow one bond; bulk sites grow both.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits-1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits) *
                                         np.eye(2))
            # Compress every 100 terms to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as maxdim requires."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        ''' Recover the dense operator, e.g. to compare with the Hamiltonian
        matrix obtained from tequila.  Sums the contributions of all MPOs in
        self.mpo and returns a tensor of shape (d, d, d, d) with d = 2**(n/2).
        '''
        mpo = self.mpo
        # Contract over all bond indices.
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # BUGFIX: the original only assigned H_m for torch tensors, leaving
            # it unbound (NameError) for every other tensornetwork backend.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = np.asarray(res.tensor)
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
# ---- end of file ----
# ---- file: data/beh2/beh2_spaclust/beh2_wfn_bl_1.2/test/scipy_optimizer.py ----
import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
"""
Overwrite the call function
Container Class to access scipy and keep the optimization history.
This class is used by the SciPy optimizer and should not be used elsewhere.
Attributes
---------
objective:
the objective to evaluate.
param_keys:
the dictionary mapping parameter keys to positions in a numpy array.
samples:
the number of samples to evaluate objective with.
save_history:
whether or not to save, in a history, information about each time __call__ occurs.
print_level
dictates the verbosity of printing during call.
N:
the length of param_keys.
history:
if save_history, a list of energies received from every __call__
history_angles:
if save_history, a list of angles sent to __call__.
"""
def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
print_level: int = 3):
self.Hamiltonian = Hamiltonian
self.unitary = unitary
self.samples = samples
self.param_keys = param_keys
self.N = len(param_keys)
self.save_history = save_history
self.print_level = print_level
self.passive_angles = passive_angles
self.Eval = Eval
self.infostring = None
self.Ham_derivatives = Ham_derivatives
if save_history:
self.history = []
self.history_angles = []
def __call__(self, p, *args, **kwargs):
"""
call a wrapped objective.
Parameters
----------
p: numpy array:
Parameters with which to call the objective.
args
kwargs
Returns
-------
numpy.array:
value of self.objective with p translated into variables, as a numpy array.
"""
angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
for i in range(self.N):
if self.param_keys[i] in self.unitary.extract_variables():
angles[self.param_keys[i]] = p[i]
else:
angles[self.param_keys[i]] = complex(p[i])
if self.passive_angles is not None:
angles = {**angles, **self.passive_angles}
vars = format_variable_dictionary(angles)
Hamiltonian = self.Hamiltonian(vars)
#print(Hamiltonian)
#print(self.unitary)
#print(vars)
Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
#print(Expval)
E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
if self.print_level > 2:
print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
elif self.print_level > 1:
print("E={:+2.8f}".format(E))
if self.save_history:
self.history.append(E)
self.history_angles.append(angles)
return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient counterpart of _EvalContainer for the SciPy optimizer.

    __call__ returns the gradient vector at the scipy parameter vector `p`.
    For each parameter it sums two contributions: the derivative of the
    expectation value w.r.t. a circuit variable (via tq.grad) and the
    expectation value of the Hamiltonian's own derivative (via
    self.Ham_derivatives).  See _EvalContainer for the stored attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector `p`.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.

        Returns
        -------
        numpy.array:
            gradient values as a complex64 array (one entry per param key).
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with real dtype, so any imaginary
        # part of a derivative is discarded here even though the final return
        # casts to complex64 — confirm this is intended.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # Circuit variables stay real; Hamiltonian parameters become complex
        # (mirrors _EvalContainer.__call__).
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution 1: derivative of <H> w.r.t. a circuit variable.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution 2: expectation value of dH/dtheta for a
            # Hamiltonian parameter.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # If only numbers were accumulated, store directly; otherwise
            # `derivative` is still a compiled objective and must be evaluated.
            # NOTE(review): this check misses builtin complex (possible when
            # only contribution 2 fired) — confirm against the call sites.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer that overwrites the expectation- and gradient-
    container objects of tequila's OptimizerSciPy so that the Hamiltonian
    itself may depend on optimization variables (via convert_PQH_to_tq_QH).
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            every variable of the problem (Hamiltonian + circuit variables).
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        # NOTE(review): self-assignment is a no-op kept from the original.
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this file — this branch would raise NameError if hit.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings / TequilaWarning are not imported in
                # this file — this branch would raise NameError if hit.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # Split into the variables that scipy optimizes (active) and the
        # fixed remainder (passive).
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the ansatz circuit whose expectation value is minimized.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Build the tq Hamiltonian and its parameter-derivatives; collect
        # Hamiltonian variables plus circuit variables into one list.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        # Energy container; evaluated once silently to populate infostring.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    # NOTE(review): TequilaException is not imported here.
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective`, get_qng_combos and _QngContainer
                # are undefined in this file — the QNG path would raise
                # NameError if taken.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # Numerical scipy gradient ('2-point', 'cs', '3-point', ...)
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined-name issue as the QNG path above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytic gradient container; evaluated once silently like E.
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # Per-iteration callback: snapshots the latest energy/angles
        # (and gradient/hessian histories when available).
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local that is never used afterwards
            # (kept as-is from the original implementation).
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Call the local optimize_scipy function and pass the objective construction
    down to it.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian defining the energy (converted internally).
    unitary:
        the ansatz circuit whose expectation value is minimized.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # Normalize a user-supplied gradient dictionary into tequila variables.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUGFIX: the original passed assign_variable([k[1]]) — a
            # one-element list — which raises inside assign_variable.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    # NOTE(review): noise is forwarded as `noise_model` — confirm this matches
    # the keyword expected by the installed OptimizerSciPy version.
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
# ---- file: data/beh2/beh2_spaclust/beh2_wfn_bl_1.2/test/grad_hacked.py ----
from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper for differentiating Objectives, ExpectationValues, unitaries
    (including single gates) and transforms.

    :param objective: structure to be differentiated
    :param variable: the variable with respect to which `objective` is
        differentiated; None means the total gradient (one entry per variable)
    :param no_compile: skip the internal gradient-mode compilation pass
    :return: a dict {variable: gradient} when variable is None, otherwise the
        gradient Objective (elementwise for QTensor inputs)
    '''
    # Total gradient: recurse once per variable of the objective.
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for v in all_vars:
            assert (v is not None)
            gradients[v] = grad(objective, v, no_compile=no_compile)
        return gradients
    variable = assign_variable(variable)
    # Tensors of objectives: differentiate elementwise.
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)
    # Objective does not depend on the variable -> zero gradient.
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        compiled = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Chain rule over all arguments of an Objective.

    For every argument the outer derivative of the transformation (via
    jax/autograd) is multiplied with the inner derivative of the argument
    w.r.t. ``variable``; the products are accumulated into one Objective.
    Inner derivatives of expectation values are cached so each distinct
    expectation value is differentiated only once.
    """
    arguments = objective.args
    trafo = objective.transformation
    inner_cache = {}  # expectationvalue -> its inner derivative
    total = None
    for position, argument in enumerate(arguments):
        # outer derivative of the transformation w.r.t. this argument
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(trafo, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(trafo, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # shortcut: identity transformation has a constant outer derivative of 1
        if trafo is None or trafo == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=df)
        if hasattr(argument, "U"):
            # expectation value: reuse a previously computed inner derivative
            if argument not in inner_cache:
                inner_cache[argument] = __grad_inner(arg=argument, variable=variable)
            inner = inner_cache[argument]
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=argument, variable=variable)
        if inner == 0.0:
            # vanishing contribution: don't pile up zero expectationvalues
            continue
        total = outer * inner if total is None else total + outer * inner
    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """Differentiate a single argument with respect to ``variable``.

    Variables yield 1.0 (match) or 0.0 (no match), FixedVariables yield 0.0,
    expectation values are handed to ``__grad_expectationvalue`` and anything
    else is treated as a nested objective.

    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg is differentiated
    """
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # wrapped expectation value: differentiate the abstract form and
        # recompile with the wrapper's original input arguments
        abstract = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(abstract, variable=variable)
        return compile(dE, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """Analytic partial derivative of an expectation value.

    Applies the shift rule to every gate of the unitary that is parametrized
    by ``variable`` and sums the resulting objectives.

    :param E: the expectation value to differentiate
    :param variable: the variable to differentiate with respect to
    :return: an Objective representing dE/d(variable)
    """
    hamiltonian = E.H
    circuit = E.U
    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))
    # fast return: no gate depends on this variable
    if variable not in circuit.extract_variables():
        return 0.0
    result = Objective()
    # _parameter_map yields (position, gate) pairs for this variable
    for position, gate in circuit._parameter_map[variable]:
        result += __grad_shift_rule(circuit, gate, position, variable, hamiltonian)
    assert result is not None
    return result
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """Gradient of one directly differentiable gate via its shift rule.

    Expects precompiled circuits: the gate must expose ``shifted_gates``
    (possibility for override in custom gate construction).

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of ``g`` inside ``unitary``
    :param variable: the variable with respect to which ``g`` is differentiated
    :param hamiltonian: the hamiltonian measured against ``unitary``
    :return: an Objective whose evaluation yields the gradient of g w.r.t variable
    """
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    inner_grad = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace the original gate by its shifted version and weight the
        # resulting expectation value by (shift weight) * (inner derivative)
        replaced = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expval = Objective.ExpectationValue(U=replaced, H=hamiltonian)
        result += (weight * inner_grad) * expval
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Read-only record bundling one Pauli term's coefficient with its
    operator matrices and the qubit positions they act on.

    Serves as the intermediate format between a tequila/openfermion
    Hamiltonian and the MPO construction.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        # stored privately; exposed through the read-only properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices, aligned index-by-index with ``positions``."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which ``operators`` act."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the MPO.

    Each site tensor in ``self.container`` has index order
    [left bond, right bond, physical row, physical column]; the two physical
    dimensions are fixed to 2 (qubits).
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # FIX: builtin ``complex`` replaces the numpy alias ``np.complex``
        # (deprecated NumPy 1.20, removed 1.24 -- it was the same type)
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                                for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns max (left) bond dimension over all site tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write ``add_operator`` into the site tensor of ``qubit``.

        set_at: [b1, b2] -> place a full 2x2 matrix at bond indices (b1, b2)
                [b1, b2, p1, p2] -> set one scalar entry

        Raises:
            Exception: if ``set_at`` has a length other than 2 or 4.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array: grow the bond dimensions of the
        site tensor of ``qubit`` and write ``add_operator`` into the new
        bottom-right bond corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions always stay 2x2.

        Raises:
            Exception: on malformed ``update_dir``.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # FIX: ``complex`` instead of the removed ``np.complex`` alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the freshly grown corner
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD.

        Performs a left-to-right then a right-to-left sweep, truncating
        singular values below EPS and distributing sqrt-weights to both
        neighbouring sites.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices so each site is a rank-3 tensor
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the neighbouring site
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the neighbouring site
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle
    MPO-Hamiltonians.

    Converts a tequila QubitHamiltonian into a list of MPOContainers
    (``self.mpo`` after :meth:`make_mpo_from_hamiltonian`), splitting into a
    new MPO whenever the bond dimension would exceed ``maxdim``.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object

        Args:
            hamiltonian: tequila QubitHamiltonian (or text input).
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred from the Hamiltonian if omitted.
            name: An optional name for the MPO.
            maxdim: maximal MPO bond dimension before a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits this MPO acts on."""
        return self._n_qubits

    def get_n_qubits(self):
        """Infer the qubit count from the Hamiltonian.

        FIX: this method was called from ``__init__`` but never defined, so
        constructing without an explicit ``n_qubits`` raised AttributeError.
        NOTE(review): assumes ``hamiltonian`` is a tq.QubitHamiltonian
        exposing ``n_qubits`` -- confirm for a file/text based Hamiltonian.
        """
        return self.hamiltonian.n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build ``self.mpo`` (list of MPOContainer) from the Hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Translate the Hamiltonian's Pauli strings into SubOperator records."""
        # Here, have either a QubitHamiltonian or a file with a of-operator
        # Start with Qubithamiltonian
        def get_pauli_matrix(string):
            # builtin ``complex`` replaces the removed numpy alias ``np.complex``
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """Accumulate Pauli terms starting at index ``j`` into one MPO until
        the list is exhausted or the bond dimension reaches ``maxdim``.

        Returns:
            (mpo, j): the built MPOContainer and the index of the first
            unconsumed term.
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        # the coefficient is distributed evenly over all sites via its n-th root
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # periodically compress to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split ``intermediate`` into as many MPOs as needed so each stays
        below ``maxdim``; returns the list of MPOContainers."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        ''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq.

        Returns a rank-4 array of shape (d,d,d,d) with d = 2**(n_qubits/2),
        summed over all MPOs in ``self.mpo``.
        '''
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # FIX: non-torch backends (e.g. numpy) already return an
                # ndarray; previously H_m stayed unbound here -> NameError
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Energy-evaluation wrapper handed to scipy.optimize; should not be used
    elsewhere. Keeps the optimization history.

    Translates a flat parameter vector into a variable dictionary, builds
    the parametrized Hamiltonian, simulates the expectation value on the
    qulacs backend and optionally records every evaluation.

    Attributes
    ---------
    Hamiltonian:
        callable producing a tq Hamiltonian from a variable dictionary.
    unitary:
        the parametrized circuit.
    param_keys:
        ordered variable keys matching positions in the parameter vector.
    samples:
        number of shots per evaluation.
    passive_angles:
        variables held fixed during the optimization.
    save_history:
        whether to record every energy/angle pair in history/history_angles.
    print_level:
        verbosity of per-call printing.
    N:
        number of active parameters (len(param_keys)).
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.samples = samples
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.Ham_derivatives = Ham_derivatives
        self.print_level = print_level
        self.save_history = save_history
        self.infostring = None
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like ``param_keys``.

        Returns
        -------
        complex:
            the simulated energy (cast to plain complex, see return comment).
        """
        circuit_variables = self.unitary.extract_variables()
        angles = {}
        for index in range(self.N):
            key = self.param_keys[index]
            # circuit angles are passed through; Hamiltonian parameters are
            # cast to complex
            angles[key] = p[index] if key in circuit_variables else complex(p[index])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        formatted = format_variable_dictionary(angles)
        ham = self.Hamiltonian(formatted)
        expval = tq.ExpectationValue(H=ham, U=self.unitary)
        energy = tq.simulate(expval, formatted, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(energy), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(energy))
        if self.save_history:
            self.history.append(energy)
            self.history_angles.append(angles)
        return complex(energy)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient wrapper handed to scipy.optimize; should not be used elsewhere.
    Keeps the optimization history. See _EvalContainer for attribute details.

    Combines analytic circuit gradients (tq.grad on the expectation value)
    with Hamiltonian-parameter derivatives (expectation values of the
    pre-computed derivative Hamiltonians in ``Ham_derivatives``).
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient vector at parameter point ``p``.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to evaluate the gradient.

        Returns
        -------
        numpy.array:
            gradient w.r.t. every active parameter, dtype complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # FIX: the accumulator must be complex -- a plain float64 array raises
        # TypeError as soon as a complex derivative is assigned below (the
        # final cast to complex64 shows complex values are expected)
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # circuit angles are passed through; Hamiltonian parameters are
            # cast to complex (mirrors _EvalContainer.__call__)
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # analytic gradient w.r.t. a circuit parameter
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # derivative w.r.t. a Hamiltonian parameter: expectation value of
            # the corresponding derivative Hamiltonian
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # FIX: numbers (python/numpy, real or complex) are stored directly;
            # the old check missed plain ``complex`` and then tried to call it.
            # Anything else is a compiled objective that must be evaluated.
            if isinstance(derivative, (float, complex, numpy.floating, numpy.complexfloating)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    overwrite the expectation and gradient container objects

    SciPy-based optimizer that evaluates energies/gradients through the local
    _EvalContainer/_GradContainer, which rebuild the (parametrized)
    Hamiltonian at every evaluation.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        objective: Objective:
            the objective being optimized.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables  # NOTE(review): self-assignment, no-op
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported here
                # explicitly -- presumably provided via ``from vqe_utils import *``;
                # confirm, otherwise this raises NameError instead
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): ``warnings`` and ``TequilaWarning`` are not
                # visibly imported in this file -- TODO confirm star import
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            parametrized circuit whose expectation value is minimized.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # derivative Hamiltonians w.r.t. the Hamiltonian's own parameters
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        # optimize over Hamiltonian parameters AND circuit parameters
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up evaluation to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): ``objective`` is not defined in this scope and
                # ``get_qng_combos``/``_QngContainer`` imports are commented out
                # at the top of the file -- this branch raises NameError if taken
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # numerical gradient string ('2-point', 'cs', '3-point'):
                # pass straight through to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined-name issue as the 'qng' string branch above
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up evaluation, mirrors the energy container above
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # NOTE(review): these are class-level (shared) attributes; fine for
            # the single instance created below
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                # record the most recent evaluation at each scipy iteration
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is never used afterwards -- dead code
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Minimize the expectation value of a (parametrized) Hamiltonian over a
    parametrized unitary by delegating to the local optimize_scipy optimizer.

    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian whose expectation value is minimized
    unitary:
        the parametrized circuit preparing the trial state
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run (passed through to the backend)
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of method options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # normalize analytic gradients/hessians supplied as dictionaries so the
    # keys are proper Variable objects
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # bugfix: assign_variable previously received the one-element
            # list [k[1]] instead of the variable k[1] itself
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated (Objective, QTensor or
        ExpectationValue-like object)
    :param variable: parameter with respect to which objective should be
        differentiated; default None computes the total gradient with respect
        to every variable of the objective.
    :param no_compile: if True, skip the gradient-mode compilation pass and
        differentiate the objective as given.
    :return: dictionary mapping each variable to its gradient when variable is
        None; otherwise the gradient object for the given variable.
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate elementwise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # objective does not depend on the variable: zero gradient
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile to a gate set for which shift rules are available
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch on the (compiled) objective type
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    '''
    Differentiate a transformed Objective by the chain rule: for each argument,
    multiply the outer derivative of the transformation (via jax/autograd)
    with the inner derivative of the argument (via __grad_inner) and sum.

    :param objective: the (compiled) Objective to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: an Objective representing the derivative
    '''
    args = objective.args
    transformation = objective.transformation
    dO = None
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # outer derivative of the classical transformation w.r.t. argument i
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies: cache inner derivatives of expectation values
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner-derivative dispatch used by __grad_objective: returns 1.0/0.0 when
    arg is (is not) the differentiation variable, and recurses into
    expectation values and nested objectives otherwise.

    :param arg: a Variable, FixedVariable, ExpectationValue or Objective to be
        differentiated
    :param variable: the Variable with respect to which arg should be
        differentiated
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        # d(arg)/d(variable) is 1 iff they are the same variable
        if arg == variable:
            return 1.0
        else:
            return 0.0
    elif isinstance(arg, FixedVariable):
        return 0.0
    elif isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    elif hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one and
        # recompile with the original input arguments
        E = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(E, variable=variable)
        return compile(dE, **arg._input_args)
    else:
        # nested objective: recurse
        return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U* H U> with respect
    to one variable, accumulated over every gate of U that depends on it via
    the shift rule.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which differentiation should
        be performed
    :return: the derivative as an Objective
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all (position, gate) pairs in U that depend on the variable
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of one directly differentiable gate via its shift rule.
    Expects precompiled circuits.

    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for override in custom gate construction
    if hasattr(g, "shifted_gates"):
        # chain rule: derivative of the gate parameter w.r.t. the variable
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        # sum of weighted expectation values with g replaced by its shifts
        for x in shifted:
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.0/test/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Read-only record for one Hamiltonian term in intermediate format.

    Bundles a term's scalar coefficient with the operator matrices and the
    qubit positions they act on; all three are exposed through properties.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # keep everything in a single private tuple
        self._data = (coefficient, operators, positions)
    @property
    def coefficient(self):
        """Scalar prefactor of the term."""
        return self._data[0]
    @property
    def operators(self):
        """Operator matrices belonging to the term."""
        return self._data[1]
    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._data[2]
class MPOContainer:
    """
    Container for one MPO as a list of rank-4 numpy tensors, one per qubit,
    with index order (bond_left, bond_right, phys_out, phys_in); the physical
    dimensions are always 2x2. Supports writing entries at given positions,
    growing the bond dimensions in dynamic-array fashion, and SVD-based
    compression of the whole chain.
    """
    # singular values below this cutoff are treated as zero during compression
    EPS = 1e-12

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # bugfix: np.complex was removed in NumPy 1.24; the builtin complex
        # is the equivalent dtype (complex128)
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left-bond dimension over all site tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: [bond_l, bond_r] to set a whole 2x2 matrix, or
                [bond_l, bond_r, phys_out, phys_in] to set a single value.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set a specific value
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array and place
        `add_operator` into the newly created corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions must stay 2x2.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # fix: do not mutate the caller's list with +=
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values into the new bottom-right bond block
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO bond dimensions with a forward and a backward SVD
        sweep, discarding singular values below EPS. Up to that truncation,
        the represented operator is unchanged.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each tensor is rank 3
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-padded vectors
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > self.EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            # Undo the permutation and restore the rank-3 shape
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb vh into the right neighbour: contract vh's column index
            # with the neighbour's left bond (replaces the tn.ncon call with
            # the equivalent numpy contraction, dropping the third-party dep)
            self.container[q + 1] = np.tensordot(vh, self.container[q + 1], axes=(1, 0))
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > self.EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb u into the left neighbour: contract the neighbour's right
            # bond with u's row index, keeping axis order (left, right, phys)
            self.container[q - 1] = np.tensordot(self.container[q - 1], u,
                                                 axes=(1, 0)).transpose(0, 2, 1)
        # Split the merged physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build and store an MPO representation of a qubit Hamiltonian.

    The Hamiltonian is first converted into an intermediate list of
    SubOperator terms, which are then assembled into one or more
    MPOContainer objects whose bond dimension is capped by ``maxdim``.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.
        Args:
          hamiltonian: the tq.QubitHamiltonian (or its text form) to represent.
          backend: An optional backend. Defaults to the default backend
            of TensorNetwork.
          n_qubits: number of qubits; if falsy it is inferred via
            get_n_qubits().
          name: An optional name for the MPO.
          maxdim: maximum bond dimension of a single MPO before a new one
            is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class in the
            # visible file -- confirm it is provided elsewhere before relying
            # on the n_qubits=None path.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOContainers (self.mpo)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Translate the Hamiltonian's Pauli strings into SubOperator records."""
        def get_pauli_matrix(string):
            # bugfix: np.complex was removed in NumPy 1.24; the builtin
            # complex is the equivalent dtype
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Assemble one MPOContainer starting at term j of `intermediate`.
        Terms are added until the list is exhausted or the bond dimension
        reaches self.maxdim; the coefficient of each term is spread evenly
        over all sites as its n_qubits-th root.
        Returns the container and the index of the first unprocessed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # First term fills the initial (1,1,2,2) tensors directly
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # bugfix: complex() replaces the removed np.complex alias
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # Every further term grows the bond dimension by one block
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # periodically compress to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the intermediate term list into as many MPOs as needed."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator from self.mpo, e.g. to compare with the
        Hamiltonian matrix obtained from tequila. Returns a rank-4 tensor of
        shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Network layout per MPO:
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect bond indices (along the horizontal line above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # bugfix: previously H_m was only assigned for torch tensors,
            # causing a NameError for any other backend
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = np.asarray(res.tensor)
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.0/test/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to the (tequila) Hamiltonian.
    unitary:
        the parametrized circuit preparing the trial state.
    param_keys:
        ordered variable keys matching the positions in the parameter array.
    samples:
        the number of samples to evaluate the expectation value with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        # variables held fixed during the optimization
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        # derivatives of the parametrized Hamiltonian, used by _GradContainer
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy expectation value at the given parameter vector.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered as param_keys.
        args
        kwargs
        Returns
        -------
        complex:
            the energy <U(p)* H(p) U(p)> (cast to complex, see note below).
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # circuit variables stay real; Hamiltonian-only variables are cast to
        # complex before being substituted into the Hamiltonian
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # substitute the current variables into the parametrized Hamiltonian
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): scipy optimizers generally expect a real scalar here;
        # the complex cast is a deliberate workaround -- confirm intended.
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient of the energy at the given parameter vector.

        For each parameter the derivative has up to two contributions: the
        circuit derivative (analytic gradient of the expectation value with
        respect to circuit variables) and the Hamiltonian derivative
        (expectation value of dH/dtheta for Hamiltonian variables).

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered as param_keys.
        args
        kwargs
        Returns
        -------
        numpy.array:
            complex64 vector of derivatives, one entry per parameter key.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # bugfix: the vector must be complex from the start; a real-dtype
        # zeros() silently discarded the imaginary parts that the final
        # complex64 cast shows are intended to be returned
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        # circuit variables stay real; Hamiltonian-only variables are complex
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit depending on this variable
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian depending on this variable
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # plain numbers are stored directly; otherwise `derivative` is a
            # compiled objective that still has to be evaluated
            # (generalized: also accept Python complex and all numpy complex
            # widths, which the simulate path can produce)
            if isinstance(derivative, (float, complex, numpy.complexfloating)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective received in calls to optimizers.
        Parameters
        ----------
        all_variables: list:
            every variable of the problem (circuit and Hamiltonian variables).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        # NOTE(review): no-op self-assignment, presumably left over from a
        # refactor of the upstream implementation
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this file -- this branch would raise NameError; confirm the
                # import exists elsewhere.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            # NOTE(review): neither `warnings` nor TequilaWarning is imported
            # in this file -- this branch would raise NameError; confirm.
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into variables that are optimized and those held fixed
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Minimizes the expectation value of a parametrized Hamiltonian over a
        parametrized circuit: both the Hamiltonian coefficients and the
        circuit angles may be optimization variables.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian; converted with convert_PQH_to_tq_QH
            before use (presumably a project-local parametrized-QubitHamiltonian
            type -- TODO confirm against vqe_utils).
        unitary:
            the (parametrized) circuit preparing the state.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # Collect all variables: those of the Hamiltonian plus those of the circuit.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        # Split into angles that are optimized (active) and fixed ones (passive).
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        # Energy evaluation wrapper handed to scipy.optimize.minimize.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # One silent warm-up call so E.infostring is populated.
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this method's scope --
                # this branch would raise a NameError if reached. TODO confirm/fix.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # Numerical gradient strings like '2-point' are passed straight to scipy.
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` issue as in the branch above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytic gradient wrapper (circuit gradients + Hamiltonian derivatives).
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # Per-iteration callback: records energies/angles and forwards to a
        # user-supplied callback if one was given via kwargs.
        # (Class-level lists are safe here: the class is re-created per call.)
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assigns a *local* `real_iterations` that is never
        # used afterwards -- likely dead code or a bug. TODO confirm intent.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Build a local optimize_scipy optimizer and run it on the given
    parametrized Hamiltonian and circuit.

    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian defining the objective <U|H|U>.
    unitary:
        the (parametrized) circuit preparing the state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run (backend dependent).
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # User-supplied analytic gradients/hessians: normalize their variable keys.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: was assign_variable([k[1]]) -- passing a one-element list
            # instead of the second variable of the key tuple.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.0/test/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Differentiate tequila structures: Objectives, ExpectationValues,
    unitaries (including single gates) and transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which the structure should be
        differentiated; default None means the total gradient, returned as a
        dictionary keyed by variable.
    :param no_compile: skip the gradient-mode compilation step.
    :return: dictionary of Objectives (total gradient), or a single
        Objective/number for one variable.
    '''
    # total gradient: one recursive call per variable of the objective
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradient_map = {}
        for v in all_vars:
            assert (v is not None)
            gradient_map[v] = grad(objective, v, no_compile=no_compile)
        return gradient_map
    variable = assign_variable(variable)
    # tensors of objectives: differentiate elementwise
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)
    # no dependence at all -> empty objective
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        processed = objective
    else:
        gradient_compiler = CircuitCompiler(multitarget=True,
                                            trotterized=True,
                                            hadamard_power=True,
                                            power=True,
                                            controlled_phase=True,
                                            controlled_rotation=True,
                                            gradient_mode=True)
        processed = gradient_compiler(objective, variables=[variable])
    if variable not in processed.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch on the structure we were handed
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=processed.args[-1], variable=variable)
    if isinstance(processed, Objective) or (hasattr(processed, "args") and hasattr(processed, "transformation")):
        return __grad_objective(objective=processed, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate an Objective w.r.t. a single variable via the chain rule.

    For each argument slot of the objective's transformation, multiplies the
    outer derivative (autodiff of the transformation w.r.t. that slot) with
    the inner derivative of the argument itself, and sums the products.

    :param objective: the (compiled) objective to differentiate
    :param variable: the variable to differentiate with respect to
    :return: an Objective representing the derivative
    :raises TequilaException: if no autodiff backend is available or the
        accumulated derivative is None.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None

    # cache inner derivatives of expectation values so each is built only once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # outer derivative d(transformation)/d(arg_i); jax and autograd spell
        # the argument-index keyword differently (argnums vs argnum)
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # arg is an expectation value -> save redundancies via the cache
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner

    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the differentiation chain: resolve the derivative of a
    leaf argument all the way down to plain variables, returning 1.0 or 0.0
    when a variable is (isn't) identical to the requested one.

    :param arg: a transform, expectation value or variable to differentiate
    :param variable: the Variable with respect to which arg is differentiated
    '''
    assert (isinstance(variable, Variable))
    # plain variable: derivative is the Kronecker delta
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    # fixed (non-differentiable) parameters contribute nothing
    if isinstance(arg, FixedVariable):
        return 0.0
    # expectation values delegate to the shift-rule machinery
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    # compiled expectation values: differentiate the abstract one, then recompile
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # anything else is treated as a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value with respect to one
    variable, accumulated over every gate that carries that variable.

    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which to differentiate
    :return: the derivative as an Objective
    '''
    hamiltonian = E.H
    circuit = E.U
    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))

    # fast return if possible: no dependence on this variable at all
    if variable not in circuit.extract_variables():
        return 0.0

    # sum the shift-rule contribution of every gate parametrized by `variable`
    total = Objective()
    for position, gate in circuit._parameter_map[variable]:
        total += __grad_shift_rule(circuit, gate, position, variable, hamiltonian)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a directly differentiable gate via its shift rule.
    Expects precompiled circuits (gates must provide shifted_gates()).

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient contribution
    '''
    # custom gate constructions may override shifted_gates; without it we
    # cannot differentiate -- the compiler was probably not called
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # chain rule: derivative of the gate's parameter w.r.t. the variable
    inner_grad = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        shifted_expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * inner_grad) * shifted_expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.8/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight immutable record for one Pauli term in an intermediate
    format: a scalar coefficient, the list of 2x2 operator matrices, and
    the qubit positions on which those operators act (aligned by index).
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # stored privately; exposed read-only through the properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar weight of this Pauli term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices of this term, aligned with `positions`."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on, aligned with `operators`."""
        return self._positions
class MPOContainer:
    """
    Container for the site tensors of a matrix-product operator (MPO).

    Each qubit owns one rank-4 tensor with index order
    (left bond, right bond, physical row, physical column); the physical
    dimensions are fixed at 2. Supports writing operator blocks, growing
    the bond dimensions like a dynamic array, and SVD-based compression.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        """
        Args:
            n_qubits: number of sites; one trivial (bond dim 1) tensor each.
        """
        self.n_qubits = n_qubits
        # builtin `complex` replaces the np.complex alias (deprecated in
        # NumPy 1.20, removed in 1.24); the dtype is complex128 either way
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                         for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left-bond dimension over all site tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write an operator block or a single entry into the tensor of `qubit`.

        Args:
            qubit: site index.
            set_at: [left, right] to place a full 2x2 block, or
                [left, right, row, col] to set one scalar entry.
            add_operator: 2x2 array (2-index form) or scalar (4-index form).

        Raises:
            Exception: if set_at has neither 2 nor 4 entries.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of a site tensor (mimicking a dynamic array)
        and store a new 2x2 block at the newly created corner.

        Args:
            qubit: site index.
            update_dir: growth per index, e.g. [1,1,0,0] extends both bonds;
                a length-2 list is interpreted as [l, r, 0, 0]. The physical
                dimensions (last two entries) must not grow.
            add_operator: 2x2 block stored at the grown corner.

        Raises:
            Exception: on malformed update_dir.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # BUGFIX: build a new list instead of `+=`, which mutated the
                # caller's list in place
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # builtin `complex` replaces the np.complex alias removed in NumPy 1.24
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values at the grown corner
        new_tensor[new_shape[0]-1, new_shape[1]-1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress bond dimensions via two SVD sweeps (left-to-right, then
        right-to-left), dropping singular values below EPS and splitting the
        remaining weight (sqrt of singular values) between neighbors.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices so each tensor is rank 3
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3), (1, -2)])
        # split the fused physical index back into two
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds a matrix-product-operator (MPO) representation of a qubit
    Hamiltonian (as a list of MPOContainer objects whose sum represents the
    operator) and can reconstruct the dense matrix for verification.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: tequila QubitHamiltonian (or text form) to encode.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred if not given.
            name: An optional name for the MPO.
            maxdim: bond-dimension cap; when reached, a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class in this
            # file -- omitting n_qubits will raise AttributeError unless it is
            # provided elsewhere (subclass/monkey-patch). TODO confirm.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits/sites of the MPO."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of self.hamiltonian into a list of
        SubOperator records (coefficient, operator matrices, positions).

        Raises:
            Exception: if an identity term appears anywhere but first.
        """
        # Here, have either a QubitHamiltonian or a file with a of-operator
        # Start with Qubithamiltonian
        def get_pauli_matrix(string):
            # builtin `complex` replaces the np.complex alias removed in NumPy 1.24
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Accumulate Pauli terms, starting at index j, into one MPOContainer
        until self.maxdim is reached or the terms are exhausted.

        Args:
            intermediate: list of SubOperator terms.
            j: index of the first term to consume.

        Returns:
            (mpo, j_next): the filled container and the index of the first
            unconsumed term.
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: plain 2x2 blocks; the coefficient is distributed evenly
        # over all sites via its n_qubits-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                # builtin `complex` replaces the removed np.complex alias
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimension per term and compress
        # every 100 terms, stopping once maxdim is reached.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits-1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """
        Split the list of Pauli terms into as many MPOs as needed so that no
        single MPO exceeds self.maxdim.

        Returns:
            list of MPOContainer objects whose sum represents the Hamiltonian.
        """
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator from self.mpo, e.g. to compare with the
        Hamiltonian matrix obtained from tequila. Contracts every MPO network
        over its bond indices and sums the results.

        Returns:
            rank-4 array of shape (d, d, d, d) with d = 2**(n_qubits/2).

        NOTE(review): H_m is only assigned when the contraction result is a
        torch.Tensor -- assumes the "pytorch" backend; confirm before using
        with another tensornetwork backend.
        """
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #    |   |         |   |
        #   -O---O---...---O---O-
        #    |   |         |   |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Reshape to get tensor of order 4 (get rid of left- and right open
            # indices and combine top&bottom into one)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.8/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable objective wrapper handed to scipy.optimize.

    Translates scipy's flat parameter vector back into tequila variables,
    instantiates the parametrized Hamiltonian at those values, simulates the
    expectation value over the circuit, and optionally records the
    optimization history. Used by the SciPy optimizer only.

    Attributes
    ---------
    Hamiltonian:
        callable parametrized Hamiltonian, evaluated at the variable dict.
    unitary:
        the circuit over which the expectation value is taken.
    param_keys:
        ordered variable keys matching positions in the parameter array.
    samples:
        number of shots per evaluation.
    save_history:
        whether each call's energy and angles are recorded.
    print_level:
        verbosity of per-call printing.
    N:
        the length of param_keys.
    history:
        if save_history, energies from every __call__.
    history_angles:
        if save_history, the angle dicts sent to every __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.samples = samples
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.Ham_derivatives = Ham_derivatives
        self.save_history = save_history
        self.print_level = print_level
        self.infostring = None
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            flat parameter values, ordered like self.param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            the simulated expectation value; wrapped in complex() since jax
            scalar types confuse the optimizers.
        """
        circuit_vars = self.unitary.extract_variables()
        angles = {}
        for position in range(self.N):
            key = self.param_keys[position]
            # circuit angles stay as given; Hamiltonian coefficients are cast to complex
            angles[key] = p[position] if key in circuit_vars else complex(p[position])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        call the wrapped gradient.

        Builds the gradient vector element by element: circuit variables get
        an analytic tequila gradient objective, Hamiltonian variables get the
        expectation value of the pre-computed Hamiltonian derivative; a
        variable that appears in both receives the sum.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            complex gradient vector, ordered like self.param_keys.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUG FIX: the vector must be complex from the start. The original
        # allocated numpy.zeros(self.N) (real float64), so any complex
        # derivative value silently lost its imaginary part before the
        # final complex64 cast.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        # Translate the flat parameter vector into a variable dictionary
        # (same convention as _EvalContainer.__call__).
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution from the circuit dependence on this variable:
            # an analytic (still callable) tequila gradient objective.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution from the Hamiltonian dependence on this variable:
            # expectation value of dH/dvariable, evaluated immediately.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # BUG FIX: dispatch on callability instead of an isinstance check
            # against float / numpy.complex64. The old check crashed for
            # plain `complex` or numpy.complex128 results (it tried to call
            # the number); anything still callable here is an uncompiled
            # tequila objective that must be evaluated.
            if callable(derivative):
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            else:
                dE_vec[i] = derivative
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    overwrite the expectation and gradient container objects

    Subclass of tequila's OptimizerSciPy that drives scipy.optimize.minimize
    with the custom _EvalContainer / _GradContainer defined in this file, so
    that both circuit variables and Hamiltonian coefficients can be optimized.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        objective: Objective:
            the objective being optimized.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        # Resolve the different accepted forms of initial_values:
        # None -> random, "zero"/"random" strings, a callable factory,
        # a single number broadcast to all variables, or a (possibly
        # incomplete) dictionary.
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # the visible header of this file — confirm it is in scope.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            # NOTE(review): `warnings` and `TequilaWarning` are not imported
            # in the visible header — confirm they are in scope.
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # Split into the variables that scipy will vary (active) and the
        # ones that stay fixed (passive).
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        objective: Objective:
            the objective to optimize.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Collect the full variable set: Hamiltonian coefficient variables
        # plus the circuit's own variables.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # Build the energy evaluator and call it once (silently) to populate
        # its infostring before optimization starts.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                # NOTE(review): `objective`, `TequilaException`,
                # `get_qng_combos` and `_QngContainer` are not defined/imported
                # in the visible part of this file (the _containers import at
                # the top is commented out) — this branch would raise
                # NameError if taken; confirm before relying on QNG.
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # any other string ('2-point', 'cs', '3-point') is handed to
                # scipy as a numerical-gradient instruction
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                # NOTE(review): same undefined-name concern as the QNG branch
                # above.
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Build the analytic gradient evaluator and prime it once,
            # mirroring the energy container above.
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # Per-iteration callback: copies the most recent evaluation out of
        # the containers into iteration-level history lists.
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assignment binds a local name that is never
        # used afterwards — it does not update callback.real_iterations.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]  # dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and pass down the objective construction
    down
    Parameters
    ----------
    objective: Objective :
        The tequila objective to optimize
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # Normalize user-supplied gradient/hessian dictionaries so their keys
    # are proper Variable objects.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUG FIX: the second key element was wrapped in a list
            # (assign_variable([k[1]])), handing assign_variable a list
            # instead of a hashable variable key.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.8/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param obj (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
    :param variables (list of Variable): parameter with respect to which obj should be differentiated.
        default None: total gradient.
    return: dictionary of Objectives, if called on gate, circuit, exp.value, or objective; if Variable or Transform, returns number.
    '''
    if variable is None:
        # None means that all components are created
        # (recurse once per variable and collect the partials in a dict)
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    # QTensor: differentiate elementwise via numpy.vectorize
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    # fast return: objective does not depend on the variable at all
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile gates into shift-rule-differentiable form first
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # Dispatch on the structure of the (compiled) objective.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Apply the chain rule over the arguments of an Objective:
    dO/dv = sum_i (d transformation / d arg_i) * (d arg_i / dv).
    The outer factor is produced by jax/autograd differentiation of the
    transformation; the inner factor comes from __grad_inner.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # Outer derivative of the transformation w.r.t. argument i.
        # (holomorphic=True since arguments may be complex-valued here)
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies: each expectation value is differentiated once
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Differentiate a leaf argument with respect to `variable`.

    Returns a plain number for (fixed) variables — 1.0 when the argument is
    the variable itself, 0.0 otherwise — and delegates to the expectation
    value / objective gradient routines for structured arguments.

    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    """
    assert (isinstance(variable, Variable))
    # Plain variables: derivative is the Kronecker delta.
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    # Fixed (non-optimizable) values carry no dependence.
    if isinstance(arg, FixedVariable):
        return 0.0
    # Abstract expectation values get the shift-rule treatment.
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    # Already-compiled expectation values: differentiate the abstract
    # form, then recompile with the original backend arguments.
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # Anything else is treated as a nested Objective.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param unitary: the unitary whose gradient should be obtained
    :param variables (list, dict, str): the variables with respect to which differentiation should be performed.
    :return: vector (as dict) of dU/dpi as Objective (without hamiltonian)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # All gates in the circuit that depend on this variable, as
    # (position, gate) pairs from the circuit's parameter map.
    param_gates = unitary._parameter_map[variable]
    # Sum the shift-rule contribution of every occurrence of the variable.
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Shift-rule gradient of a single directly differentiable gate inside an
    expectation value. Expects precompiled circuits.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g within unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the measured Hamiltonian of the expectation value
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # custom gate constructions may provide their own shifted_gates();
    # without it the gate cannot be differentiated here
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain-rule factor: derivative of the gate parameter w.r.t. variable
    chain_factor = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    # Sum the weighted expectation values of the shifted circuits.
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * chain_factor) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.4/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight read-only record for one Pauli term in intermediate format:
    a scalar coefficient, the list of operator matrices, and the qubit
    positions on which they act.
    """
    def __init__(self, coefficient: float, operators: List, positions: List):
        # Values are stored privately; access goes through the read-only
        # properties below.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this Pauli term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices belonging to this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the MPO.

    Each entry of ``container`` is a rank-4 tensor with index order
    (left bond, right bond, physical out, physical in); the physical
    dimensions are always 2 (qubits).
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # BUG FIX: `np.complex` was a deprecated alias for the builtin
        # `complex` and was removed in NumPy 1.24 — use `complex` directly.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                           for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max (left) bond dimension over all container tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: where to put data.
            length 2 -> (left bond, right bond): store a full 2x2 matrix
            length 4 -> full index: store a single scalar value
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array: grow the bond dimensions of the
        tensor at `qubit` and place `add_operator` in the new corner slot.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1
                    the last two dimensions are always 2x2 only
        """
        old_shape = self.container[qubit].shape
        # Normalize a 2-element direction to the full 4-element form.
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # BUG FIX: `np.complex` removed in NumPy 1.24; use builtin `complex`.
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values in the freshly created corner block
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compression of MPO via SVD: one left-to-right sweep followed by a
        right-to-left sweep, truncating singular values below EPS and
        distributing sqrt(s) to both sides of each cut.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each tensor is rank 3 for the sweeps.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb vh into the right neighbour.
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb u into the left neighbour.
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Split the merged physical index back into the two 2-dim legs.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle
    MPO-Hamiltonians.

    Converts a tequila QubitHamiltonian into a list of MPOContainer objects
    (each capped at `maxdim` bond dimension) and can contract them back into
    a dense tensor for verification.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object
        Args:
            hamiltonian: the QubitHamiltonian (or a text form) to convert.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork (currently unused here).
            n_qubits: number of qubits; inferred if not given.
            name: An optional name for the MPO (currently unused here).
            maxdim: maximum bond dimension per single MPO before a new one
                is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits() is not defined on this class in the
            # visible source — calling the constructor without n_qubits will
            # raise AttributeError. Confirm where this method is meant to
            # come from (e.g. hamiltonian.n_qubits).
            self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
    def openfermion_to_intermediate(self):
        """
        Translate self.hamiltonian's Pauli strings into a list of SubOperator
        records (coefficient, operator matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # BUG FIX: `np.complex` was removed in NumPy 1.24 — use the
            # builtin `complex` as dtype.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate
    def build_single_mpo(self, intermediate, j):
        """
        Accumulate Pauli terms starting at index j into one MPOContainer
        until either the terms are exhausted or the bond dimension reaches
        self.maxdim. Returns (mpo, next_index).

        The scalar coefficient of each term is distributed evenly over all
        site tensors as coefficient**(1/n_qubits).
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity
        # BUG FIX (throughout this method): np.complex(...) removed in
        # NumPy 1.24 — the builtin complex(...) is the direct replacement.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: keep appending terms (growing the bond
        # dimension by one per term) until maxdim is reached.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # Periodic compression keeps the bond dimension in check.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j
    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many capped MPOs as needed."""
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list
    def construct_matrix(self):
        ''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq '''
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #   |  |       |  |
        #  -O--O--...--O--O-
        #   |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Reshape to get tensor of order 4 (get rid of left- and right open indices
            # and combine top&bottom into one)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.4/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    objective:
        the objective to evaluate.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        """Store the (parametrized) Hamiltonian, the circuit and bookkeeping
        options; history lists are only created when save_history is True.

        Parameters
        ----------
        Hamiltonian:
            callable mapping a variable dictionary to a tq Hamiltonian.
        unitary:
            the (parametrized) circuit the expectation value is taken with.
        param_keys:
            ordered keys matching positions of the scipy parameter vector.
        Ham_derivatives:
            dict of Hamiltonian derivatives (used by _GradContainer).
        Eval:
            optional companion evaluation container (unused here).
        passive_angles:
            variables held fixed during the optimization.
        samples:
            number of shots for tq.simulate.
        save_history / print_level:
            bookkeeping and verbosity switches.
        """
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        call a wrapped objective.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        numpy.array:
            value of self.objective with p translated into variables, as a numpy array.
        """
        # Split the flat parameter vector: circuit variables stay real,
        # everything else (Hamiltonian parameters) is cast to complex.
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        # NOTE(review): 'vars' shadows the builtin of the same name.
        vars = format_variable_dictionary(angles)
        # Rebuild the Hamiltonian at the current parameters, then evaluate
        # the expectation value with the (still parametrized) circuit.
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function.
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        call the wrapped gradient.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient of the objective at p, as a complex numpy array.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUGFIX: the gradient vector must be complex — Hamiltonian-parameter
        # derivatives are complex here, and assigning a complex scalar into a
        # float64 array raises TypeError in numpy.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        # Circuit variables stay real; Hamiltonian parameters are cast to complex
        # (mirrors _EvalContainer.__call__).
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution from the circuit: d<H(p)>/d(theta_i) via tq.grad.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution from the Hamiltonian: <dH/dp_i> evaluated directly.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # Plain numbers go straight into the vector; otherwise 'derivative'
            # is still a compiled objective and must be evaluated now.
            # (Widened from float/numpy.complex64 to also cover builtin complex
            # and numpy floating types.)
            if isinstance(derivative, (float, complex, numpy.floating, numpy.complexfloating)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    overwrite the expectation and gradient container objects

    Variant of tequila's SciPy optimizer for objectives whose Hamiltonian is
    itself parametrized: expectation values and gradients are rebuilt from a
    (Hamiltonian, unitary) pair via the local _EvalContainer/_GradContainer.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            every variable of the problem (Hamiltonian + circuit variables).
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module — this branch would raise NameError; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings / TequilaWarning are not imported in
                # this module — confirm before relying on this path.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        # everything initialized but not optimized over is "passive"
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the (parametrized) circuit defining the expectation value.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Collect all problem variables: Hamiltonian parameters first, then
        # the circuit variables.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up call so E.infostring is populated
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): 'objective' is undefined in this scope (code
                # inherited from the upstream optimizer) — this qng path would
                # raise NameError; confirm before use.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # pass a numerical-gradient instruction ('2-point', ...) to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined 'objective' as above — confirm.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up call, as for E above
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # closure-based callback: records the latest history entries of the
        # containers after every scipy iteration
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local that is never used afterwards;
            # presumably the callback history was meant to be reconstructed here.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        # NOTE(review): sorting assumes the stored energies are mutually
        # comparable (real numbers); complex energies would make sorted() raise.
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy class instead of tequila's and passes the
    (Hamiltonian, unitary) pair down so the objective is constructed there.
    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian defining the objective together with unitary.
    unitary:
        the (parametrized) circuit defining the objective together with Hamiltonian.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run/simulate the circuits.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize user-supplied gradient/hessian dictionaries to Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: previously the second key was wrapped in a list
            # (assign_variable([k[1]])), producing an invalid variable key;
            # assign_variable expects the hashable itself.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.4/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param objective (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
                                default None: total gradient (one entry per variable).
    :param no_compile (bool): skip the gradient-mode circuit compilation and differentiate objective as-is.
    return: dictionary of Objectives, if called on gate, circuit, exp.value, or objective; if Variable or Transform, returns number.
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    # QTensor: differentiate element-wise via numpy vectorize
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    # objective does not depend on the variable: the derivative is an empty
    # Objective (evaluates to 0)
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile into shift-rule-differentiable primitive gates
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch on what we were given: raw expectation value, single-expectation
    # objective, or general transformed objective
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Chain rule over a transformed objective: for each argument, multiply the
    outer derivative (autodiff of the transformation w.r.t. that argument) with
    the inner derivative (gradient of the argument w.r.t. *variable*) and sum.

    :param objective: compiled Objective with .args and .transformation
    :param variable: the Variable with respect to which to differentiate
    :return: Objective representing the total derivative
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values, which may repeat in args
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # outer derivative d(transformation)/d(arg_i) via the autograd backend
        # (note the differing keyword spelling: jax 'argnums' vs autograd 'argnum')
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies: identical expectation values share one inner derivative
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """Differentiate a leaf argument with respect to *variable*.

    Plain variables yield 1.0 (match) or 0.0 (no match), fixed variables
    always yield 0.0; expectation values and nested objectives are delegated
    to their respective gradient rules.

    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated
    """
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one, then
        # recompile with the original input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """Analytic partial derivative of an expectation value w.r.t. *variable*.

    Applies the shift rule to every gate in the circuit that is parametrized
    by *variable* and sums all contributions.

    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which differentiation is performed
    :return: Objective representing d<H>/d(variable), or 0.0 if independent
    """
    H = E.H
    U = E.U
    if not (U.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))
    # fast return if possible: circuit does not depend on this variable
    if variable not in U.extract_variables():
        return 0.0
    accumulated = Objective()
    for position, gate in U._parameter_map[variable]:
        accumulated += __grad_shift_rule(U, gate, position, variable, H)
    assert accumulated is not None
    return accumulated
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """Gradient contribution of one directly differentiable gate via its
    shift rule. Expects precompiled circuits.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position in unitary at which g appears
    :param variable: the Variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian the surrounding expectation value measures
    :return: Objective whose evaluation yields the gradient of g w.r.t. variable
    """
    # custom gate constructions may override shifted_gates
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain-rule factor from the gate parameter's dependence on the variable
    chain_factor = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace gate i by its shifted version and measure the same Hamiltonian
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * chain_factor) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.4/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight read-only record of one Pauli string: a scalar coefficient
    together with the non-identity single-qubit operators and the qubit
    positions they act on. Used as an intermediate storage format.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        # scalar prefactor of the Pauli string
        return self._coefficient

    @property
    def operators(self):
        # list of single-qubit operators (2x2 matrices)
        return self._operators

    @property
    def positions(self):
        # qubit indices on which the operators act
        return self._positions
class MPOContainer:
    """
    Container for the per-qubit tensors of a matrix-product operator (MPO).

    One rank-4 tensor is stored per qubit with index order
    (left bond, right bond, physical row, physical column); the physical
    dimensions are always 2x2. Bond dimensions can grow like a dynamic
    array via ``update_container`` and are reduced again by the SVD-based
    ``compress_mpo``.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        """Initialize with one trivial (1,1,2,2) zero tensor per qubit."""
        self.n_qubits = n_qubits
        # fix: ``np.complex`` was removed in NumPy >= 1.24; it was only an
        # alias of the builtin ``complex``, which is used here instead.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]
    def get_dim(self):
        """ Returns max (left) bond dimension over all qubit tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of ``qubit``.

        set_at: where to put data; a pair (left bond, right bond) selects a
        full 2x2 physical block, a 4-tuple addresses a single entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two dimensions are always 2x2 only. ``add_operator`` is
        written into the freshly created corner block.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # fix: builtin ``complex`` instead of the removed ``np.complex``
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values into the new bottom-right corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compression of MPO bond dimensions via a forward and a backward SVD
        sweep; singular values below ``EPS`` are discarded.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each tensor is rank 3.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3), (1, -2)])
        # Split the merged physical index back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build matrix-product-operator (MPO) representations of a qubit
    Hamiltonian. Every Pauli string is distributed over the qubits (the
    coefficient is spread as coeff**(1/n_qubits) per site) and accumulated
    into MPOContainer objects; a new MPO is started whenever the bond
    dimension would exceed ``maxdim``.
    """
    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 # tensors: List[Tensor],
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object
        Args:
            hamiltonian: tequila QubitHamiltonian (or a text identifier).
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; derived via ``get_n_qubits`` if omitted.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension of one MPO before a new one starts.
        """
        # NOTE: heavy-dependency annotations above are kept as strings
        # (forward references) so defining this class does not require
        # tq/tensornetwork to be resolvable at class-creation time.
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): ``get_n_qubits`` is not defined on this class in
            # this file -- confirm it is provided elsewhere before relying
            # on the n_qubits=None path.
            self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Convert ``self.hamiltonian`` into ``self.mpo`` (list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
    def openfermion_to_intermediate(self):
        """
        Convert ``self.hamiltonian`` into a list of SubOperator objects, one
        per Pauli string (coefficient, list of 2x2 matrices, qubit positions).
        Only the first Pauli string may be the identity.
        """
        def get_pauli_matrix(string):
            # fix: builtin ``complex`` instead of the removed ``np.complex``
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate
    def build_single_mpo(self, intermediate, j):
        """
        Accumulate terms of ``intermediate`` starting at index ``j`` into one
        MPOContainer until the bond dimension reaches ``self.maxdim``.
        Returns (mpo, next_j).
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # fix: builtin ``complex`` instead of the removed ``np.complex``
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: append one bond-dimension "row" per Pauli string
        # until either the terms run out or the MPO exceeds maxdim.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits-1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits) *
                                         np.eye(2))
            # Periodically compress to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j
    def intermediate_to_mpo(self, intermediate):
        """Split ``intermediate`` into as many MPOs as ``maxdim`` requires."""
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list
    def construct_matrix(self):
        ''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq '''
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # fix: H_m was previously left undefined for non-torch
                # backends, which raised NameError below.
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.4/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable wrapper handed to scipy.optimize: evaluates the expectation
    value of a (parametrized) Hamiltonian for a parameter vector and keeps
    the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tq Hamiltonian.
    unitary:
        the (parametrized) circuit whose expectation value is evaluated.
    param_keys:
        ordered parameter keys matching positions in the numpy array.
    Ham_derivatives:
        optional derivatives of the Hamiltonian w.r.t. its variables
        (used by _GradContainer).
    samples:
        the number of samples to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__.
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the expectation value at parameter vector ``p``.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        complex:
            expectation value of the Hamiltonian (returned as ``complex``
            because jax types confuse the optimizers).
        """
        angles = {}
        for i in range(self.N):
            # circuit variables stay real; variables only appearing in the
            # Hamiltonian are promoted to complex
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        # fix: renamed from ``vars`` to avoid shadowing the builtin
        variables = format_variable_dictionary(angles)
        Hamiltonian = self.Hamiltonian(variables)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, variables, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable gradient wrapper handed to scipy.optimize.
    Combines circuit-parameter gradients (via tq.grad) with Hamiltonian-
    parameter gradients (via the precomputed ``Ham_derivatives``).
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector ``p``.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient.
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient as a complex64 vector, ordered like ``param_keys``.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # fix: allocate a complex buffer. Hamiltonian-variable derivatives
        # may be complex and the result is returned as complex64 anyway;
        # the previous real buffer raised/truncated on assignment.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        # renamed from ``vars`` to avoid shadowing the builtin
        formatted = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit depending on this variable
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(formatted)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian depending on this variable
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(formatted)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, formatted, backend='qulacs', samples=self.samples)
            if isinstance(derivative, (float, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                # still a compiled objective -> evaluate it now
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer that overwrites the expectation- and gradient-
    container objects of OptimizerSciPy so a parametrized Hamiltonian
    (callable on a variable dictionary) can be optimized together with a
    parametrized circuit.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian variables + circuit variables).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this file -- confirm it is brought in via `vqe_utils import *`
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` / TequilaWarning are not imported
                # in this file -- would raise NameError if reached; verify.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into variables that are optimized and those that are fixed
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (convertible via convert_PQH_to_tq_QH).
        unitary:
            the parametrized circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # convert to a tq Hamiltonian and collect its variables/derivatives
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        # circuit variables are appended after the Hamiltonian variables
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        # energy evaluator passed to scipy as the objective callable
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up call to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is undefined in this scope, and
                # get_qng_combos/_QngContainer are not imported (the
                # `_containers` import at the top of the file is commented
                # out) -- this branch would raise NameError if reached.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # numerical gradient string ('2-point', 'cs', '3-point', ...)
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined-name concern as the qng
                # branch above (`objective`, get_qng_combos, _QngContainer).
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient evaluator (circuit + Hamiltonian derivatives)
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # one silent warm-up call to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # class-level lists shared by all instances; only one instance
            # is created per __call__, so this is effectively per-run state
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                # record the most recent evaluation at each scipy iteration
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is never used afterwards -- looks like
            # dead code left over from the upstream implementation; confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy class (instead of the stock tequila one)
    and passes the objective construction down to it.
    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian to optimize over.
    unitary:
        the parametrized circuit.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device the backend should run on.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize user-supplied gradient/hessian dictionaries
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # fix: the second key component was previously wrapped in a list
            # (assign_variable([k[1]])), producing invalid variable keys
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.4/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.
    :param objective (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
        default None: total gradient, returned as a dict over all variables.
    :param no_compile: if True, skip the gradient-mode compilation pass
        (assumes objective is already compiled accordingly).
    return: dictionary of Objectives if variable is None; otherwise a single
        derivative Objective (a number for Variables/Transforms).
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            # recurse once per variable to build the total gradient
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    # QTensor: differentiate elementwise via numpy vectorization
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile down to gates for which the shift rule is applicable
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch on the (compiled) objective type
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate a (transformed) Objective w.r.t. ``variable`` via the
    chain rule: outer derivative of the transformation (by autograd/jax)
    times the inner derivative of each argument.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values that occur more than once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Differentiate a single argument of a transformation with respect to
    ``variable``, recursing all the way down to plain variables.

    Returns 1.0 when ``arg`` is the variable itself, 0.0 for other
    variables and fixed values, and delegates to the expectation-value /
    objective gradient routines for composite arguments.

    :param arg: a Variable, FixedVariable, ExpectationValueImpl or nested
        objective to be differentiated
    :param variable: the Variable with respect to which ``arg`` should be
        differentiated
    """
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        # d(arg)/d(variable) is 1 for the variable itself, 0 otherwise
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one and
        # re-compile the result with the same input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # fall back: treat arg as a (nested) objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """
    Analytic partial derivative of an expectation value with respect to a
    single variable, via the shift rule applied to every gate that
    carries the variable.

    :param E: the expectation value whose gradient should be obtained
    :param variable: the Variable with respect to which differentiation
        should be performed
    :return: an Objective encoding dE/d(variable), or 0.0 when the
        circuit does not depend on the variable
    """
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # accumulate one shift-rule contribution per gate that depends on the variable
    total = Objective()
    for position, gate in unitary._parameter_map[variable]:
        total += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """
    Gradient of a directly differentiable gate via its shift rule.
    Expects precompiled circuits.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position in ``unitary`` at which ``g`` appears
    :param variable: the variable with respect to which ``g`` is being
        differentiated
    :param hamiltonian: the hamiltonian measured against ``unitary``
    :return: an Objective whose evaluation yields the gradient of ``g``
        w.r.t. ``variable``
    :raises TequilaException: if the gate provides no shift rule
    """
    # custom gate constructions may override shifted_gates; without it
    # there is nothing we can differentiate here
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: inner derivative of the gate parameter w.r.t. variable
    chain_factor = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * chain_factor) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.8/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight record for one term of an operator in an intermediate
    format: a scalar coefficient, the single-qubit operator matrices it
    multiplies, and the qubit positions those operators act on.
    The stored data is exposed through read-only properties.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # keep the raw data private; access goes through the properties below
        self._coefficient, self._operators, self._positions = (
            coefficient, operators, positions)

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """List of operator matrices, one per entry of ``positions``."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Container for the site tensors of a matrix product operator (MPO).

    One rank-4 tensor is stored per qubit with index order
    (left bond, right bond, physical, physical); the two physical
    dimensions are always 2. The class can set entries, grow the bond
    dimensions on demand (mimicking a dynamic array) and compress the
    MPO via singular value decompositions.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        """
        Start from 1x1x2x2 all-zero site tensors.

        :param n_qubits: number of sites/qubits of the MPO
        """
        self.n_qubits = n_qubits
        # dtype=complex (builtin) instead of np.complex: that alias was
        # deprecated in NumPy 1.20 and removed in 1.24; the resulting
        # dtype (complex128) is unchanged.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal (left) bond dimension over all site tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into an existing slot of the site tensor of ``qubit``.

        :param set_at: either [left, right] to place a full 2x2 matrix,
            or [left, right, row, col] to place a single scalar value
        :param add_operator: 2x2 array when len(set_at) == 2, scalar when
            len(set_at) == 4
        :raises Exception: if set_at has neither length 2 nor 4
        """
        if len(set_at) == 2:
            # place a full 2x2 physical matrix at bond indices (left, right)
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # place a single scalar entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimension(s) of a site tensor and place
        ``add_operator`` in the newly created corner slot. This mimics a
        dynamic array.

        :param update_dir: e.g. [1,1,0,0] -> extend dimension along where
            there's a 1; the last two dimensions are always 2x2 only
        :param add_operator: 2x2 matrix written at the new maximal bond
            indices
        :raises Exception: on malformed update_dir
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # see __init__ for why builtin complex is used as the dtype
        new_tensor = np.zeros(new_shape, dtype=complex)
        # copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # add new values in the freshly created corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO with a forward and a backward SVD sweep.
        Singular values below EPS are discarded and the remaining weight
        is split symmetrically (sqrt of the singular values to each side).
        """
        n_qubits = len(self.container)
        # flatten the two physical indices into one for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # go forwards
        for q in range(n_qubits - 1):
            # apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # count the non-negligible singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # restore the 2x2 physical structure
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build and hold matrix product operators (MPOs) for a qubit
    Hamiltonian. Terms of the Hamiltonian are absorbed into a single MPO
    until the bond-dimension cap ``maxdim`` is hit; further terms start
    additional MPOs, so the result is a list of MPOContainer objects.
    """

    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian to convert to MPO form.
            backend: an optional backend (stored nowhere; unused here).
            n_qubits: number of qubits; if omitted it is derived via
                ``self.get_n_qubits()``.
            name: an optional name for the MPO (unused here).
            maxdim: bond-dimension cap per single MPO.

        Note: the annotations referencing tq / AbstractBackend are kept
        as strings so the class definition does not require those
        optional imports to be evaluated.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class in
            # this file; constructing without n_qubits will raise
            # AttributeError unless it is provided elsewhere -- confirm.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits/sites of the MPO."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert ``self.hamiltonian`` into a list of MPOs (``self.mpo``)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of ``self.hamiltonian`` into a list
        of SubOperator records (coefficient, operator matrices, qubit
        positions).

        :raises Exception: if an identity term appears anywhere but first
        """

        def get_pauli_matrix(string):
            # builtin complex instead of np.complex: the alias was removed
            # in NumPy 1.24; dtype (complex128) is unchanged
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # store all paulistrings in the intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # only the first term may be the identity (acts on no qubit)
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # collect the operators and the qubits they act on
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting from term ``j`` of ``intermediate``,
        absorbing further terms until all terms are consumed or the bond
        dimension would exceed ``self.maxdim``. Every coefficient is
        distributed evenly over the sites as coefficient**(1/n_qubits).

        Returns:
            (mpo, j): the finished MPOContainer and the index of the
            first term that was NOT absorbed.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ------------------------------------------------------------------
        # first term: fills the existing 1x1x2x2 slots directly
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # ------------------------------------------------------------------
        # remaining terms: each one grows the bond dimensions by one block
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # every qubit index appears at most once in my_positions;
                # edge sites only extend one bond dimension
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # operator on this site if present, otherwise identity
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # compress periodically to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as ``maxdim`` requires."""
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # start with 0, then the final count is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Contract all MPOs in ``self.mpo`` back into a dense tensor, e.g.
        to compare with the Hamiltonian matrix obtained from tequila.
        Returns a tensor of shape (d,d,d,d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # network layout (bonds contracted, physical legs dangle):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # connect neighbouring bond indices
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # collect dangling edges (free indices)
            edges = []
            edges += [nodes[0].get_edge(0)]   # left dangling edge
            edges += [nodes[-1].get_edge(1)]  # right dangling edge
            for q in range(n_qubits):         # upper dangling edges
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):         # lower dangling edges
                edges += [nodes[q].get_edge(3)]
            # contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # NOTE(review): H_m is only assigned for torch tensors; with a
            # non-torch backend this raises UnboundLocalError -- confirm
            # the intended backend is always pytorch.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        # drop the trivial outer bond indices and merge top/bottom legs
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.8/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable energy evaluator handed to scipy; keeps the optimization history.

    Overwrites the __call__ of tequila's stock container: here the
    Hamiltonian is a *callable* that is rebuilt from the current variable
    values on every evaluation, so parametrized Hamiltonians are supported.
    This class is used by the SciPy optimizer below and should not be used
    elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tequila Hamiltonian.
    unitary:
        the (parametrized) circuit whose expectation value is evaluated.
    param_keys:
        ordered parameter keys; entry i corresponds to position i of the
        numpy array scipy passes in.
    samples:
        the number of samples to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy for the parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective (ordered as
            param_keys).
        args
        kwargs

        Returns
        -------
        complex:
            expectation value of the rebuilt Hamiltonian under the circuit.
        """
        angles = {}
        for i in range(self.N):
            # circuit variables stay real; Hamiltonian-only parameters are
            # cast to complex (the parametrized Hamiltonian expects that)
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # rebuild the Hamiltonian at the current parameter values
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable gradient evaluator handed to scipy; keeps the optimization
    history. Overwrites the __call__ of tequila's stock container so that
    both circuit parameters (via parameter-shift gradients) and
    Hamiltonian parameters (via Hamiltonian derivatives) contribute.
    This class is used by the SciPy optimizer below and should not be used
    elsewhere. See _EvalContainer for the shared attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient vector at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient (ordered as
            param_keys).
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient components, one per param_key, as complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is real-valued; assigning complex results
        # below discards imaginary parts before the final complex64 cast.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # circuit variables stay real, Hamiltonian-only ones go complex
            # (mirrors _EvalContainer.__call__)
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit: parameter-shift gradient of
            # <H(vars)> w.r.t. this circuit variable
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                # derivative becomes a compiled Objective here (evaluated
                # further below); adding a float to it later relies on
                # tequila's Objective arithmetic
                derivative += temp_derivative
            # contribution from the Hamiltonian: expectation value of
            # dH/d(param) under the (fixed) circuit
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # plain numbers can be stored directly; compiled objectives
            # still need to be evaluated at the current variables
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that supports parametrized Hamiltonians.

    Overwrites the expectation and gradient container objects of
    OptimizerSciPy: instead of a precompiled objective, __call__ takes a
    (parametrized) Hamiltonian plus a circuit and rebuilds the objective
    on every evaluation via the local _EvalContainer / _GradContainer.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            all variables (Hamiltonian variables + circuit variables).
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k: 0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # the visible imports -- presumably supplied by
                # `from vqe_utils import *`; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings / TequilaWarning also not imported
                # here -- presumably via the star import; confirm.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split the initial values into actively optimized and frozen ones
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the (parametrized) circuit to optimize against.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # collect all variables: the Hamiltonian's own plus the circuit's
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # energy container; one silent warm-up call populates infostring
        E = _EvalContainer(Hamiltonian=H,
                           unitary=unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    # NOTE(review): TequilaException / get_qng_combos /
                    # _QngContainer / `objective` are not defined in this
                    # file's visible scope -- this branch would fail if
                    # taken; presumably unused here. Confirm.
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # string like '2-point' is handed to scipy as-is
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient, dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient container; warm-up call as for E above
            dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                                unitary=unitary,
                                Hamiltonian=H,
                                Eval=E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            # per-iteration snapshots taken from the containers' histories
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this local assignment is never used afterwards.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Entry point that builds the local optimize_scipy optimizer and passes
    the Hamiltonian + circuit down to it (the objective is constructed
    inside the optimizer on every evaluation).

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian to minimize (converted inside the optimizer).
    unitary:
        the (parametrized) circuit.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation,
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run (simulator or hardware identifier).
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # fix: the second key element was wrapped in a list
            # (assign_variable([k[1]])), which is not a valid variable;
            # both tuple entries must be assigned the same way.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_1.8/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues and QTensors.

    :param objective: the Objective (or QTensor of Objectives) to be differentiated
    :param variable: the Variable with respect to which the objective should be
        differentiated; default None computes the total gradient (one entry per variable)
    :param no_compile: if True, skip the gradient-mode circuit compilation and
        differentiate the objective as given
    :return: a dict mapping each variable to its derivative Objective when
        variable is None, otherwise the derivative Objective for the single variable
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            # recurse once per variable; NOTE(review): *args/**kwargs are not
            # forwarded on this recursion -- confirm that is intended
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate elementwise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # objective does not depend on the variable: derivative is the empty (zero) Objective
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile into gates for which shift rules are available (gradient_mode=True)
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # objective wraps a single expectation value: differentiate the compiled one
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate a (compiled) Objective with respect to a single variable.

    Applies the chain rule: for each argument of the objective's transformation,
    the outer derivative (d transformation / d arg, built via jax/autograd) is
    multiplied with the inner derivative (d arg / d variable).

    :param objective: compiled Objective whose args are expectation values and/or variables
    :param variable: the Variable to differentiate with respect to
    :return: the derivative as an Objective
    :raises TequilaException: if no autodiff backend is available or the result is None
    """
    args = objective.args
    transformation = objective.transformation
    # simple case: no (non-trivial) transformation means the outer derivative is const=1
    trivial_transformation = transformation is None or transformation == identity
    dO = None
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if trivial_transformation:
            outer = 1.0
        else:
            # BUGFIX: build the autodiff outer derivative only when it is needed;
            # previously jax.grad was invoked even for transformation=None,
            # which raises a TypeError (jax/autograd require a callable).
            if __AUTOGRAD__BACKEND__ == "jax":
                df = jax.grad(transformation, argnums=i, holomorphic=True)
            elif __AUTOGRAD__BACKEND__ == "autograd":
                df = jax.grad(transformation, argnum=i)
            else:
                raise TequilaException("Can't differentiate without autograd or jax")
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # expectation value: cache its inner derivative to save redundant work
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Recursive base of the differentiation machinery.

    Descends all the way down to Variables and returns 1.0 or 0.0 depending on
    whether the argument is (is not) identical to `variable`; expectation
    values and nested objectives are delegated to the dedicated helpers.

    :param arg: a Variable, FixedVariable, expectation value or nested Objective
    :param variable: the Variable with respect to which arg is differentiated
    """
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """
    Analytic partial derivative of an expectation value with respect to one variable.

    Every gate of the circuit that carries `variable` contributes one
    shift-rule term; the contributions are summed into a single Objective.

    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: d<H>/d(variable) as an Objective (0.0 if E does not depend on it)
    """
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return when the circuit does not depend on the variable at all
    if variable not in unitary.extract_variables():
        return 0.0
    total = Objective()
    # one shift-rule contribution per gate parametrized by `variable`
    for position, gate in unitary._parameter_map[variable]:
        total += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """
    Gradient of a directly differentiable gate via its shift rule.
    Expects precompiled circuits.

    :param unitary: the QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the Variable with respect to which g is differentiated
    :param hamiltonian: Hamiltonian of the enclosing expectation value
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    """
    # custom gate constructions may provide their own shifted_gates override
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate parameter w.r.t. the variable
    inner = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expval = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * inner) * expval
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.6/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight read-only record used as an intermediate format: it bundles a
    coefficient with the list of 2x2 operator matrices and the list of qubit
    positions they act on.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """List of operator matrices, aligned with `positions`."""
        return self._operators

    @property
    def positions(self):
        """List of qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Container for the per-qubit MPO tensors.

    Each qubit holds one rank-4 tensor with index order
    (left bond, right bond, physical row, physical column); the physical
    dimensions are always 2x2. The container can grow dynamically
    (update_container, wannabe-equivalent to a dynamic array), set entries
    in place (set_tensor) and compress the bond dimensions via SVD
    (compress_mpo).

    BUGFIX: all `np.complex` usages were replaced by the builtin `complex`
    (`np.complex` was a deprecated alias removed in NumPy >= 1.24).
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # start with trivial bond dimension 1 on every qubit
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                                for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max (left) bond dimension over all qubit tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the existing tensor of `qubit`.

        :param set_at: [left, right] to place a full 2x2 matrix, or
            [left, right, row, col] to place a single scalar value
        :param add_operator: the 2x2 matrix (len(set_at)==2) or scalar (len==4)
        """
        # Set a full physical 2x2 matrix at the given bond indices
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set one specific matrix element
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array and place
        `add_operator` (a 2x2 matrix) in the newly created corner.

        :param update_dir: e.g. [1,1,0,0] -> extend the bond dimension along
            every axis marked with 1; the last two (physical) dimensions
            always stay 2x2
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values at the freshly created corner
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compress the MPO bond dimensions via SVD: one forward and one backward
        sweep. Singular values below EPS are discarded and the remaining
        weights are distributed symmetrically (via sqrt) to both sides.
        """
        n_qubits = len(self.container)
        # flatten the two physical indices so each tensor becomes rank-3
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the next tensor
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the previous tensor
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # restore the rank-4 (bond, bond, 2, 2) layout
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build a matrix-product-operator (MPO) representation of a qubit
    Hamiltonian (a sum of Pauli strings) and optionally reconstruct the dense
    operator from it for verification.

    BUGFIXES applied:
    * all `np.complex` usages replaced by the builtin `complex`
      (the alias was removed in NumPy >= 1.24);
    * construct_matrix no longer leaves `H_m` undefined (NameError) when the
      contraction result is not a torch tensor.
    """
    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: "Optional[int]" = None,
                 name: "Optional[Text]" = None,
                 maxdim: "Optional[int]" = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: a tequila QubitHamiltonian (or text representation).
            backend: an optional tensornetwork backend. Defaults to the
                default backend of TensorNetwork.
            n_qubits: number of qubits; derived from the Hamiltonian if omitted.
            name: an optional name for the MPO.
            maxdim: bond-dimension threshold at which a new MPO is started.
        """
        # NOTE: annotations are string literals so they are not evaluated at
        # class-creation time (keeps the class importable without the heavy
        # optional dependencies used only for typing).
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class;
            # presumably provided elsewhere -- confirm before relying on the
            # n_qubits=None path.
            self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        # number of qubits the MPO acts on
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOContainers (self.mpo)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
    def openfermion_to_intermediate(self):
        """
        Translate the Hamiltonian's Pauli strings into a list of SubOperator
        records (coefficient, 2x2 operator matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # map a Pauli letter to its 2x2 matrix
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty item list);
            # its coefficient is later distributed over all qubits
            if first and not paulistring.items():
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate
    def build_single_mpo(self, intermediate, j):
        """
        Build one MPOContainer starting at Pauli term `j`. Terms are appended
        (growing the bond dimension) until self.maxdim would be exceeded.

        :return: (mpo, next_j) where next_j is the first term NOT consumed
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: the coefficient^(1/n_qubits) is spread over all qubit
        # tensors, which are known to be 2x2 matrices at bond indices (0,0).
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # Remaining terms: each term grows the bond dimension by one
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # boundary tensors only grow along their single bond axis;
                # every index appears at most once in my_positions
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            if not j % 100:
                # periodic compression keeps the intermediate bond dimension small
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j
    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as needed to respect maxdim."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list
    def construct_matrix(self):
        """
        Recover the dense operator from self.mpo (summing over all MPOs),
        e.g. to compare with the Hamiltonian matrix obtained from tequila.

        :return: tensor of shape (d,d,d,d) with d = 2**(n_qubits/2)
        """
        mpo = self.mpo
        # Contract over all bond indices;
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Network layout:
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # BUGFIX: previously H_m stayed undefined for non-torch
                # backends; fall back to a plain numpy view of the tensor.
                H_m = np.asarray(res.tensor)
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        # drop the trivial left/right bond indices and keep (row, col) pairs
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.6/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable wrapper around the energy evaluation, handed to scipy as the
    objective function; also keeps the optimization history.

    Overwrites the call function of tequila's container: instead of a fixed
    compiled objective it stores a parametrized Hamiltonian (a callable
    mapping a variable dictionary to a tq Hamiltonian) together with the
    unitary, and rebuilds and simulates the expectation value on every call.
    Should not be used outside the SciPy optimizer in this module.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a Hamiltonian.
    unitary:
        the (parametrized) circuit whose expectation value is evaluated.
    param_keys:
        ordered variable keys matching the positions in the scipy parameter array.
    Ham_derivatives:
        derivatives of the parametrized Hamiltonian (consumed by _GradContainer).
    Eval:
        auxiliary container reference; not read in this class
        (NOTE(review): presumably used elsewhere -- confirm).
    passive_angles:
        variables held fixed during the optimization (merged into every call).
    samples:
        the number of samples to evaluate the expectation value with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__.
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # infostring is filled on the first __call__ (expectationvalue count)
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameter values, ordered like self.param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            the energy (cast to complex; see note at the return statement).
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                # circuit angles are passed through unchanged (real)
                angles[self.param_keys[i]] = p[i]
            else:
                # non-circuit (Hamiltonian) parameters are promoted to complex
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        # NOTE: 'vars' shadows the builtin; kept unchanged here
        vars = format_variable_dictionary(angles)
        # materialize the Hamiltonian at the current parameters
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient counterpart of _EvalContainer, handed to scipy as `jac`.

    Overwrites the call function: for each parameter, the derivative combines
    (a) the circuit gradient of the expectation value (when the parameter
    appears in the unitary) and (b) the expectation value of the Hamiltonian
    derivative (when the parameter appears in the parametrized Hamiltonian).
    See _EvalContainer for the attribute documentation. Should not be used
    outside the SciPy optimizer in this module.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameter values, ordered like self.param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient vector of dtype complex64, one entry per param_key.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUGFIX: allocate a complex vector; the previous real-valued
        # numpy.zeros(self.N) silently dropped imaginary parts before the
        # final cast to complex64.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        # rebuild the full variable dictionary exactly like _EvalContainer
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution of the circuit gradient (parameter-shift via tq.grad)
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution of the Hamiltonian derivative <dH/dp>
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # BUGFIX: accept any numeric scalar (float, complex, numpy scalar);
            # previously only float/numpy.complex64 were recognized, so a plain
            # complex (or numpy.complex128) result was wrongly treated as a
            # callable objective and crashed.
            if isinstance(derivative, (float, complex, numpy.number)):
                dE_vec[i] = derivative
            else:
                # a compiled tequila objective: evaluate it at the current point
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that overwrites the expectation and gradient
    container objects so the objective is built from a parametrized
    Hamiltonian plus unitary (instead of a fixed compiled objective).
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            every variable appearing in the Hamiltonian and the unitary.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module -- this branch would raise NameError; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings/TequilaWarning are not imported in
                # this module -- confirm before relying on this branch.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into actively optimized and frozen (passive) variables
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            the parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the (parametrized) circuit whose expectation value is minimized.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # variables of the Hamiltonian and their derivative Hamiltonians
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # silent warm-up call: fills E.infostring with the expectationvalue count
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective`, get_qng_combos and _QngContainer
                # are undefined in this scope -- this branch would raise
                # NameError; confirm it is never taken in this fork.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # any other string is passed through to scipy as a numerical scheme
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined names as the qng branch above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient via the customized container
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up call: fills dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # records the container histories once per scipy iteration;
            # note: these are class attributes, shared across instances
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is never used afterwards -- dead code?
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Build a local optimize_scipy optimizer and run it on the given
    parametrized Hamiltonian and circuit.

    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian whose expectation value is minimized.
    unitary:
        the parametrized circuit used to prepare the trial state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which the backend runs (passed through to the optimizer).
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # normalize custom gradient dictionaries onto Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUGFIX: previously assign_variable([k[1]]) wrapped the second
            # key element in a list, which is not a valid variable identifier
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_spaclust/beh2_wfn_bl_2.6/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Differentiate Objectives, ExpectationValues, QTensors and parametrized circuits.

    :param objective: the structure to be differentiated
    :param variable: the parameter with respect to which objective is differentiated;
                     None (default) computes the total gradient over all variables.
    :param no_compile: if True, skip the gradient-mode compilation pass.
    :return: dict of variable -> gradient when variable is None, otherwise a single
             gradient Objective (or a number for trivial cases).
    '''
    # total gradient: one recursive call per variable of the objective
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for v in all_vars:
            assert v is not None
        return {v: grad(objective, v, no_compile=no_compile) for v in all_vars}

    variable = assign_variable(variable)

    # tensors of objectives are differentiated elementwise
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)

    # objective does not depend on this variable at all
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile into a form for which shift rules are available
        compiled = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Apply the chain rule over the arguments of an objective.

    For every argument, the outer derivative of the transformation (obtained
    via the autodiff backend) is multiplied with the inner derivative of the
    argument with respect to `variable`, and the products are summed.
    """
    args = objective.args
    transformation = objective.transformation
    accumulated = None
    inner_cache = {}  # avoid recomputing gradients of repeated expectation values
    for pos, arg in enumerate(args):
        # outer derivative d(transformation)/d(arg_pos) via the autodiff backend
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=pos, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=pos)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # simple case: identity transformation has outer derivative 1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # expectation value: cache its inner derivative to save redundancies
            if arg not in inner_cache:
                inner_cache[arg] = __grad_inner(arg=arg, variable=variable)
            inner = inner_cache[arg]
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectation values
            continue

        contribution = outer * inner
        accumulated = contribution if accumulated is None else accumulated + contribution

    if accumulated is None:
        raise TequilaException("caught None in __grad_objective")
    return accumulated
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost link of the chain rule: derivative of `arg` with respect to a
    single Variable. Returns 1.0/0.0 when arg is a (non-)matching variable,
    and recurses for compound objects.

    :param arg: a transform, variable or expectation-value object to differentiate
    :param variable: the Variable with respect to which arg is differentiated
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one, then recompile
        abstract = arg.abstract_expectationvalue
        return compile(__grad_expectationvalue(abstract, variable=variable), **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of <H>_U with respect to one variable,
    obtained by applying the shift rule to every gate in U that carries
    that variable.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which differentiation is performed
    :return: the gradient as an Objective (or 0.0 if U does not depend on variable)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    total = Objective()
    # sum the shift-rule contribution of every gate carrying this variable
    for position, gate in unitary._parameter_map[variable]:
        total += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly differentiable gate via its shift rule.
    Expects precompiled circuits.

    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate being differentiated (must provide shifted_gates())
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective whose evaluation yields the gradient of g w.r.t variable
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # inner derivative of the gate parameter w.r.t. the variable (chain rule)
    inner_grad = __grad_inner(g.parameter, variable)
    dOinc = Objective()
    # BUGFIX (idiom): the unpacked gate no longer rebinds the parameter `g`
    for weight, shifted_gate in g.shifted_gates():
        Ux = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
        dOinc += (weight * inner_grad) * Ex
    return dOinc
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Intermediate-format record for one Hamiltonian term: a scalar coefficient
    together with the operator matrices and the qubit positions they act on.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # keep all three fields together in one private tuple
        self._term = (coefficient, operators, positions)

    @property
    def coefficient(self):
        # scalar prefactor of the term
        return self._term[0]

    @property
    def operators(self):
        # list of operator matrices, aligned with `positions`
        return self._term[1]

    @property
    def positions(self):
        # qubit indices the operators act on
        return self._term[2]
class MPOContainer:
    """
    Container for the per-qubit MPO tensors of one Hamiltonian partition.

    Supports writing operator blocks at given bond positions, growing the bond
    dimension of the tensors (wannabe-equivalent to dynamic arrays) and
    SVD-based compression of the MPO.

    Tensor index convention: [left bond, right bond, physical out, physical in].
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        """Allocate one trivial (1,1,2,2) complex tensor per qubit."""
        self.n_qubits = n_qubits
        # BUGFIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `complex` is the documented replacement
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """ Returns the maximum (left) bond dimension over all tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the existing tensor of `qubit`.

        set_at: [i, j] writes a full 2x2 operator at bond indices (i, j);
                [i, j, k, l] writes a single scalar matrix element.
        """
        if len(set_at) == 2:
            # set a full 2x2 operator block
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # set one specific matrix element
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend the bond dimension along every axis
        marked with a 1; the last two (physical) dimensions always stay 2x2.
        The new operator is placed in the freshly created bottom-right bond corner.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # BUGFIX: dtype=np.complex -> builtin complex (alias removed in NumPy 1.24)
        new_tensor = np.zeros(new_shape, dtype=complex)
        # copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # add the new operator in the new corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO with a forward and a backward SVD sweep, discarding
        singular values below EPS and splitting the singular weights evenly
        between the left and right factors.
        """
        n_qubits = len(self.container)
        # fuse the two physical legs so each tensor is rank 3: (left, right, phys)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # forward sweep
        for q in range(n_qubits - 1):
            # permute [0 1 2] -> [0 2 1] and flatten the first two legs
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # count the non-negligible singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # construct matrix from the square root of the singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # undo the permutation on the left factor
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the next site
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # backward sweep
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the previous site
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # unfuse the physical legs back to 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build matrix-product-operator (MPO) representations of a qubit Hamiltonian.

    The Hamiltonian's Pauli strings are first converted into an intermediate
    SubOperator list and then assembled into one or more MPOContainer objects;
    a new MPO is started whenever the bond dimension would exceed `maxdim`.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
          hamiltonian: the qubit Hamiltonian (or its text form) to encode.
          backend: An optional backend. Defaults to the default backend
            of TensorNetwork.
          n_qubits: number of qubits; inferred from the Hamiltonian if None.
          name: An optional name for the MPO.
          maxdim: bond-dimension cap for each individual MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class as shown;
            # presumably provided elsewhere -- confirm before calling with n_qubits=None
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        # number of qubits the MPO acts on
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of self.hamiltonian into a list of
        SubOperator records (coefficient, operator matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # BUGFIX: dtype=np.complex -> builtin complex (alias removed in NumPy 1.24)
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # only the first string may be the identity (empty items);
            # the identity contribution is distributed over all sites later
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # collect operators and the qubits they act on
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Assemble one MPO starting at term index j; stop when either all terms
        are consumed or the bond dimension reaches self.maxdim.

        Returns the filled MPOContainer and the index of the first unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term fills the initial (1,1,2,2) tensors directly; the
        # coefficient is spread evenly as its n_qubits-th root over all sites.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # BUGFIX: np.complex(...) -> complex(...) (alias removed in NumPy 1.24)
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # Remaining terms: every term grows the bond dimension by one; compress
        # periodically until maxdim is reached or all terms are consumed.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # boundary sites only grow one bond index
                # (it is guaranteed that every index appears only once in positions)
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # operator on this site if present, identity otherwise
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # compress every 100 terms to keep the bond dimension manageable
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as needed to respect maxdim."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # start with 0, then the final value is the count
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        '''
        Recover the dense operator from self.mpo, e.g. to compare with the
        Hamiltonian matrix obtained from tequila. Sums the contributions of all
        MPOs and returns a rank-4 tensor of shape (d,d,d,d), d = 2**(n_qubits/2).
        '''
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Network layout (contract along the horizontal bond legs):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # connect neighbouring bond edges
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # collect dangling (free) edges: left, right, then top and bottom rows
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # contract between all nodes along the non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # BUGFIX: previously H_m was only assigned for torch tensors,
            # raising NameError for any other backend (e.g. numpy)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable wrapper handing the parametrized-Hamiltonian expectation value
    to scipy while keeping the optimization history.

    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tequila Hamiltonian.
    unitary:
        the parametrized circuit whose expectation value is evaluated.
    param_keys:
        ordered variable keys matching the positions in the scipy parameter array.
    samples:
        the number of samples to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        # fixed (non-optimized) variables merged into every evaluation
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        # derivatives of the Hamiltonian w.r.t. its own variables (used by _GradContainer)
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered like param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            the simulated expectation value, cast to a plain complex.
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            # circuit variables are passed through as-is; variables that only
            # appear in the Hamiltonian are coerced to complex
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # build the Hamiltonian at the current variable values and simulate
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable wrapper handing the analytic gradient to scipy.

    Circuit parameters are differentiated via tequila's autodiff of the
    expectation value; Hamiltonian parameters use the precomputed
    Hamiltonian derivatives.
    see _EvalContainer for details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered like param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            complex-valued gradient vector.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUGFIX: allocate a complex vector from the start; numpy.zeros defaults
        # to float64 and assigning a complex derivative into it raises TypeError
        # (the final cast to complex64 happened too late to help)
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # circuit variables are passed through as-is; Hamiltonian-only
            # variables are coerced to complex (consistent with _EvalContainer)
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the parametrized circuit
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the parametrized Hamiltonian
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64):
                dE_vec[i] = derivative
            else:
                # compiled objective: evaluate it at the current variables
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
def initialize_variables(self, all_variables, initial_values, variables):
"""
Convenience function to format the variables of some objective recieved in calls to optimzers.
Parameters
----------
objective: Objective:
the objective being optimized.
initial_values: dict or string:
initial values for the variables of objective, as a dictionary.
if string: can be `zero` or `random`
if callable: custom function that initializes when keys are passed
if None: random initialization between 0 and 2pi (not recommended)
variables: list:
the variables being optimized over.
Returns
-------
tuple:
active_angles, a dict of those variables being optimized.
passive_angles, a dict of those variables NOT being optimized.
variables: formatted list of the variables being optimized.
"""
# bring into right format
variables = format_variable_list(variables)
initial_values = format_variable_dictionary(initial_values)
all_variables = all_variables
if variables is None:
variables = all_variables
if initial_values is None:
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
elif hasattr(initial_values, "lower"):
if initial_values.lower() == "zero":
initial_values = {k:0.0 for k in all_variables}
elif initial_values.lower() == "random":
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
else:
raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
elif callable(initial_values):
initial_values = {k: initial_values(k) for k in all_variables}
elif isinstance(initial_values, numbers.Number):
initial_values = {k: initial_values for k in all_variables}
else:
# autocomplete initial values, warn if you did
detected = False
for k in all_variables:
if k not in initial_values:
initial_values[k] = 0.0
detected = True
if detected and not self.silent:
warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
active_angles = {}
for v in variables:
active_angles[v] = initial_values[v]
passive_angles = {}
for k, v in initial_values.items():
if k not in active_angles.keys():
passive_angles[k] = v
return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian; converted to a tequila QubitHamiltonian
            via convert_PQH_to_tq_QH before use.
        unitary:
            parametrized circuit whose expectation value of Hamiltonian is minimized.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # Collect all optimizable parameters: those of the (parametrized)
        # Hamiltonian plus those of the circuit.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        # Split into actively optimized vs. fixed ("passive") angles.
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        # Energy evaluator: wraps H and U so scipy can call it with a flat
        # parameter vector.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # One silent warm-up call (fills E.infostring and the history).
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this scope (leftover
                # from the upstream tequila implementation) — requesting 'qng'
                # would raise a NameError here. Confirm before relying on QNG.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # Numerical gradient strings ('2-point', ...) are passed through
                # to scipy unchanged.
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` issue as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytic gradient evaluator sharing the energy container E.
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # Per-iteration callback handed to scipy: snapshots the latest
        # energy/angles (and gradient/hessian, if evaluated) from the containers.
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local that is never used; presumably
            # `callback.real_iterations` was intended — confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy function and passes the objective
    construction (Hamiltonian + unitary) down to it.

    Parameters
    ----------
    Hamiltonian:
        the (parametrized) Hamiltonian defining the objective
    unitary:
        the parametrized circuit defining the objective
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # Normalize user-supplied gradient/hessian dictionaries to Variable keys.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: previously the second key was wrapped in a list
            # (assign_variable([k[1]])), producing invalid variable keys.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults and build the optimizer instance
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective (Objective/QTensor): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
        default None: total gradient (one entry per variable).
    :param no_compile (bool): skip the compilation pass if True.
    return: dictionary of Objectives if variable is None, otherwise the
        derivative with respect to the single given variable.
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable.
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # Differentiate element-wise over the tensor of objectives.
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # Derivative of an objective w.r.t. a variable it does not contain is
        # represented as an empty Objective.
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # Compile gates into shift-rule-differentiable form first.
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # Dispatch on the (compiled) structure type.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate a (transformed) Objective w.r.t. one variable by the chain
    rule: sum over arguments of (outer derivative of the transformation) times
    (inner derivative of the argument).
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # Cache inner derivatives of repeated expectation values.
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # holomorphic=True allows complex-valued transformations
            # (this is the "hacked" deviation from upstream tequila).
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Differentiate a single leaf argument with respect to ``variable``.

    Variables differentiate to 1.0 (if identical to ``variable``) or 0.0,
    fixed variables always to 0.0; expectation values are delegated to
    __grad_expectationvalue, and anything else is treated as a nested
    Objective via __grad_objective.
    """
    assert (isinstance(variable, Variable))
    # Guard-clause dispatch on the leaf type.
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Implements the analytic partial derivative of a unitary as it would appear
    in an expectation value, by summing shift-rule contributions over every
    gate that depends on the given variable.

    :param E: the expectation value <U|H|U> to differentiate
    :param variable: the variable with respect to which differentiation is performed
    :return: derivative as an Objective (0.0 if E does not depend on variable)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # All (position, gate) pairs in the circuit that carry this variable.
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly differentiable gate via the parameter-shift
    rule. Expects precompiled circuits.

    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for overwride in custom gate construction
    if hasattr(g, "shifted_gates"):
        # Chain-rule factor: derivative of the gate's parameter w.r.t. variable.
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        for x in shifted:
            # Each shifted term is a (weight, replacement-gate) pair.
            # Note: this deliberately rebinds the loop-local name g.
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.2/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight intermediate record for one Hamiltonian term: a scalar
    coefficient together with the single-qubit operators and the qubit
    positions on which they act. Read-only after construction.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # Keep the three fields together in one tuple; exposed via properties.
        self._term = (coefficient, operators, positions)

    @property
    def coefficient(self):
        return self._term[0]

    @property
    def operators(self):
        return self._term[1]

    @property
    def positions(self):
        return self._term[2]
class MPOContainer:
    """
    Container for the tensors of a matrix-product operator (MPO).

    One rank-4 tensor with index order (bond_left, bond_right, phys, phys)
    is stored per qubit. Supports setting values at given positions, growing
    the bond dimensions like a dynamic array, and SVD-based compression.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # FIX: use the builtin ``complex`` instead of ``np.complex``, which was
        # deprecated in NumPy 1.20 and removed in 1.24 (it was a plain alias).
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                          for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns max bond dimension over all tensors of the container. """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of ``qubit``.

        Parameters
        ----------
        set_at:
            [i, j] to place a full 2x2 matrix at bond indices (i, j), or
            [i, j, k, l] to set the single entry at that index.
        add_operator:
            2x2 array when len(set_at) == 2, scalar when len(set_at) == 4.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of ``qubit`` like a dynamic array and place
        ``add_operator`` into the new corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and must stay 0.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # FIX: builtin ``complex`` replaces the removed ``np.complex`` alias.
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the freshly grown corner
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of the MPO via a forward and a backward SVD sweep; singular
        values below EPS are truncated and their weights split between the
        left- and right factors.
        """
        n_qubits = len(self.container)
        # Fuse the two physical legs so each tensor is rank 3: (bond, bond, 4).
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site.
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site.
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Un-fuse the physical legs back to 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle MPO-Hamiltonians:
    converts a tequila QubitHamiltonian into one or more MPOContainer objects
    (splitting whenever the bond dimension exceeds ``maxdim``) and can
    reconstruct the dense operator for verification.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        # TODO: modifiy docstring
        """
        Initialize a finite MPO object
        Args:
          tensors: The mpo tensors.
          backend: An optional backend. Defaults to the defaulf backend
            of TensorNetwork.
          name: An optional name for the MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class —
            # presumably supplied by a subclass or mixin; confirm before
            # constructing without an explicit n_qubits.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        # Number of qubits / MPO sites.
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build self.mpo (a list of MPOContainer) from self.hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        # for i in range(len(intermediate)):
        #     print(intermediate[i].coefficient)
        #     print(intermediate[i].operators)
        #     print(intermediate[i].positions)
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Convert the QubitHamiltonian's Pauli strings into SubOperator records."""
        # Here, have either a QubitHamiltonian or a file with a of-operator
        # Start with Qubithamiltonian
        def get_pauli_matrix(string):
            # NOTE(review): np.complex was removed in NumPy 1.24; this raises
            # AttributeError on modern NumPy — should be the builtin ``complex``.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=np.complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=np.complex),
                'X': np.array([[0, 1], [1, 0]], dtype=np.complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            # print(coefficient)
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        # print("len intermediate = num Pauli strings", len(intermediate))
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPOContainer starting at term index j, accumulating terms
        until all are consumed or the bond dimension would exceed maxdim.
        Returns (mpo, next_j).
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        # The coefficient is spread evenly across all sites via its n-th root.
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=np.complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=np.complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries
        # while (j smaller than number of intermediates left) and mpo.dim() <= self.maxdim
        # Re-write this based on positions keyword!
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            # """
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         np.complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         np.complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # Compress periodically to keep the bond dimension in check.
            if not j % 100:
                mpo.compress_mpo()
                #print("\t\tAt iteration ", j, " MPO has dimension ", mpo.get_dim())
            j += 1
        mpo.compress_mpo()
        #print("\tAt final iteration ", j-1, " MPO has dimension ", mpo.get_dim())
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Consume all intermediate terms into a list of MPOContainers."""
        n_qubits = self._n_qubits
        # TODO Change to multiple MPOs
        mpo_list = []
        j_global = 0
        num_mpos = 0 # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        # TODO extend to lists of MPOs
        ''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq '''
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        #H = np.zeros((d,d,d,d), dtype='complex')
        # Define network nodes
        #    |   |         |   |
        #   -O---O--...---O---O-
        #    |   |         |   |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Reshape to get tensor of order 4 (get rid of left- and right open indices
            # and combine top&bottom into one)
            # NOTE(review): with a non-pytorch backend H_m would be unbound on
            # the first iteration — presumably the pytorch backend set at
            # module level is assumed; confirm.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.2/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    Attributes
    ---------
    Hamiltonian:
        the (parametrized) Hamiltonian whose expectation value is evaluated.
    unitary:
        the parametrized circuit over which the expectation value is taken.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the expectation value <U|H(p)|U> at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered as param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            energy at p (cast to complex; see note on the return line).
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # Circuit variables stay real; Hamiltonian-only variables are cast to
        # complex before being substituted into the parametrized Hamiltonian.
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Instantiate the Hamiltonian at the current variable values.
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): scipy expects real objective values; returning complex
        # appears intentional here ("jax types confuses optimizers") — confirm
        # the chosen scipy method tolerates it.
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient callable handed to scipy.optimize (as jac=...).

    For every active parameter the total derivative has two contributions:
    the shift-rule gradient of the circuit (when the parameter appears in
    the unitary) and the expectation value of the Hamiltonian derivative
    (when the parameter appears in H). See _EvalContainer for attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient, ordered like
            self.param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient vector as complex64, ordered like self.param_keys.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): real-dtype buffer, yet complex values are assigned
        # below (numpy silently drops imaginary parts with a ComplexWarning)
        # before the final cast to complex64 -- confirm gradients are
        # expected to be real-valued here.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        # rebuild the named-variable dictionary from the flat vector
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0  # running count of expectation values, for infostring
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit parameter (tequila shift rule)
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian-coefficient dependence
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # a plain number means only the Hamiltonian part contributed;
            # otherwise it is a compiled objective that still needs evaluating
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    OptimizerSciPy variant that swaps in the local _EvalContainer and
    _GradContainer so both the unitary parameters and the Hamiltonian
    coefficients can be optimized together.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            every variable of the combined Hamiltonian + unitary problem.
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module's visible imports -- presumably provided by the
                # `from vqe_utils import *`; verify, otherwise this raises
                # NameError instead.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            # NOTE(review): `warnings` and TequilaWarning are likewise not
            # visibly imported here -- confirm they come from vqe_utils.
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            parametrized circuit whose variables are optimized jointly.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # variables of the Hamiltonian coefficients plus their derivatives
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # energy evaluator handed to scipy; called once up front (silently)
        # to populate its infostring
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is undefined in this scope
                # (NameError if the 'qng' path is taken); also get_qng_combos
                # and _QngContainer have no visible import here.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # any other string ('2-point', 'cs', ...) is passed straight
                # through to scipy as a numerical-differentiation request
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` issue as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient evaluator; primed once silently like E above
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # per-iteration bookkeeping, filled from the containers' histories
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward to a user-supplied callback if one was given
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns an unused local -- likely meant to
            # update callback.real_iterations; as written it is a no-op.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Entry point: builds the local optimize_scipy optimizer and runs it on
    the (Hamiltonian, unitary) pair.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian to optimize (converted inside the optimizer).
    unitary:
        the parametrized circuit.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation,
        a dict of variables -> tequila Objective for a custom gradient,
        'qng' for the quantum natural gradient, or None for automatic construction.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        same conventions as `gradient`, keyed by variable pairs.
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        initial variable values; generated if None.
    variables: typing.List[typing.Hashable], optional:
        list of variables to optimize (None: all).
    samples: int, optional:
        shots per circuit evaluation (None activates full wavefunction simulation).
    maxiter: int : (Default value = 100):
        max iterations.
    backend: str, optional:
        simulator backend, auto-chosen if None.
    backend_options: dict, optional:
        additional backend options, forwarded to the compiled objective.
    noise: NoiseModel, optional:
        noise model applied to all expectation values.
    device: str, optional:
        device to run on.
    method: str : (Default = "BFGS"):
        scipy optimization method.
    tol: float : (Default = 1.e-3):
        scipy convergence tolerance.
    method_options: dict, optional:
        scipy method options.
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        variable bounds (see scipy documentation).
    method_constraints: optional:
        scipy constraints.
    silent: bool :
        no printout if True.
    save_history: bool:
        save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # FIX: was assign_variable([k[1]]) -- wrapping k[1] in a list
            # handed assign_variable a list instead of the variable itself,
            # corrupting the hessian keys. Pass the variable directly.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        # normalize keys to tequila Variable objects
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.2/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated (Objective, QTensor,
        ExpectationValue, ...).
    :param variable: parameter with respect to which objective should be
        differentiated. default None: total gradient (dict over all variables).
    :param no_compile: skip the gradient-mode compilation pass if True.
    :return: dict of Objectives (one per variable) when variable is None,
        otherwise a single gradient Objective.
    '''
    if variable is None:
        # None means that all components are created: recurse per variable
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # apply elementwise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # derivative of something independent of `variable` is the zero Objective
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile in gradient mode so every parametrized gate supports shift rules
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch on the (compiled) structure
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate an Objective w.r.t. one variable via the chain rule:
    autodiff of the scalar transformation (outer derivative) times the
    analytic derivative of each argument (inner), summed over all arguments.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of repeated expectation values
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # holomorphic=True is the local modification: allows
            # complex-valued transformations to be differentiated
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner derivative dispatch: differentiates a single argument all the way
    down to variables; returns 1.0 or 0.0 when the argument is (is not)
    the variable itself.

    :param arg: a transform, variable, or expectation-value object to be
        differentiated.
    :param variable: the Variable with respect to which arg should be
        differentiated.
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        # d(var)/d(var) = 1, d(other)/d(var) = 0
        if arg == variable:
            return 1.0
        else:
            return 0.0
    elif isinstance(arg, FixedVariable):
        return 0.0
    elif isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    elif hasattr(arg, "abstract_expectationvalue"):
        # already-compiled expectation value: differentiate the abstract one
        # and re-compile with the original compilation arguments
        E = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(E, variable=variable)
        return compile(dE, **arg._input_args)
    else:
        # nested Objective/transform: recurse
        return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U|H|U> w.r.t. one
    variable, summed over every gate in U that carries that variable.

    :param E: the expectation value to differentiate.
    :param variable: the Variable with respect to which differentiation
        should be performed.
    :return: the gradient as an Objective (0.0 if U does not depend on variable).
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all (position, gate) pairs in U parametrized by this variable
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Shift-rule gradient contribution of a single directly differentiable
    gate. Expects precompiled circuits.

    :param unitary: the QCircuit object containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: the position in unitary at which g appears
    :param variable: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is measured
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # possibility for overwride in custom gate construction
    if hasattr(g, "shifted_gates"):
        # chain rule: derivative of the gate's parameter w.r.t. the variable
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        # sum w * <U(shifted)|H|U(shifted)> over all shifted replacements
        for x in shifted:
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.6/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Read-only record for one Pauli term: a scalar coefficient, the
    2x2 operator matrices, and the qubit indices they act on."""

    def __init__(self, coefficient: float, operators: List, positions: List):
        # Stash the raw inputs; access goes through read-only properties.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices, one per entry of ``positions``."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Holds the list of MPO site tensors -- one rank-4 tensor per qubit with
    index order (left bond, right bond, physical, physical) -- and supports
    setting values, growing the bond dimensions dynamic-array style, and
    SVD-based compression.

    Fixes vs. original: uses the builtin ``complex`` instead of the
    ``np.complex`` alias (deprecated in NumPy 1.20, removed in 1.24, same
    complex128 dtype), and no longer mutates the caller's ``update_dir`` list.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # one (1,1,2,2) tensor per qubit; bond dims grow via update_container
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximum (left) bond dimension over all site tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the site tensor of `qubit`.

        set_at: [i, j] writes a full 2x2 matrix at bond indices (i, j);
                [i, j, k, l] writes a single scalar entry.

        Raises:
            Exception: if set_at has neither length 2 nor 4.
        """
        if len(set_at) == 2:
            # Set a full 2x2 physical matrix
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # Set a single scalar entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the site tensor of `qubit` like a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend the bond dimension along every
        axis marked 1; the last two (physical) dimensions must stay 2x2.
        The 2x2 `add_operator` is placed in the new bottom-right bond slot.

        Raises:
            Exception: if update_dir has a bad length or asks to grow a
            physical dimension.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # FIX: build a new list instead of `update_dir += [0, 0]`,
                # which mutated the caller's list in place.
                update_dir = list(update_dir) + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values in the freshly grown bond slot
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO with a left-to-right then right-to-left SVD sweep,
        dropping singular values below EPS and distributing sqrt(s) to both
        sides of each cut.
        """
        n_qubits = len(self.container)
        # flatten the two physical legs into one for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            # Undo the permutation and push vh into the next site
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # restore the (left, right, 2, 2) shape
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
"""
Class building up on tensornetwork FiniteMPO to handle
MPO-Hamiltonians
"""
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: tequila QubitHamiltonian (or its text form) to be
                converted into an MPO.
            backend: An optional backend. Defaults to the default backend of
                TensorNetwork. (Not referenced in this method's body.)
            n_qubits: number of qubits/sites; when omitted it is derived via
                self.get_n_qubits() (defined outside this excerpt).
            name: An optional name for the MPO. (Not referenced here.)
            maxdim: cap on the MPO bond dimension during construction.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        # NOTE(review): falsy check -- n_qubits=0 would also trigger the
        # fallback; presumably qubit counts are always >= 1.
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()
    @property
    def n_qubits(self):
        # Number of qubits / MPO sites (read-only view of _n_qubits).
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian to the intermediate SubOperator list and
        then to an MPO, stored on self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
def openfermion_to_intermediate(self):
# Here, have either a QubitHamiltonian or a file with a of-operator
# Start with Qubithamiltonian
def get_pauli_matrix(string):
pauli_matrices = {
'I': np.array([[1, 0], [0, 1]], dtype=np.complex),
'Z': np.array([[1, 0], [0, -1]], dtype=np.complex),
'X': np.array([[0, 1], [1, 0]], dtype=np.complex),
'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex)
}
return pauli_matrices[string.upper()]
intermediate = []
first = True
# Store all paulistrings in intermediate format
for paulistring in self.hamiltonian.paulistrings:
coefficient = paulistring.coeff
# print(coefficient)
operators = []
positions = []
# Only first one should be identity -> distribute over all
if first and not paulistring.items():
positions += []
operators += []
first = False
elif not first and not paulistring.items():
raise Exception("Only first Pauli should be identity.")
# Get operators and where they act
for k,v in paulistring.items():
positions += [k]
operators += [get_pauli_matrix(v)]
tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
intermediate += [tmp_op]
# print("len intermediate = num Pauli strings", len(intermediate))
return intermediate
def build_single_mpo(self, intermediate, j):
# Set MPO Container
n_qubits = self._n_qubits
mpo = MPOContainer(n_qubits=n_qubits)
# ***********************************************************************
# Set first entries (of which we know that they are 2x2-matrices)
# Typically, this is an identity
my_coefficient = intermediate[j].coefficient
my_positions = intermediate[j].positions
my_operators = intermediate[j].operators
for q in range(n_qubits):
if not q in my_positions:
mpo.set_tensor(qubit=q, set_at=[0,0],
add_operator=np.complex(my_coefficient)**(1/n_qubits)*
np.eye(2))
elif q in my_positions:
my_pos_index = my_positions.index(q)
mpo.set_tensor(qubit=q, set_at=[0,0],
add_operator=np.complex(my_coefficient)**(1/n_qubits)*
my_operators[my_pos_index])
# ***********************************************************************
# All other entries
# while (j smaller than number of intermediates left) and mpo.dim() <= self.maxdim
# Re-write this based on positions keyword!
j += 1
while j < len(intermediate) and mpo.get_dim() < self.maxdim:
# """
my_coefficient = intermediate[j].coefficient
my_positions = intermediate[j].positions
my_operators = intermediate[j].operators
for q in range(n_qubits):
# It is guaranteed that every index appears only once in positions
if q == 0:
update_dir = [0,1]
elif q == n_qubits-1:
update_dir = [1,0]
else:
update_dir = [1,1]
# If there's an operator on my position, add that
if q in my_positions:
my_pos_index = my_positions.index(q)
mpo.update_container(qubit=q, update_dir=update_dir,
add_operator=
np.complex(my_coefficient)**(1/n_qubits)*
my_operators[my_pos_index])
# Else add an identity
else:
mpo.update_container(qubit=q, update_dir=update_dir,
add_operator=
np.complex(my_coefficient)**(1/n_qubits)*
np.eye(2))
if not j % 100:
mpo.compress_mpo()
#print("\t\tAt iteration ", j, " MPO has dimension ", mpo.get_dim())
j += 1
mpo.compress_mpo()
#print("\tAt final iteration ", j-1, " MPO has dimension ", mpo.get_dim())
return mpo, j
def intermediate_to_mpo(self, intermediate):
n_qubits = self._n_qubits
# TODO Change to multiple MPOs
mpo_list = []
j_global = 0
num_mpos = 0 # Start with 0, then final one is correct
while j_global < len(intermediate):
current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
mpo_list += [current_mpo]
num_mpos += 1
return mpo_list
def construct_matrix(self):
# TODO extend to lists of MPOs
''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq '''
mpo = self.mpo
# Contract over all bond indices
# mpo.container has indices [bond, bond, physical, physical]
n_qubits = self._n_qubits
d = int(2**(n_qubits/2))
first = True
H = None
#H = np.zeros((d,d,d,d), dtype='complex')
# Define network nodes
# | | | |
# -O--O--...--O--O-
# | | | |
for m in mpo:
assert(n_qubits == len(m.container))
nodes = [tn.Node(m.container[q], name=str(q))
for q in range(n_qubits)]
# Connect network (along double -- above)
for q in range(n_qubits-1):
nodes[q][1] ^ nodes[q+1][0]
# Collect dangling edges (free indices)
edges = []
# Left dangling edge
edges += [nodes[0].get_edge(0)]
# Right dangling edge
edges += [nodes[-1].get_edge(1)]
# Upper dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(2)]
# Lower dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(3)]
# Contract between all nodes along non-dangling edges
res = tn.contractors.auto(nodes, output_edge_order=edges)
# Reshape to get tensor of order 4 (get rid of left- and right open indices
# and combine top&bottom into one)
if isinstance(res.tensor, torch.Tensor):
H_m = res.tensor.numpy()
if not first:
H += H_m
else:
H = H_m
first = False
return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.6/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Unlike the stock tequila container, the objective is rebuilt on every
    call from a parametrized Hamiltonian (a callable of the variables) and
    a fixed unitary, so Hamiltonian coefficients can be optimized as well.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a QubitHamiltonian.
    unitary:
        the fixed circuit used in the expectation value.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        call a wrapped objective.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs
        Returns
        -------
        numpy.array:
            value of self.objective with p translated into variables, as a numpy array.
        """
        angles = {}
        # Circuit variables keep their raw (real) value; every other key
        # (Hamiltonian coefficient) is cast to complex before substitution.
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Rebuild the Hamiltonian with the current values and evaluate
        # <U|H|U> on the qulacs backend.
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): a complex return value may confuse some scipy
        # methods — presumably deliberate (see original comment); confirm.
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Gradient callable handed to scipy by the SciPy optimizer; should not
    be used elsewhere.

    For every optimized parameter the derivative collects two
    contributions: the circuit gradient via tq.grad (if the parameter
    appears in the unitary) and the expectation value of the Hamiltonian
    derivative (if the parameter appears in the Hamiltonian).
    see _EvalContainer for details on the shared attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector ``p``.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient, ordered like
            ``self.param_keys``.
        args
        kwargs
        Returns
        -------
        numpy.array:
            complex gradient vector of length ``self.N``.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUG FIX: the accumulator must be complex. The original float64
        # array raised TypeError on assignment of a Python complex and
        # silently discarded the imaginary part of numpy complex scalars.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        # Circuit parameters stay real; Hamiltonian parameters are complex.
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution from the circuit (analytic tq gradient).
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution from the parametrized Hamiltonian.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # Plain numbers can be stored directly; otherwise ``derivative``
            # is a compiled objective and has to be evaluated first.
            # (Accepting builtin complex and all numpy complex scalar types
            # here fixes a latent crash for complex-valued derivatives.)
            if isinstance(derivative, (float, complex, numpy.complexfloating)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    overwrite the expectation and gradient container objects

    Variant of tequila's OptimizerSciPy that optimizes over both circuit
    parameters and parametrized-Hamiltonian coefficients, using the local
    _EvalContainer/_GradContainer defined in this module.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian + circuit).
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into optimized (active) and fixed (passive) variables
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the circuit whose expectation value is minimized.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Collect all variables: Hamiltonian parameters first, then the
        # circuit parameters.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # Energy evaluation container; one silent warm-up call populates
        # ``infostring`` and the history.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        # NOTE(review): the QNG branches reference an undefined name
        # ``objective`` — they would raise NameError if taken; confirm
        # whether QNG is supported in this hacked version.
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # pass the string (e.g. '2-point') straight through to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytic gradient container; silent warm-up as for E above.
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # Records per-iteration data from the containers' histories.
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        # ``args=(Es,)`` is absorbed by the containers' *args.
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this local is never used afterwards — presumably the
        # intent was to patch the callback history; confirm.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and pass down the objective construction
    down
    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian to be minimized together with the circuit.
    unitary:
        the circuit whose expectation value of Hamiltonian is minimized.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUG FIX: the second key element must be passed as a scalar.
            # ``assign_variable([k[1]])`` wrapped it in a list, which
            # assign_variable cannot process (upstream tequila uses k[1]).
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.6/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param obj (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
    :param variables (list of Variable): parameter with respect to which obj should be differentiated.
        default None: total gradient.
    return: dictionary of Objectives, if called on gate, circuit, exp.value, or objective; if Variable or Transform, returns number.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        # recurse once per variable and collect the partials in a dict
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    if isinstance(objective, QTensor):
        # element-wise gradient over a tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    # variable not present -> derivative is identically zero
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile into gates for which shift rules are known
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # dispatch on what we are differentiating
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    # Chain rule over the arguments of the objective:
    # d/dv f(args) = sum_i (df/darg_i) * (darg_i/dv)
    args = objective.args
    transformation = objective.transformation
    dO = None

    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # outer derivative df/darg_i via autodiff of the transformation
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner

    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    a modified loop over __grad_objective, which gets derivatives
    all the way down to variables, return 1 or 0 when a variable is (isnt) identical to var.
    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which par should be differentiated.
    '''
    assert (isinstance(variable, Variable))
    # Leaf cases: plain variables differentiate to 1 (match) or 0.
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    # Expectation values use the analytic shift rule.
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    # Already-compiled expectation values: differentiate the abstract one
    # and recompile with the same backend arguments.
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        gradient = __grad_expectationvalue(abstract, variable=variable)
        return compile(gradient, **arg._input_args)
    # Anything else is a nested objective -> recurse.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param unitary: the unitary whose gradient should be obtained
    :param variables (list, dict, str): the variables with respect to which differentiation should be performed.
    :return: vector (as dict) of dU/dpi as Objective (without hamiltonian)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    # every gate depending on ``variable`` contributes one shift-rule term
    param_gates = unitary._parameter_map[variable]

    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc

    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for overwride in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate parameter w.r.t. ``variable``
    inner_grad = __grad_inner(g.parameter, variable)
    dOinc = Objective()
    # each shifted gate yields one weighted expectation-value term
    for weight, shifted_gate in g.shifted_gates():
        Ux = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
        dOinc += (weight * inner_grad) * Ex
    return dOinc
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_3.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Immutable helper record for one Hamiltonian term in the intermediate
    format: a scalar coefficient, the single-qubit operator matrices, and
    the qubit positions those operators act on.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # Stored privately; exposed read-only through the properties below.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        # Scalar prefactor of this term.
        return self._coefficient

    @property
    def operators(self):
        # List of 2x2 operator matrices.
        return self._operators

    @property
    def positions(self):
        # Qubit indices the operators act on (same order as ``operators``).
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress
    the MPO.

    Each entry of ``container`` is a rank-4 tensor with index order
    (left bond, right bond, physical, physical); the physical dimensions
    are always 2 (qubits).
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # One (1,1,2,2) tensor per qubit; bond dimensions grow as terms
        # are added.  Fix: ``np.complex`` was deprecated in NumPy 1.20 and
        # removed in 1.24 — use the concrete dtype ``np.complex128``.
        self.container = [np.zeros((1, 1, 2, 2), dtype=np.complex128)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Returns the maximum (left) bond dimension over all tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write ``add_operator`` into the tensor of ``qubit``.

        set_at: where to put data.  Two indices select a whole 2x2 matrix
        slot at that bond position; four indices set a single scalar entry.

        Raises a generic Exception if set_at has a different length.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array: grow the bond dimensions of
        ``qubit``'s tensor and place ``add_operator`` in the new corner slot.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 only.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # Two entries given: physical dims implicitly unchanged.
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # Fix: np.complex -> np.complex128 (alias removed in NumPy 1.24).
        new_tensor = np.zeros(new_shape, dtype=np.complex128)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values into the freshly created last bond slot
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD: a left-to-right sweep followed by a
        right-to-left sweep, truncating singular values below EPS and
        distributing sqrt(s) to both sides of each cut.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each tensor is rank 3.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site.
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site.
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the merged physical index back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
# class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle
    MPO-Hamiltonians.

    The Hamiltonian is converted Pauli-term by Pauli-term into a list of
    MPOs; a new partial MPO is started whenever the bond dimension of the
    current one would reach ``maxdim``.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian to encode as an MPO.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.  NOTE(review): currently unused here.
            n_qubits: number of qubits; derived from the Hamiltonian if None.
            name: An optional name for the MPO.  NOTE(review): currently unused.
            maxdim: bond-dimension threshold at which a new partial MPO starts.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this chunk of the
            # file — presumably provided elsewhere; confirm.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Hamiltonian's Pauli strings into a list of
        SubOperator records (coefficient, 2x2 matrices, positions).
        """
        # Here, have either a QubitHamiltonian or a file with a of-operator
        # Start with Qubithamiltonian
        def get_pauli_matrix(string):
            # Fix: np.complex was removed in NumPy 1.24 -> np.complex128.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=np.complex128),
                'Z': np.array([[1, 0], [0, -1]], dtype=np.complex128),
                'X': np.array([[0, 1], [1, 0]], dtype=np.complex128),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex128)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index ``j``; stop when all terms are
        consumed or the bond dimension reaches self.maxdim.

        Returns:
            (mpo, next_j): the partial MPO and the index of the first
            unconsumed term.
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity.  The coefficient is spread evenly
        # over all qubits via the n_qubits-th (complex principal) root.
        # Fix: np.complex(...) -> complex(...) (alias removed in NumPy 1.24).
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimensions term by term and
        # compress regularly until maxdim is hit.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         np.eye(2))
            # Compress every 100 terms to keep the bond dimension in check.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many partial MPOs as maxdim requires."""
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover matrix, e.g. to compare with Hamiltonian that we get from tq.

        Returns a rank-4 tensor of shape (d, d, d, d) with d = 2**(n_qubits/2),
        summed over all partial MPOs in self.mpo.
        """
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # NOTE(review): H is only accumulated when the contracted tensor
            # is a torch.Tensor (module default backend is "pytorch"); with a
            # different backend H would stay None — confirm intended.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
                if not first:
                    H += H_m
                else:
                    H = H_m
                    first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_3.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Container Class to access scipy and keep the optimization history,
    with __call__ overwritten to evaluate a *parametrized* Hamiltonian:
    the Hamiltonian is rebuilt from the variables on every evaluation.

    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable returning the tequila Hamiltonian for given variables.
    unitary:
        the circuit whose expectation value is measured.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        call a wrapped objective.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs

        Returns
        -------
        complex:
            energy of the Hamiltonian expectation value at p.
        """
        angles = {}
        for i in range(self.N):
            # Circuit parameters stay real; everything else is treated as a
            # (possibly complex) Hamiltonian coefficient.
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            # Merge in the fixed (non-optimized) variables.
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Rebuild the Hamiltonian for the current variable values.
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Container Class to access scipy and keep the optimization history,
    with __call__ overwritten to assemble the gradient from two sources:
    circuit-parameter derivatives (via tq.grad) and Hamiltonian-coefficient
    derivatives (via the precomputed Ham_derivatives).

    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        call the wrapped gradient.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient vector (complex64), one entry per param_key.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # Same real/complex split as in _EvalContainer.__call__.
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution from the circuit parameter (analytic tq gradient).
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution from a variable Hamiltonian coefficient:
            # <psi| dH/dtheta |psi> evaluated directly.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # derivative is either already a number (Hamiltonian part only)
            # or a compiled objective that still needs to be evaluated.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        # NOTE(review): dE_vec is a real array, so any imaginary part of the
        # per-entry derivatives is discarded before the complex64 cast — confirm.
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant for parametrized Hamiltonians: overwrites the
    expectation-value and gradient container objects so that variables may
    live in the Hamiltonian as well as in the circuit.
    """

    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            all variables (Hamiltonian and circuit) of the problem.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k: 0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module — this branch would raise NameError; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` and `TequilaWarning` are not
                # imported in this module — would raise NameError; confirm.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            the parametrized Hamiltonian (project type, converted via
            convert_PQH_to_tq_QH).
        unitary:
            the circuit whose expectation value is minimized.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # Hamiltonian variables + their derivative operators d H / d theta.
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # Energy evaluation container (also called once up front to fill
        # the infostring).
        E = _EvalContainer(Hamiltonian=H,
                           unitary=unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective`, `get_qng_combos` and
                # `_QngContainer` are undefined in this module — this QNG
                # branch would raise NameError if taken; confirm.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # A string like '2-point' is passed through to scipy for
                # numerical differentiation.
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient, dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined names as the QNG branch above.
                combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytic gradient via the overwritten container.
            dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                                unitary=unitary,
                                Hamiltonian=H,
                                Eval=E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            # Per-iteration snapshots taken from the containers' histories.
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # Forward to a user-supplied callback if one was given.
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): assigned to a local that is never used —
            # likely intended to patch the iteration count; confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy function instead of tequila's own and
    passes the objective construction down to it.

    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian (project type) to minimize over.
    unitary:
        the circuit whose expectation value is minimized.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run the circuits.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # Fix: previously the second key was wrapped in a list,
            # assign_variable([k[1]]), which assign_variable cannot convert
            # -> crash for any user-supplied hessian dictionary.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_3.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
        default None: total gradient (dictionary over all variables).
    :param no_compile: if True, skip the compilation pass (objective assumed already compiled).
    return: dictionary of Objectives if variable is None; otherwise the gradient Objective
        w.r.t. the given variable (empty Objective if objective does not depend on it).
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    if isinstance(objective, QTensor):
        # Element-wise gradient over the tensor of objectives.
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    if variable not in objective.extract_variables():
        # Independent of the variable: gradient is the (empty) zero objective.
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # gradient_mode=True decomposes gates into shift-rule-friendly form.
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # NOTE(review): __grad_expectationvalue is not defined in this chunk of
    # the file — presumably defined elsewhere in the module; confirm.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Accumulate d(objective)/d(variable) over all arguments of the objective.

    Applies the chain rule: for every argument, the outer derivative of the
    transformation (obtained via autodiff) is multiplied with the inner
    derivative of the argument itself, and the products are summed into a
    single Objective.
    """
    arguments = objective.args
    trafo = objective.transformation
    # cache inner derivatives of expectation values so repeated arguments
    # are only differentiated once
    inner_cache = {}
    total = None
    for position, argument in enumerate(arguments):
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_df = jax.grad(trafo, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_df = jax.grad(trafo, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # shortcut: a missing or identity transformation has outer derivative 1
        if trafo is None or trafo == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_df)

        if hasattr(argument, "U"):
            # expectation value: reuse a previously computed inner derivative
            try:
                inner = inner_cache[argument]
            except KeyError:
                inner = __grad_inner(arg=argument, variable=variable)
                inner_cache[argument] = inner
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=argument, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        contribution = outer * inner
        total = contribution if total is None else total + contribution

    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """Differentiate a single objective argument with respect to *variable*.

    Recurses all the way down to plain variables: returns 1.0 when *arg* is
    the variable itself, 0.0 when it is a different or fixed variable, and
    otherwise dispatches back to the expectation-value / objective rules.

    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg is differentiated
    """
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        # recursion base case: d(arg)/d(variable) is 1 or 0
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one and
        # recompile with the original input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # anything else is treated as a (nested) objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """Analytic partial derivative of an expectation value w.r.t. *variable*.

    Sums the shift-rule contribution of every gate in E.U that carries the
    variable; returns 0.0 immediately if the circuit does not depend on it.
    """
    circuit = E.U
    observable = E.H
    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))

    # fast return if possible
    if variable not in circuit.extract_variables():
        return 0.0

    result = Objective()
    # every (position, gate) pair in the circuit that depends on the variable
    for position, gate in circuit._parameter_map[variable]:
        result += __grad_shift_rule(circuit, gate, position, variable, observable)
    assert result is not None
    return result
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """Shift-rule gradient of a single directly differentiable gate.

    Replaces gate *g* (at position *i* in *unitary*) by each of its shifted
    versions, weights the resulting expectation values and sums them into an
    Objective. Expects precompiled circuits.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the variable w.r.t. which g is differentiated
    :param hamiltonian: observable measured against unitary
    :return: an Objective whose evaluation yields the gradient contribution
    """
    # gates constructed without shift data cannot be differentiated here
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # chain-rule factor from the gate's parameter down to the variable
    parameter_grad = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, shifted_gate in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * parameter_grad) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.2/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Intermediate container for one Pauli term of a Hamiltonian.

    Bundles a scalar coefficient with the single-qubit operator matrices and
    the qubit positions they act on; used as a stepping stone between a
    qubit Hamiltonian and the MPO construction.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this Pauli term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices, aligned index-wise with :attr:`positions`."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """Matrix-product-operator container with dynamically growing bonds.

    One rank-4 tensor is stored per qubit with index layout
    (left bond, right bond, physical out, physical in); the physical
    dimensions are always 2. Values can be set at given positions, tensors
    can be grown like a dynamic array, and the whole MPO can be compressed
    via SVD sweeps.
    """

    def __init__(self, n_qubits: int):
        """Initialize one (1, 1, 2, 2) zero tensor per qubit."""
        self.n_qubits = n_qubits
        # np.complex was removed in NumPy 1.24; the builtin `complex` is the
        # documented replacement and yields the same complex128 dtype.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left-bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """Write *add_operator* into the tensor of *qubit*.

        set_at: where to put the data -- either two bond indices (then
        add_operator must be a full 2x2 matrix), or four indices addressing
        a single entry (then add_operator is a scalar).

        Raises:
            Exception: if set_at has neither length 2 nor length 4.
        """
        if len(set_at) == 2:
            # Set a full 2x2 physical matrix at the given bond position
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # Set one specific scalar entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """Grow the tensor of *qubit* like a dynamic array and append data.

        update_dir: e.g. [1, 1, 0, 0] -> extend the bond dimension along each
        axis carrying a 1; the last two (physical) dimensions must stay 2x2.
        *add_operator* is written into the new bottom-right bond corner.

        Raises:
            Exception: on malformed update_dir, or on an attempt to grow the
                physical dimensions.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # Pad with zeros for the physical axes. Build a NEW list so
                # the caller's argument is not mutated (the previous
                # `update_dir += [0, 0]` modified the caller's list in place).
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # `complex` replaces the removed np.complex alias (NumPy >= 1.24).
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values in the freshly created bottom-right bond corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """Compress the MPO with a forward and a backward SVD sweep.

        Singular values below EPS are truncated; the square roots of the
        kept singular values are distributed onto both neighbouring tensors.
        """
        n_qubits = len(self.container)
        # Fuse the two physical indices so each site is a rank-3 tensor
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the remainder into the right neighbour
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the remainder into the left neighbour
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle
    MPO-Hamiltonians.

    A qubit Hamiltonian is first brought into an intermediate list of
    SubOperator terms (one per Pauli string) and then converted into one or
    more MPOContainer objects whose bond dimension is capped at *maxdim*.
    """

    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian to represent as an MPO.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork. (currently unused; kept for interface
                compatibility)
            n_qubits: number of qubits; if falsy it is derived via
                self.get_n_qubits().
            name: An optional name for the MPO. (currently unused)
            maxdim: maximal bond dimension of a single MPO before the
                remaining Hamiltonian terms are split off into a new MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class as
            # shown -- presumably provided elsewhere; confirm before relying
            # on the n_qubits=None path.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build self.mpo (a list of MPOContainer) from self.hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Convert the Hamiltonian's Pauli strings into SubOperator objects.

        Only the first Pauli string may be a pure identity term; it is
        represented by empty operator/position lists so its coefficient is
        later distributed over all sites.

        Returns:
            list of SubOperator, one per Pauli string.

        Raises:
            Exception: if an identity term appears after the first string.
        """

        def get_pauli_matrix(string):
            # np.complex was removed in NumPy 1.24; the builtin `complex`
            # produces the same complex128 dtype.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """Accumulate terms of *intermediate* (starting at index j) into one MPO.

        Terms are added until either all are consumed or the bond dimension
        reaches self.maxdim; the MPO is compressed every 100 terms. The n-th
        root of each coefficient is spread evenly over all sites.

        Returns:
            (MPOContainer, int): the built MPO and the index of the first
            unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimension by one per term
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         np.eye(2))
            # Compress periodically to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split *intermediate* into as many MPOs as self.maxdim requires.

        Returns:
            list of MPOContainer covering all terms.
        """
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """Recover the dense operator, e.g. to compare with the tq Hamiltonian.

        Contracts every MPO in self.mpo over its bond indices and sums the
        results.

        Returns:
            array of shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #    |  |       |  |
        #   -O--O--...--O--O-
        #    |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # NOTE(review): if the backend is not pytorch, H_m is never
            # assigned and the accumulation below raises -- confirm the
            # module-level pytorch backend is always active.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.2/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
"""
Overwrite the call function
Container Class to access scipy and keep the optimization history.
This class is used by the SciPy optimizer and should not be used elsewhere.
Attributes
---------
objective:
the objective to evaluate.
param_keys:
the dictionary mapping parameter keys to positions in a numpy array.
samples:
the number of samples to evaluate objective with.
save_history:
whether or not to save, in a history, information about each time __call__ occurs.
print_level
dictates the verbosity of printing during call.
N:
the length of param_keys.
history:
if save_history, a list of energies received from every __call__
history_angles:
if save_history, a list of angles sent to __call__.
"""
def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
print_level: int = 3):
self.Hamiltonian = Hamiltonian
self.unitary = unitary
self.samples = samples
self.param_keys = param_keys
self.N = len(param_keys)
self.save_history = save_history
self.print_level = print_level
self.passive_angles = passive_angles
self.Eval = Eval
self.infostring = None
self.Ham_derivatives = Ham_derivatives
if save_history:
self.history = []
self.history_angles = []
def __call__(self, p, *args, **kwargs):
"""
call a wrapped objective.
Parameters
----------
p: numpy array:
Parameters with which to call the objective.
args
kwargs
Returns
-------
numpy.array:
value of self.objective with p translated into variables, as a numpy array.
"""
angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
for i in range(self.N):
if self.param_keys[i] in self.unitary.extract_variables():
angles[self.param_keys[i]] = p[i]
else:
angles[self.param_keys[i]] = complex(p[i])
if self.passive_angles is not None:
angles = {**angles, **self.passive_angles}
vars = format_variable_dictionary(angles)
Hamiltonian = self.Hamiltonian(vars)
#print(Hamiltonian)
#print(self.unitary)
#print(vars)
Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
#print(Expval)
E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
if self.print_level > 2:
print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
elif self.print_level > 1:
print("E={:+2.8f}".format(E))
if self.save_history:
self.history.append(E)
self.history_angles.append(angles)
return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.

    The gradient is assembled from two contributions per parameter:
    * circuit part: analytic tequila gradient of <H(p)> w.r.t. a circuit angle
    * Hamiltonian part: expectation value of dH/dp for parameters that enter
      the Hamiltonian itself (supplied via Ham_derivatives).
    """
    def __call__(self, p, *args, **kwargs):
        """
        call the wrapped qng.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient vector ordered like self.param_keys, cast to complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated real (numpy.zeros) but values that
        # may be complex are assigned into it below, which silently drops any
        # imaginary part before the final complex64 cast -- confirm all
        # derivatives are expected to be real here.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        # Split parameters the same way as _EvalContainer.__call__: circuit
        # angles stay real, Hamiltonian parameters are cast to complex.
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        # running count of evaluated expectation values (reported in infostring)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # circuit contribution: analytic gradient of <H> w.r.t. the angle
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian contribution: <dH/dp> for Hamiltonian parameters
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # A plain number can be stored directly; a (compiled) objective
            # still has to be evaluated at the current variable values.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    overwrite the expectation and gradient container objects

    Variant of tequila's SciPy optimizer that optimizes both circuit angles
    and Hamiltonian parameters: instead of one precompiled objective it uses
    the local _EvalContainer/_GradContainer, which rebuild the expectation
    value from (Hamiltonian, unitary) on every evaluation.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            the union of Hamiltonian parameters and circuit variables.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        # no-op assignment; kept as-is
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction ("zero" / "random")
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported
                # explicitly here -- presumably supplied by
                # `from vqe_utils import *`; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` and TequilaWarning are not imported
                # explicitly here either -- presumably via vqe_utils; confirm.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into variables that are optimized vs. held fixed
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (PQH) to optimize; converted to a tq
            QubitHamiltonian internally.
        unitary:
            the circuit whose expectation value is minimized.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # convert the parametrized Hamiltonian and collect its own variables
        # and analytic derivatives
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        # the full variable set is Hamiltonian parameters + circuit angles
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # energy container; evaluated once silently to populate its infostring
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this adapted
                # signature (it was replaced by Hamiltonian/unitary) -- this
                # qng branch would raise NameError if taken; confirm.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # numerical gradient instruction ('2-point', 'cs', '3-point')
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` as in the string
                # qng branch above; confirm before using qng here.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient container; evaluated once silently to
            # populate its infostring
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # records per-iteration data from the containers' histories
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward to a user-supplied callback if one was given
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a dead local; the value is never
            # used afterwards -- likely leftover from upstream tequila code.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy class instead of the stock tequila optimizer
    and passes the (Hamiltonian, unitary) pair down so the objective is
    constructed inside the optimizer.

    Parameters
    ----------
    Hamiltonian:
        The (parametrized) Hamiltonian whose expectation value is minimized.
    unitary:
        The circuit that prepares the state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation
        (does not work in combination with all optimizers),
        dictionary of variables and tequila objectives to define own gradients,
        or None for automatic construction (default).
    hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective], None], optional:
        same conventions as gradient, keyed by pairs of variables.
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and
        floating point numbers; if None they are auto-initialized.
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize (None: optimize all).
    samples: int, optional:
        samples/shots per circuit evaluation (None: exact simulation).
    maxiter: int: (Default value = 100):
        maximum number of iterations.
    backend: str, optional:
        Simulator backend; chosen automatically if None.
    backend_options: dict, optional:
        Additional options passed to the compiled objective on every call.
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device the backend should run on.
    method: str: (Default = "BFGS"):
        Optimization method (see scipy documentation).
    tol: float: (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation).
    method_options: dict, optional:
        Dictionary of scipy method options.
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation).
    method_constraints: optional:
        constraints (see scipy documentation).
    silent: bool:
        No printout if True.
    save_history: bool:
        Save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # Normalize user-supplied analytic gradients into the canonical
    # {Variable: Objective} format.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUG FIX: previously the second key element was wrapped in a
            # list -- assign_variable([k[1]]) -- which assign_variable cannot
            # interpret as a variable name; pass the element itself.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults and build the optimizer instance
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        # canonicalize keys to Variable instances
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.2/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which objective should be
        differentiated; default None computes the total gradient
        (one derivative Objective per variable of the objective).
    :param no_compile: if True, skip the gradient-mode compilation pass and
        differentiate the objective as given.
    :return: dictionary of Objectives (one per variable) if variable is None;
        otherwise a single derivative (an Objective, or a plain number for
        trivial cases such as a variable-independent objective).
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    # QTensor: differentiate elementwise via numpy's vectorize
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    # objective does not depend on the variable -> derivative is the empty Objective
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # gradient_mode=True decomposes gates into forms that admit shift rules
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # dispatch on the (compiled) objective's structure
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # single expectation value wrapped in an Objective: differentiate its core
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over the arguments of an Objective's transformation:
    dO/dvariable = sum_i (d transformation / d arg_i) * (d arg_i / d variable).
    The outer derivative is built with jax/autograd; the inner derivative is
    obtained recursively via __grad_inner.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None

    # cache inner derivatives of expectation values (args with a .U attribute)
    # so each distinct expectation value is differentiated only once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # holomorphic=True since tequila objectives may carry complex values
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        # every term vanished or there were no args -- treated as an error here
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner step of the recursive differentiation: resolves the derivative of a
    single argument (a Variable, FixedVariable, expectation value or nested
    transform) with respect to `variable`.

    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    :return: 1.0 / 0.0 for (non-)matching variables, otherwise a derivative Objective.
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        # leaf: derivative is 1 iff this is the variable we differentiate by
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract form,
        # then recompile with the original compilation arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # nested objective/transform: recurse through the chain rule
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value with respect to one
    variable: sums the shift-rule contributions of every gate in the circuit
    that is parametrized by `variable`.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which differentiation is performed
    :return: the derivative as an Objective
    '''
    hamiltonian = E.H
    circuit = E.U

    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))

    # fast return if possible
    if variable not in circuit.extract_variables():
        return 0.0

    total = Objective()
    # _parameter_map yields (position, gate) pairs for every gate that
    # depends on this variable
    for position, gate in circuit._parameter_map[variable]:
        total += __grad_shift_rule(circuit, gate, position, variable, hamiltonian)

    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Shift-rule gradient of a single directly differentiable gate.
    Expects precompiled circuits.

    :param unitary: QCircuit: the circuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable g is differentiated with respect to
    :param hamiltonian: the hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient of g w.r.t variable
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # inner derivative of the gate's parameter w.r.t. the variable (chain rule)
    inner = __grad_inner(g.parameter, variable)
    result = Objective()
    # each shifted gate contributes weight * inner * <H> with g replaced
    for weight, shifted_gate in g.shifted_gates():
        replaced = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=replaced, H=hamiltonian)
        result += (weight * inner) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.2/test/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Small immutable-by-convention record used as an intermediate format for a
    single Pauli-string term: a scalar coefficient, the single-qubit operator
    matrices, and the qubit positions on which they act.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # stored privately; exposed read-only through the properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        # scalar prefactor of this term
        return self._coefficient

    @property
    def operators(self):
        # list of 2x2 operator matrices
        return self._operators

    @property
    def positions(self):
        # qubit indices the operators act on
        return self._positions
class MPOContainer:
    """
    Container handling the site tensors of a matrix product operator (MPO).

    One rank-4 tensor per qubit with index order
    (left bond, right bond, physical out, physical in); the physical
    dimensions are always 2. Supports setting values at given positions,
    growing the bond dimensions dynamically (wannabe-equivalent to dynamic
    arrays) and SVD-based compression of the MPO.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # BUG FIX: dtype was np.complex, an alias removed in NumPy 1.24;
        # the builtin complex is the documented replacement.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left-bond dimension over all site tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write add_operator into the tensor of `qubit`.

        set_at: [b_l, b_r] to set the full 2x2 physical matrix at that bond
        position, or [b_l, b_r, p_out, p_in] to set a single scalar entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array and place
        add_operator in the new bottom-right bond corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and may not grow.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # BUG FIX: np.complex -> complex (removed in NumPy 1.24)
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values at the new corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD: a left-to-right sweep followed by a
        right-to-left sweep, discarding singular values below EPS and
        distributing the square roots of the kept singular values onto
        both neighbouring tensors.
        """
        n_qubits = len(self.container)
        # merge the two physical legs so each tensor is rank 3: (bl, br, 4)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb vh into the next site tensor
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb u into the previous site tensor
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # split the merged physical leg back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds a list of matrix product operators (MPOs) from a tequila
    QubitHamiltonian. Terms of the Hamiltonian are absorbed into an MPO one
    by one with periodic SVD compression; whenever the bond dimension would
    exceed `maxdim`, a new MPO is started, so the Hamiltonian is represented
    as a sum of MPOs.
    """

    def __init__(self,
                 hamiltonian: Union["tq.QubitHamiltonian", Text],
                 backend: Optional[Union["AbstractBackend", Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian (or a text reference to one).
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork. (Annotations are forward references so the
                class can be defined without importing the heavy backends.)
            n_qubits: number of qubits; inferred if not given.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension before a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this file --
            # confirm it is provided elsewhere before relying on inference.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo, a list of MPOContainer."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert self.hamiltonian (a tq.QubitHamiltonian) into a list of
        SubOperator records (coefficient, operator matrices, positions).
        """
        def get_pauli_matrix(string):
            # BUG FIX: dtype was np.complex, removed in NumPy 1.24
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the pure identity; its coefficient
            # is later distributed over all sites.
            # NOTE(review): `first` is only cleared when the identity branch
            # fires, so a non-identity first term never clears it -- confirm
            # the identity always comes first in practice.
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index j, absorbing further terms until
        the bond dimension would reach self.maxdim.

        Returns:
            (mpo, j_next): the MPOContainer and the index of the first term
            NOT consumed by this MPO.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: set the (still 1x1-bond) entries directly. The scalar
        # coefficient is spread evenly over all sites via its n_qubits-th root;
        # the complex() cast keeps fractional powers of negative numbers valid.
        # (BUG FIX: np.complex -> complex, removed in NumPy 1.24.)
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # Remaining terms: grow the bond dimension by one per absorbed term
        # and compress periodically to keep it small.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         np.eye(2))
            # compress every 100 terms to limit intermediate bond growth
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """
        Greedily pack all intermediate terms into as few MPOs as possible,
        each bounded by self.maxdim. Returns the list of MPOContainer.
        """
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator as a rank-4 tensor of shape (d,d,d,d) with
        d = 2**(n_qubits/2), e.g. to compare with the Hamiltonian matrix
        obtained directly from tequila. Sums the contributions of all MPOs.
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # Network layout per MPO (bonds contracted, physical legs dangling):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (bond legs between neighbouring sites)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices): left bond, right bond,
            # then all upper and all lower physical legs.
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # BUG FIX: with a non-torch backend H_m was previously left
            # undefined (NameError); fall back to the raw tensor then.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.2/test/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function of the stock tequila container.

    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a concrete Hamiltonian.
    unitary:
        the state-preparation circuit.
    param_keys:
        ordered parameter keys matching positions in the scipy parameter array.
    Ham_derivatives:
        derivatives of the parametrized Hamiltonian, keyed by variable.
    samples:
        the number of samples to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each __call__.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__.
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        # angles held fixed during optimization, merged into every evaluation
        self.passive_angles = passive_angles
        self.Eval = Eval
        # human-readable summary of the last evaluation, read by the optimizer
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the objective at a scipy parameter vector.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered as param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            energy at p (cast to a plain complex; per the original note,
            backend/jax scalar types confuse the optimizers).
        """
        # Map the flat parameter vector back onto named variables. Circuit
        # variables are kept as-is; parameters that only enter the
        # Hamiltonian are cast to complex.
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # evaluate the variable-dependent Hamiltonian at the current angles
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function of the stock tequila gradient container.

    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at a scipy parameter vector.

        The gradient of each parameter has up to two contributions: the
        circuit derivative (via tq.grad of the expectation value) when the
        parameter appears in the unitary, and the Hamiltonian derivative
        (via Ham_derivatives) when it appears in the Hamiltonian.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered as param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient vector, cast to complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is a real-dtype array even though complex
        # values may be assigned below and the result is returned as
        # complex64 -- confirm imaginary parts are expected to vanish here.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        # rebuild the variable dictionary from the flat parameter vector,
        # casting Hamiltonian-only parameters to complex (as in _EvalContainer)
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # circuit contribution: analytic gradient of <H(vars)> w.r.t. the angle
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian contribution: expectation value of dH/dparameter
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # the circuit contribution is a compiled objective that still
            # needs to be evaluated; pure-Hamiltonian contributions are
            # already numbers
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
def initialize_variables(self, all_variables, initial_values, variables):
"""
Convenience function to format the variables of some objective recieved in calls to optimzers.
Parameters
----------
objective: Objective:
the objective being optimized.
initial_values: dict or string:
initial values for the variables of objective, as a dictionary.
if string: can be `zero` or `random`
if callable: custom function that initializes when keys are passed
if None: random initialization between 0 and 2pi (not recommended)
variables: list:
the variables being optimized over.
Returns
-------
tuple:
active_angles, a dict of those variables being optimized.
passive_angles, a dict of those variables NOT being optimized.
variables: formatted list of the variables being optimized.
"""
# bring into right format
variables = format_variable_list(variables)
initial_values = format_variable_dictionary(initial_values)
all_variables = all_variables
if variables is None:
variables = all_variables
if initial_values is None:
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
elif hasattr(initial_values, "lower"):
if initial_values.lower() == "zero":
initial_values = {k:0.0 for k in all_variables}
elif initial_values.lower() == "random":
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
else:
raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
elif callable(initial_values):
initial_values = {k: initial_values(k) for k in all_variables}
elif isinstance(initial_values, numbers.Number):
initial_values = {k: initial_values for k in all_variables}
else:
# autocomplete initial values, warn if you did
detected = False
for k in all_variables:
if k not in initial_values:
initial_values[k] = 0.0
detected = True
if detected and not self.silent:
warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
active_angles = {}
for v in variables:
active_angles[v] = initial_values[v]
passive_angles = {}
for k, v in initial_values.items():
if k not in active_angles.keys():
passive_angles[k] = v
return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Unlike the stock tequila optimizer this variant takes the Hamiltonian
        (a parametrized PQH object converted via convert_PQH_to_tq_QH) and the
        unitary separately instead of a precompiled Objective; variables may
        appear in either of them.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian; converted to a tequila QubitHamiltonian
            factory via convert_PQH_to_tq_QH.
        unitary:
            the (parametrized) circuit whose expectation value is minimized.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # Convert the parametrized Hamiltonian and collect its own variables
        # and analytic derivatives (w.r.t. the Hamiltonian parameters).
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        # Full variable set = Hamiltonian variables + circuit variables.
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        # The evaluation container builds H(vars) and the expectation value on
        # every call; one silent warm-up call populates E.infostring.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this method's
                # scope (the upstream tequila __call__ took it as a parameter);
                # requesting gradient='qng' would raise a NameError here —
                # confirm before relying on the QNG path.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # '2-point'/'3-point'/'cs' etc.: hand the string straight to scipy.
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` issue as the string
                # 'qng' branch above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytic gradient container: combines circuit-parameter shift
            # gradients with the Hamiltonian-parameter derivatives.
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # Per-iteration callback that snapshots the latest energy/angle (and
        # gradient/hessian, when analytic containers are used) into the history.
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # Forward to a user-supplied callback, if one was given.
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        # `args=(Es,)` is absorbed by E.__call__'s *args and otherwise unused.
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assigns a local that is never read; upstream
        # tequila sets `callback.real_iterations = range(len(E.history))` here.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and pass down the objective construction
    down

    Parameters
    ----------
    Hamiltonian:
        the (parametrized) Hamiltonian, passed through to the optimizer call
    unitary:
        the (parametrized) circuit, passed through to the optimizer call
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run/emulate the circuits (backend dependent)
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # Normalize user-supplied gradient/hessian dictionaries so their keys are
    # proper Variable objects.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # Fix: the second tuple element was previously wrapped in a list
            # (assign_variable([k[1]])), which assign_variable cannot convert;
            # pass the bare key like the first element.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults and build the optimizer instance
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.2/test/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper for differentiating Objectives, ExpectationValues, Unitaries
    (including single gates) and Transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which the derivative is taken;
        default None computes the total gradient (one entry per variable).
    :param no_compile: skip the gradient-mode compilation pass if True.
    return: dictionary of Objectives for the total gradient; otherwise a
        single Objective (or number for Variables/Transforms).
    '''
    # Total gradient: one recursive call per variable of the objective.
    if variable is None:
        names = objective.extract_variables()
        if len(names) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for name in names:
            assert (name is not None)
            gradients[name] = grad(objective, name, no_compile=no_compile)
        return gradients
    variable = assign_variable(variable)

    # Tensors of objectives: differentiate elementwise.
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)

    # Objective does not depend on the variable at all.
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # Compile into shift-rule-differentiable primitives first.
        compiled = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Apply the chain rule over an Objective's transformation: for each argument,
    multiply the outer derivative of the transformation by the inner derivative
    of the argument w.r.t. ``variable`` and accumulate the sum.
    """
    args = objective.args
    transformation = objective.transformation
    total = None
    inner_cache = {}
    for position, arg in enumerate(args):
        # Outer derivative of the transformation w.r.t. argument `position`.
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_fn = jax.grad(transformation, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_fn = jax.grad(transformation, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=outer_fn)

        if hasattr(arg, "U"):
            # expectation values may repeat -> cache their inner derivatives
            if arg in inner_cache:
                inner = inner_cache[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                inner_cache[arg] = inner
        else:
            # purely variable-dependent inner derivative
            inner = __grad_inner(arg=arg, variable=variable)

        # don't pile up zero expectationvalues
        if inner == 0.0:
            continue
        total = outer * inner if total is None else total + outer * inner

    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Differentiate a leaf argument all the way down to Variables:
    returns 1.0 (0.0) when ``arg`` is (is not) the variable itself,
    and dispatches to the expectation-value / objective rules otherwise.
    :param arg: a transform, expectation value or variable to differentiate
    :param variable: the Variable with respect to which to differentiate
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation values: differentiate the abstract one and
        # re-compile with the original input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value: sum of shift-rule
    contributions from every gate in the circuit parametrized by ``variable``.
    :param E: the expectation value to differentiate
    :param variable: the Variable to differentiate with respect to
    :return: the derivative as an Objective
    '''
    hamiltonian = E.H
    circuit = E.U
    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))
    # fast return if possible
    if variable not in circuit.extract_variables():
        return 0.0
    gradient = Objective()
    for position, gate in circuit._parameter_map[variable]:
        gradient += __grad_shift_rule(circuit, gate, position, variable, hamiltonian)
    assert gradient is not None
    return gradient
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a directly differentiable gate via its shift rule.
    Expects precompiled circuits.
    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the variable w.r.t. which g is differentiated
    :param hamiltonian: Hamiltonian measured against the shifted circuits
    :return: an Objective whose evaluation yields the gradient contribution
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate parameter w.r.t. the variable
    chain_factor = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * chain_factor) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Small read-only record for one Pauli term in an intermediate format:
    a scalar coefficient, the list of 2x2 operator matrices, and the qubit
    positions on which those operators act.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        # Raw inputs are kept private and exposed through the read-only
        # properties below.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices of this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress
    the MPO via SVD.

    Each qubit holds one rank-4 tensor with index layout
    [left bond, right bond, physical, physical]; physical legs are 2x2.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        """Initialize every qubit with a trivial (1,1,2,2) zero tensor."""
        self.n_qubits = n_qubits
        # Fix: ``np.complex`` was deprecated in NumPy 1.20 and removed in
        # NumPy 1.24; the builtin ``complex`` is the documented, equivalent
        # replacement for the dtype.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """ Returns max dimension of container (largest left bond dim). """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of ``qubit``.

        set_at: where to put data — either [left, right] to set a whole 2x2
            block (add_operator must be a 2x2 matrix), or
            [left, right, row, col] to set a single entry (scalar).
        Raises Exception for any other set_at length.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of ``qubit``'s tensor (mimicks a dynamic
        array) and place ``add_operator`` in the new bottom-right 2x2 block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
            the last two dimensions are always 2x2 only (must be zero).
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # Fix: np.complex -> complex (removed in NumPy 1.24).
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD: a left-to-right sweep followed by a
        right-to-left sweep.  Singular values below EPS are truncated and the
        singular weights are split evenly (sqrt) between neighbouring sites.
        """
        n_qubits = len(self.container)
        # Fuse the two physical legs so each site is a rank-3 tensor.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site.
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site.
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Un-fuse the physical legs back to 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build and evaluate MPO representations of qubit Hamiltonians, loosely
    modelled on tensornetwork's FiniteMPO.

    Args:
        hamiltonian: a tequila QubitHamiltonian (or text) whose Pauli terms
            are converted into MPOs.
        backend: an optional tensornetwork backend (accepted for interface
            compatibility; not used by the methods below).
        n_qubits: number of qubits; if omitted, ``get_n_qubits`` is consulted.
        name: an optional name for the MPO (not used by the methods below).
        maxdim: bond-dimension cap; when an MPO under construction reaches
            this dimension a new MPO is started (see ``intermediate_to_mpo``).
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class in this
            # file; constructing without an explicit n_qubits raises
            # AttributeError — confirm where get_n_qubits is meant to live.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOs, stored in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate self.hamiltonian's Pauli strings into a list of SubOperator
        records (coefficient, 2x2 operator matrices, qubit positions).
        Only the first Pauli string may be a pure identity; any later identity
        raises an Exception.
        """
        def get_pauli_matrix(string):
            # Fix: np.complex was removed in NumPy 1.24; the builtin complex
            # is the documented, equivalent dtype.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index ``j``, absorbing subsequent terms
        until either all terms are consumed or the bond dimension reaches
        self.maxdim.  Returns (MPOContainer, index of first unconsumed term).
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: fills the trivial (1,1,2,2) tensors.  The coefficient is
        # distributed evenly over all sites as an n_qubits-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # Remaining terms: grow the bond dimensions by one per term and place
        # the term's operators (identities elsewhere) in the new corner block.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # Compress periodically so intermediate bond dimensions stay small.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as self.maxdim requires."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator from self.mpo (list of MPOContainer),
        e.g. to compare with the Hamiltonian obtained from tequila.
        Returns a rank-4 tensor of shape (d, d, d, d), d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Contract each MPO over all bond indices; the network looks like
        #   | | | |
        #  -O--O--...--O--O-
        #   | | | |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # NOTE(review): if the backend returns a non-torch tensor, H_m is
            # left over from the previous iteration (NameError on the first) —
            # confirm that only the pytorch backend is intended here.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        # Reshape to rank 4 (drop trivial left/right bonds; combine the fused
        # top and bottom physical legs into one index each).
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Unlike the upstream tequila container, this one holds a parametrized
    Hamiltonian factory and a unitary separately and rebuilds the expectation
    value on every call.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tequila QubitHamiltonian.
    unitary:
        the (parametrized) circuit to evaluate against.
    param_keys:
        ordered keys mapping positions in the scipy parameter array to variables.
    Ham_derivatives:
        optional dictionary of Hamiltonian-parameter derivatives (used by the
        gradient subclass).
    samples:
        the number of samples to evaluate the expectation value with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        # Variables held fixed during optimization; merged into every call.
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the expectation value <U|H(p)|U> at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective (ordered as param_keys).
        args
        kwargs

        Returns
        -------
        complex:
            the evaluated energy (cast to complex; see note at the return).
        """
        # Circuit variables are passed through as real numbers; variables that
        # only parametrize the Hamiltonian are cast to complex.
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Rebuild the Hamiltonian at the current parameter values, then
        # simulate the expectation value on the qulacs backend.
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): scipy minimizers expect a real scalar objective;
        # confirm that returning complex here is intentional.
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient counterpart of _EvalContainer for the scipy interface.

    Overwrites __call__ so that scipy receives the gradient vector of the
    expectation value. Each component has up to two contributions:
    a circuit contribution (shift rule, via tq.grad) and a Hamiltonian
    contribution (expectation value of the Hamiltonian derivative).
    See _EvalContainer for the constructor/attribute details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the parameter vector p.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.

        Returns
        -------
        numpy.ndarray (complex64):
            gradient vector with one entry per active parameter.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # Bugfix: the buffer must be complex from the start. Hamiltonian
        # coefficients are handled as complex (see _EvalContainer.__call__)
        # and the result is returned as complex64; assigning a complex value
        # into the previous real-dtype numpy.zeros(self.N) buffer fails or
        # silently discards the imaginary part.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # circuit contribution: d<H>/dtheta via tequila's shift rules;
            # this yields a compiled objective that is evaluated further below
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian contribution: <dH/dc> for parametrized coefficients
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # `derivative` is either a plain number (Hamiltonian-only term) or
            # a compiled tequila objective that still has to be evaluated.
            # numbers.Number covers python and numpy scalars alike and is more
            # robust than the previous float/numpy.complex64 checks.
            if isinstance(derivative, numbers.Number):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer that overwrites the expectation and gradient container
    objects so that the objective (Hamiltonian + unitary) is constructed
    inside the optimizer instead of being passed in as a tequila Objective.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Format the variables and initial values received in calls to the optimizer.

        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian + circuit variables).
        initial_values: dict or string or callable or number:
            initial values for the variables.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if number: every variable is set to that value
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over (None means all).

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # a plain string was given: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in this
                # module -- this branch would raise NameError. Confirm the import.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): neither `warnings` nor TequilaWarning is imported
                # in this module -- this branch would raise NameError.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into the variables being optimized and the fixed ones
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (PQH); converted internally with
            convert_PQH_to_tq_QH.
        unitary:
            the (parametrized) circuit defining the expectation value.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # convert the parametrized Hamiltonian and collect all of its
        # variables plus the circuit variables into one list
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # silent warm-up call: fills E.infostring and primes the history
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    # NOTE(review): TequilaException is not imported here.
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is undefined in this scope, and
                # get_qng_combos/_QngContainer are not imported (the
                # _containers import is commented out at the top of the file);
                # the 'qng' path will raise NameError if taken.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # any other string ('2-point', 'cs', '3-point') is handed to
                # scipy for numerical differentiation
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined-name issue as the 'qng' branch above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient through the custom container
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up call, as for E above
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # collects per-iteration data; scipy calls this once per iteration
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is assigned but never used afterwards.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy optimizer and passes the objective
    construction (Hamiltonian + unitary) down into it.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian defining the objective (converted inside the
        optimizer with convert_PQH_to_tq_QH).
    unitary:
        the (parametrized) circuit used in the expectation value.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        the device on which to run/sample the circuits.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # normalize gradient/hessian dictionaries to Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # bugfix: the second key was previously wrapped in a list
            # (assign_variable([k[1]])), handing a list instead of the
            # variable itself to assign_variable
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_1.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.
    :param objective (Objective/QTensor/ExpectationValue): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
        default None: total gradient (a dictionary variable -> partial derivative is returned).
    :param no_compile: if True, skip the gradient-mode compilation pass (the
        objective is assumed to be compiled already).
    return: dictionary of Objectives if variable is None, otherwise a single
        Objective representing the partial derivative.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            # recurse once per variable to build the total gradient
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate elementwise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # derivative of something independent of `variable` is the empty Objective
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # gradient_mode compilation decomposes gates into shift-rule-compatible form
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch: a raw expectation value is differentiated directly; a
    # single-expectationvalue objective is differentiated through its last
    # argument; everything else goes through the chain rule in __grad_objective
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over the arguments of an Objective:
    dO/dvariable = sum_i (d transformation / d arg_i) * (d arg_i / d variable).
    The outer derivative is taken with jax/autograd, the inner one recursively
    with __grad_inner.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values so shared args are
    # differentiated only once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # NOTE(review): holomorphic=True tells jax to treat the
            # transformation as complex-differentiable; jax then requires
            # complex-typed inputs/outputs -- confirm upstream casts to complex.
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Inner derivative used by __grad_objective: differentiate a single
    argument (Variable, FixedVariable, expectation value, or nested
    objective) with respect to `variable`.

    :param arg: the object to be differentiated
    :param variable: the Variable with respect to which arg is differentiated
    :return: 1.0/0.0 for (non-)matching variables, otherwise an Objective
    """
    assert isinstance(variable, Variable)
    # a bare variable differentiates to 1 against itself, 0 otherwise
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    # fixed (non-optimizable) values have zero derivative
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate its abstract form and
        # re-compile with the original input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # anything else is a nested objective -> chain rule
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """
    Analytic partial derivative of an expectation value <U(theta)| H |U(theta)>
    with respect to `variable`, built by applying the shift rule to every
    gate in the circuit that carries this variable.

    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: an Objective representing d<H>/d(variable) (0.0 if independent)
    """
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return: the circuit does not depend on this variable at all
    if variable not in unitary.extract_variables():
        return 0.0
    # sum the shift-rule contribution of every gate parametrized by `variable`
    total = Objective()
    for position, gate in unitary._parameter_map[variable]:
        total += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """
    Shift-rule derivative of a single directly-differentiable gate.
    Expects precompiled circuits (the gate must expose shifted_gates).

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the Variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    """
    # custom gate constructions may override shifted_gates; without it the
    # gate was not brought into differentiable form by the compiler
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain-rule factor d(parameter)/d(variable)
    chain_factor = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, replacement in g.shifted_gates():
        # replace the gate at position i by its shifted version and measure
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * chain_factor) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.8/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight immutable record for one term of a qubit Hamiltonian:
    a scalar coefficient together with the operator matrices and the
    qubit positions they act on. Serves as the intermediate format
    between the Hamiltonian and the MPO construction.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        # stored privately; exposed read-only through the properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """List of 2x2 operator matrices of this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Container handling the tensors of a matrix-product operator (MPO).

    One rank-4 tensor of shape (bond_left, bond_right, 2, 2) is stored per
    qubit. Tensors can be written to (set_tensor), grown dynamically like a
    wannabe dynamic array (update_container), and the whole MPO can be
    compressed via SVD sweeps (compress_mpo).

    Note: `dtype=np.complex` was replaced by the builtin `complex` -- the
    numpy alias was deprecated in NumPy 1.20 and removed in 1.24; it was an
    alias for the builtin, so behavior is unchanged.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        """Start every site with a trivial 1x1 bond dimension."""
        self.n_qubits = n_qubits
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal (left) bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: where to put data -- a [bond_l, bond_r] pair to set a full
        2x2 matrix, or a 4-index list to set a single value.
        Raises Exception for any other length.
        """
        if len(set_at) == 2:
            # set a full 2x2 matrix at the given bond indices
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # set one specific scalar entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend the bond dimension along every
        axis carrying a 1; the last two (physical) dimensions stay 2x2 and
        must be zero. A length-2 list is padded with [0, 0].
        The 2x2 `add_operator` is placed at the new (last, last) bond corner.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # build a fresh list instead of += so the caller's list
                # is not mutated as a side effect
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values at the fresh corner
        new_tensor[new_shape[0]-1, new_shape[1]-1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO via SVD: a forward sweep pushing weight to the
        right, then a backward sweep pushing it left, truncating
        singular values below EPS in both directions.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices so each site is a rank-3 tensor
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3), (1, -2)])
        # split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
"""
Class building up on tensornetwork FiniteMPO to handle
MPO-Hamiltonians
"""
def __init__(self,
hamiltonian: Union[tq.QubitHamiltonian, Text],
# tensors: List[Tensor],
backend: Optional[Union[AbstractBackend, Text]] = None,
n_qubits: Optional[int] = None,
name: Optional[Text] = None,
maxdim: Optional[int] = 10000) -> None:
# TODO: modifiy docstring
"""
Initialize a finite MPO object
Args:
tensors: The mpo tensors.
backend: An optional backend. Defaults to the defaulf backend
of TensorNetwork.
name: An optional name for the MPO.
"""
self.hamiltonian = hamiltonian
self.maxdim = maxdim
if n_qubits:
self._n_qubits = n_qubits
else:
self._n_qubits = self.get_n_qubits()
@property
def n_qubits(self):
return self._n_qubits
def make_mpo_from_hamiltonian(self):
intermediate = self.openfermion_to_intermediate()
# for i in range(len(intermediate)):
# print(intermediate[i].coefficient)
# print(intermediate[i].operators)
# print(intermediate[i].positions)
self.mpo = self.intermediate_to_mpo(intermediate)
def openfermion_to_intermediate(self):
# Here, have either a QubitHamiltonian or a file with a of-operator
# Start with Qubithamiltonian
def get_pauli_matrix(string):
pauli_matrices = {
'I': np.array([[1, 0], [0, 1]], dtype=np.complex),
'Z': np.array([[1, 0], [0, -1]], dtype=np.complex),
'X': np.array([[0, 1], [1, 0]], dtype=np.complex),
'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex)
}
return pauli_matrices[string.upper()]
intermediate = []
first = True
# Store all paulistrings in intermediate format
for paulistring in self.hamiltonian.paulistrings:
coefficient = paulistring.coeff
# print(coefficient)
operators = []
positions = []
# Only first one should be identity -> distribute over all
if first and not paulistring.items():
positions += []
operators += []
first = False
elif not first and not paulistring.items():
raise Exception("Only first Pauli should be identity.")
# Get operators and where they act
for k,v in paulistring.items():
positions += [k]
operators += [get_pauli_matrix(v)]
tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
intermediate += [tmp_op]
# print("len intermediate = num Pauli strings", len(intermediate))
return intermediate
def build_single_mpo(self, intermediate, j):
    """Absorb Pauli-string terms of ``intermediate`` into a single MPO.

    Starting at term index ``j``, terms are added one by one until either
    the term list is exhausted or the MPO bond dimension reaches
    ``self.maxdim``.

    Parameters
    ----------
    intermediate:
        list of term objects exposing ``coefficient``, ``positions`` and
        ``operators`` attributes (see the SubOperator construction above).
    j:
        index of the first term to absorb.

    Returns
    -------
    tuple:
        ``(mpo, j)`` where ``mpo`` is the constructed MPOContainer and
        ``j`` is the index of the first term NOT yet absorbed (so the
        caller can continue with the next MPO).
    """
    n_qubits = self._n_qubits
    mpo = MPOContainer(n_qubits=n_qubits)
    # -------------------------------------------------------------------
    # First term: all entries are plain 2x2 matrices set at block [0, 0].
    # Typically the q-th factor is an identity unless q carries an operator.
    my_coefficient = intermediate[j].coefficient
    my_positions = intermediate[j].positions
    my_operators = intermediate[j].operators
    # Spread the (complex) coefficient evenly over the n_qubits sites.
    # BUGFIX: np.complex was a deprecated alias of builtin complex and was
    # removed in NumPy 1.24; use complex() directly (identical semantics).
    site_factor = complex(my_coefficient) ** (1 / n_qubits)
    for q in range(n_qubits):
        if q not in my_positions:
            mpo.set_tensor(qubit=q, set_at=[0, 0],
                           add_operator=site_factor * np.eye(2))
        else:
            my_pos_index = my_positions.index(q)
            mpo.set_tensor(qubit=q, set_at=[0, 0],
                           add_operator=site_factor * my_operators[my_pos_index])
    # -------------------------------------------------------------------
    # Remaining terms: grow the MPO block structure until maxdim is hit.
    j += 1
    while j < len(intermediate) and mpo.get_dim() < self.maxdim:
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        site_factor = complex(my_coefficient) ** (1 / n_qubits)
        for q in range(n_qubits):
            # Growth direction depends on the chain boundary; it is
            # guaranteed that every index appears only once in positions.
            if q == 0:
                update_dir = [0, 1]
            elif q == n_qubits - 1:
                update_dir = [1, 0]
            else:
                update_dir = [1, 1]
            if q in my_positions:
                # There is an operator on this site: add it.
                my_pos_index = my_positions.index(q)
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=site_factor * my_operators[my_pos_index])
            else:
                # Otherwise add an identity.
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=site_factor * np.eye(2))
        # Periodically compress to keep the bond dimension in check.
        if not j % 100:
            mpo.compress_mpo()
        j += 1
    mpo.compress_mpo()
    return mpo, j
def intermediate_to_mpo(self, intermediate):
    """Convert the full list of Pauli-string terms into a list of MPOs.

    ``build_single_mpo`` consumes terms starting at ``j_global`` until the
    bond dimension would exceed ``self.maxdim`` and returns the index of
    the first unconsumed term; we keep calling it until every term of
    ``intermediate`` has been absorbed.

    Parameters
    ----------
    intermediate:
        list of term objects as produced by the intermediate construction.

    Returns
    -------
    list:
        the MPOContainer objects that together represent the operator.
    """
    # NOTE: the previous version kept an unused `n_qubits` local and a
    # dead `num_mpos` counter; both removed (len(mpo_list) is the count).
    mpo_list = []
    j_global = 0
    while j_global < len(intermediate):
        current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
        mpo_list.append(current_mpo)
    return mpo_list
def construct_matrix(self):
    """Recover the dense operator, e.g. to compare with the tq Hamiltonian.

    Contracts every MPO in ``self.mpo`` over its bond indices and sums the
    results.  Each site tensor in ``m.container`` carries indices
    ``[bond, bond, physical, physical]``.

    Returns
    -------
    numpy.ndarray:
        order-4 tensor of shape ``(d, d, d, d)`` with ``d = 2**(n/2)``
        (assumes an even qubit count — TODO confirm with callers).
    """
    mpo = self.mpo
    n_qubits = self._n_qubits
    d = int(2 ** (n_qubits / 2))
    first = True
    H = None
    # Network layout (contract along the horizontal double bonds):
    #   |  |       |  |
    #  -O--O--...--O--O-
    #   |  |       |  |
    for m in mpo:
        assert n_qubits == len(m.container)
        nodes = [tn.Node(m.container[q], name=str(q))
                 for q in range(n_qubits)]
        # Connect neighbouring bond indices.
        for q in range(n_qubits - 1):
            nodes[q][1] ^ nodes[q + 1][0]
        # Collect dangling (free) edges: left bond, right bond, then the
        # upper and lower physical legs of every site, in that order.
        edges = [nodes[0].get_edge(0), nodes[-1].get_edge(1)]
        for q in range(n_qubits):
            edges.append(nodes[q].get_edge(2))
        for q in range(n_qubits):
            edges.append(nodes[q].get_edge(3))
        # Contract all non-dangling edges.
        res = tn.contractors.auto(nodes, output_edge_order=edges)
        if isinstance(res.tensor, torch.Tensor):
            H_m = res.tensor.numpy()
        else:
            # BUGFIX: previously H_m was only assigned in the torch branch,
            # so any non-torch backend raised NameError here.
            H_m = np.asarray(res.tensor)
        if first:
            H = H_m
            first = False
        else:
            H += H_m
    # Drop the trivial left/right bond indices and combine the physical
    # legs into a (d, d, d, d) tensor.
    return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/beh2/beh2_permut/simulations/beh2_wfn_bl_2.8/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """Energy-evaluation callable handed to scipy.optimize.

    Unlike tequila's stock container, the Hamiltonian here is itself a
    function of the variables: on every call it is rebuilt from the current
    parameters, a fresh ExpectationValue is formed with the fixed unitary,
    and the energy is simulated on the qulacs backend.  Used by the SciPy
    optimizer below and should not be used elsewhere.

    Attributes
    ----------
    Hamiltonian:
        callable mapping a variable dictionary to a tequila Hamiltonian.
    unitary:
        the (fixed) parametrized circuit.
    param_keys:
        ordered keys mapping positions of the scipy parameter vector to
        tequila variables.
    samples:
        number of shots per evaluation.
    save_history:
        whether each call's energy and angles are recorded.
    print_level:
        verbosity of printing during call.
    N:
        number of active parameters, i.e. len(param_keys).
    history / history_angles:
        per-call energy and angle records (only when save_history is True).
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None,
                 Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """Evaluate the energy at parameter vector ``p``.

        Parameters that appear in the unitary are passed through as-is;
        the remaining (Hamiltonian-only) parameters are cast to complex.

        Parameters
        ----------
        p: numpy array:
            parameter vector, ordered as ``self.param_keys``.

        Returns
        -------
        complex:
            the simulated energy; cast to complex because jax types
            confuse the scipy optimizers.
        """
        angles = {}
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        # Renamed from `vars` to avoid shadowing the builtin.
        variables = format_variable_dictionary(angles)
        Hamiltonian = self.Hamiltonian(variables)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, variables, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuse optimizers
class _GradContainer(_EvalContainer):
    """Gradient-evaluation callable handed to scipy.optimize.

    For each active parameter the derivative has (up to) two parts: the
    circuit derivative (tq.grad of the expectation value w.r.t. a unitary
    variable) and the Hamiltonian derivative (expectation value of the
    pre-computed derivative operator).  See _EvalContainer for attribute
    details.
    """

    def __call__(self, p, *args, **kwargs):
        """Evaluate the gradient at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            parameter vector, ordered as ``self.param_keys``.

        Returns
        -------
        numpy.array:
            complex64 gradient vector, ordered as ``self.param_keys``.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUGFIX: the array was previously allocated as float64, so complex
        # derivative values silently lost their imaginary part (with a
        # numpy ComplexWarning).  Allocate complex64 to match the return.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        # Renamed from `vars` to avoid shadowing the builtin.
        formatted_variables = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Circuit part of the derivative.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(formatted_variables)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian part of the derivative.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(formatted_variables)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, formatted_variables, backend='qulacs', samples=self.samples)
            # A plain number means everything was already simulated;
            # otherwise `derivative` is a compiled objective to evaluate.
            # NOTE(review): a complex-valued simulated derivative would fall
            # into the callable branch here — presumably tq.simulate returns
            # a float in this setup; confirm against the backend.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """SciPy optimizer variant for variable-dependent Hamiltonians.

    Overwrites the expectation-value and gradient container objects of
    tequila's OptimizerSciPy so that the Hamiltonian (which itself depends
    on the optimization variables) is rebuilt at every evaluation.
    """

    def initialize_variables(self, all_variables, initial_values, variables):
        """Split variables into active/passive sets and format start values.

        Parameters
        ----------
        all_variables:
            every variable of the Hamiltonian and the unitary.
        initial_values: dict or str or callable or number:
            initial values for the variables; a string may be 'zero' or
            'random', a callable is evaluated per key, a number is
            broadcast, None draws uniformly from [0, 2pi), and a partial
            dict is autocompleted with zeroes.
        variables: list:
            the variables being optimized over (None -> all).

        Returns
        -------
        tuple:
            (active_angles, passive_angles, variables) where active_angles
            are optimized and passive_angles are held fixed.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k: 0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module; this branch would raise NameError — confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings`/TequilaWarning are not imported in
                # this module; would raise NameError if reached — confirm.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized project Hamiltonian; converted via
            convert_PQH_to_tq_QH and rebuilt at every energy evaluation.
        unitary:
            the parametrized circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point; generated if None.
        gradient: optional:
            information/object used to get the gradient; None -> analytic.
        hessian: optional:
            information/object used to get the hessian; None -> analytic.
        reset_history: bool: Default = True:
            whether to reset stored history before optimizing.

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        # All variables = Hamiltonian variables + circuit variables.
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value dictionary into (ordered) arrays.
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        E = _EvalContainer(Hamiltonian=H,
                           unitary=unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # Silent warm-up call: populates E.infostring for the report below.
        E.print_level = 0
        E(param_values)
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # Detect if numerical gradients shall be used; switch off compiling.
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    # NOTE(review): TequilaException, get_qng_combos,
                    # _QngContainer and `objective` are not defined in this
                    # module; the whole qng path would raise NameError.
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # Numerical gradient string ('2-point', 'cs', '3-point').
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient, dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                                unitary=unitary,
                                Hamiltonian=H,
                                Eval=E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            # Silent warm-up call, as for E above.
            dE.print_level = 0
            dE(param_values)
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            # A fresh class is created per optimization run, so these
            # class-level lists are effectively per-run state.
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # Failsafe for methods that never invoke the callback (e.g. cobyla).
        # BUGFIX: the original assigned `real_iterations = range(...)` to a
        # local that was never read — dead code removed; the history
        # fallback below already covers the no-callback case.
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # Some scipy methods give back the last value and not the minimum
        # (e.g. cobyla) -> return the best evaluation seen instead.
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history,
                            variables=format_variable_dictionary(angles_final),
                            scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """Entry point: build the local optimize_scipy optimizer and run it.

    Calls the local optimize_scipy class instead of tequila's stock scipy
    minimizer, passing the objective construction down to it.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian to optimize over (together with unitary).
    unitary:
        the parametrized circuit.
    gradient:
        '2-point', 'cs' or '3-point' for numerical evaluation, a dict of
        variables/objectives for a custom gradient, 'qng' for the quantum
        natural gradient, or None for automatic construction.
    hessian:
        analogous to gradient.
    initial_values:
        dict of variable keys and numbers; autocompleted/generated if None.
    variables:
        list of variables to optimize (None -> all).
    samples:
        shots per circuit run (None activates full wavefunction simulation).
    maxiter:
        maximum number of iterations.
    backend / backend_options / noise / device:
        simulator configuration, forwarded to the optimizer.
    method / tol / method_options / method_bounds / method_constraints:
        scipy.optimize.minimize configuration (see scipy documentation).
    silent:
        no printout if True.
    save_history:
        save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: assign_variable was called on the one-element list
            # [k[1]] instead of the variable k[1] itself (typo; compare the
            # sibling call assign_variable(k[0])).
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.